// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>

#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .amode31 section.
 */
unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31;
unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31;
unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31;
unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;

/*
 * Control registers CR2, CR5 and CR15 are initialized with addresses
 * of tables that must be placed below 2G, which is handled by the AMODE31
 * sections.
 * Because the AMODE31 sections are relocated below 2G at startup,
 * the content of control registers CR2, CR5 and CR15 must be updated
 * with new addresses after the relocation. The initial setup of the
 * control registers occurs in head64.S; it then gets updated again after
 * AMODE31 relocation. We must access the relevant AMODE31 tables indirectly
 * via pointers placed in the .amode31.refs linker section. Those pointers get
 * updated automatically during AMODE31 relocation and always contain a valid
 * address within the AMODE31 sections.
 */

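/*
 * The following tables are referenced via control registers CR2, CR5 and
 * CR15 (see setup_cr() below): a dispatchable-unit control table (DUCT),
 * an ASN-second-table entry (ASTE), a dispatchable-unit access-list
 * designation (DUALD) and a linkage stack.
 */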
static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);

static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
	[1] = 0xffffffffffffffff
};

static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0,
	0x80000000, 0, 0, 0
};

static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
	0, 0, 0x89000000, 0,
	0, 0, 0x8a000000, 0
};

static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;

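/*
 * Variables tagged __bootdata() and __bootdata_preserved() are shared
 * with the decompressor (early boot) stage, which fills them in before
 * the kernel proper starts; see asm/boot_data.h.
 */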
int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
struct mem_detect_info __bootdata(mem_detect);
struct initrd_data __bootdata(initrd_data);

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata(__amode31_base);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

DEFINE_STATIC_KEY_FALSE(cpu_has_bear);

/*
 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 * Leaving the mask zero always uses write through, which is safe.
 */
unsigned long mio_wb_bit_mask __ro_after_init;

/*
 * This is set up by the setup-routine at boot-time.
 * For S390 we need to find out what we have to set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameters.
 */

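/*
 * Parse "condev=<devno>": remember the console device number and reset
 * console_irq so that the subchannel gets looked up again.
 */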
static int __init condev_setup(char *str)
{
	int vdev;

	vdev = simple_strtoul(str, &str, 0);
	if (vdev >= 0 && vdev < 65536) {
		console_devno = vdev;
		console_irq = -1;
	}
	return 1;
}

__setup("condev=", condev_setup);

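/*
 * Register the console driver that matches the detected console mode as
 * the preferred console.
 */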
static void __init set_preferred_console(void)
{
	if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
		add_preferred_console("ttyS", 0, NULL);
	else if (CONSOLE_IS_3270)
		add_preferred_console("tty3270", 0, NULL);
	else if (CONSOLE_IS_VT220)
		add_preferred_console("ttysclp", 0, NULL);
	else if (CONSOLE_IS_HVC)
		add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
	if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
		SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
	if (!strcmp(str, "3215"))
		SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
	if (!strcmp(str, "3270"))
		SET_CONSOLE_3270;
#endif
	set_preferred_console();
	return 1;
}

__setup("conmode=", conmode_setup);

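/*
 * Pick a default console mode when no conmode= parameter was given:
 * under z/VM the CP QUERY CONSOLE/TERM output decides between 3215,
 * 3270 and SCLP, under KVM an SCLP VT220, SCLP line-mode or HVC
 * console is chosen, everywhere else the SCLP console is used.
 */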
static void __init conmode_default(void)
{
	char query_buffer[1024];
	char *ptr;

	if (MACHINE_IS_VM) {
		cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
		console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
		ptr = strstr(query_buffer, "SUBCHANNEL =");
		console_irq = simple_strtoul(ptr + 13, NULL, 16);
		cpcmd("QUERY TERM", query_buffer, 1024, NULL);
		ptr = strstr(query_buffer, "CONMODE");
		/*
		 * Set the conmode to 3215 so that the device recognition
		 * will set the cu_type of the console to 3215. If the
		 * conmode is 3270 and we don't set it back then both
		 * 3215 and the 3270 driver will try to access the console
		 * device (3215 as console and 3270 as normal tty).
		 */
		cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
		if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
			return;
		}
		if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		} else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
			SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
			SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
			SET_CONSOLE_SCLP;
#endif
		}
	} else if (MACHINE_IS_KVM) {
		if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
			SET_CONSOLE_VT220;
		else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
			SET_CONSOLE_SCLP;
		else
			SET_CONSOLE_HVC;
	} else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
		SET_CONSOLE_SCLP;
#endif
	}
}

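/*
 * For a zfcp/nvme dump boot that is not already running as a
 * dump-capture kernel, restrict the common I/O layer to the IPL and
 * console devices and reduce the console loglevel.
 */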
#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
	if (!is_ipl_type_dump())
		return;
	if (oldmem_data.start)
		return;
	strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
	console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
	if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_restart(command);
}

void machine_halt(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_halt();
}

void machine_power_off(void)
{
	if (!in_interrupt() || oops_in_progress)
		/*
		 * Only unblank the console if we are called in enabled
		 * context or a bust_spinlocks cleared the way for us.
		 */
		console_unblank();
	_machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void *restart_stack;

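/*
 * stack_alloc()/stack_free() provide THREAD_SIZE aligned kernel stacks,
 * either from the vmalloc area (CONFIG_VMAP_STACK) or from the page
 * allocator.
 */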
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
	void *ret;

	ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
			     NUMA_NO_NODE, __builtin_return_address(0));
	kmemleak_not_leak(ret);
	return (unsigned long)ret;
#else
	return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
	vfree((void *) stack);
#else
	free_pages(stack, THREAD_SIZE_ORDER);
#endif
}

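/*
 * Allocate the boot CPU's async (interrupt) stack early; note that it is
 * taken straight from the page allocator rather than via stack_alloc().
 */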
int __init arch_early_irq_init(void)
{
	unsigned long stack;

	stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
	if (!stack)
		panic("Couldn't allocate async stack");
	S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
	return 0;
}

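/*
 * Move the boot CPU off the statically allocated init stack onto a
 * regularly allocated kernel stack before entering rest_init().
 */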
void __init arch_call_rest_init(void)
{
	unsigned long stack;

	stack = stack_alloc();
	if (!stack)
		panic("Couldn't allocate kernel stack");
	current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
	current->stack_vm_area = (void *) stack;
#endif
	set_task_stack_end_magic(current);
	stack += STACK_INIT_OFFSET;
	S390_lowcore.kernel_stack = stack;
	call_on_stack_noreturn(rest_init, stack);
}

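/*
 * Allocate and populate the boot CPU's lowcore: interrupt and restart
 * PSWs, nodat/restart/machine-check stacks and the CPU timer state.
 * The interrupt PSWs are installed without the DAT bit (unless KASAN
 * requires it); setup_lowcore_dat_on() turns DAT on later.
 */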
static void __init setup_lowcore_dat_off(void)
{
	unsigned long int_psw_mask = PSW_KERNEL_BITS;
	unsigned long mcck_stack;
	struct lowcore *lc;

	if (IS_ENABLED(CONFIG_KASAN))
		int_psw_mask |= PSW_MASK_DAT;

	/*
	 * Setup lowcore for boot cpu
	 */
	BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
	lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
	if (!lc)
		panic("%s: Failed to allocate %zu bytes align=%zx\n",
		      __func__, sizeof(*lc), sizeof(*lc));

	lc->restart_psw.mask = PSW_KERNEL_BITS;
	lc->restart_psw.addr = (unsigned long) restart_int_handler;
	lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->external_new_psw.addr = (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->svc_new_psw.addr = (unsigned long) system_call;
	lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
	lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
	lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
	lc->io_new_psw.addr = (unsigned long) io_int_handler;
	lc->clock_comparator = clock_comparator_max;
	lc->nodat_stack = ((unsigned long) &init_thread_union)
		+ THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
	lc->current_task = (unsigned long)&init_task;
	lc->lpp = LPP_MAGIC;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->preempt_count = S390_lowcore.preempt_count;
	nmi_alloc_boot_cpu(lc);
	lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
	lc->exit_timer = S390_lowcore.exit_timer;
	lc->user_timer = S390_lowcore.user_timer;
	lc->system_timer = S390_lowcore.system_timer;
	lc->steal_timer = S390_lowcore.steal_timer;
	lc->last_update_timer = S390_lowcore.last_update_timer;
	lc->last_update_clock = S390_lowcore.last_update_clock;

	/*
	 * Allocate the global restart stack which is the same for
	 * all CPUs in case *one* of them does a PSW restart.
	 */
	restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!restart_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	restart_stack += STACK_INIT_OFFSET;

	/*
	 * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
	 * restart data to the absolute zero lowcore. This is necessary if
	 * PSW restart is done on an offline CPU that has lowcore zero.
	 */
	lc->restart_stack = (unsigned long) restart_stack;
	lc->restart_fn = (unsigned long) do_restart;
	lc->restart_data = 0;
	lc->restart_source = -1U;

	mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
	if (!mcck_stack)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, THREAD_SIZE, THREAD_SIZE);
	lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;

	/* Setup absolute zero lowcore */
	mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
	mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
	mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
	mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
	mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

	lc->spinlock_lockval = arch_spin_lockval(0);
	lc->spinlock_index = 0;
	arch_spin_lock_setup(0);
	lc->br_r1_trampoline = 0x07f1;	/* br %r1 */
	lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
	lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
	lc->preempt_count = PREEMPT_DISABLED;

	set_prefix((u32)(unsigned long) lc);
	lowcore_ptr[0] = lc;
}

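/*
 * Switch the interrupt PSWs in the lowcore to DAT-enabled mode once the
 * kernel page tables exist, and mirror the result into the absolute
 * zero lowcore.
 */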
static void __init setup_lowcore_dat_on(void)
{
	struct lowcore *lc = lowcore_ptr[0];

	__ctl_clear_bit(0, 28);
	S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
	S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
	__ctl_store(S390_lowcore.cregs_save_area, 0, 15);
	__ctl_set_bit(0, 28);
	mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
	mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
	memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
			sizeof(S390_lowcore.cregs_save_area));
}

static struct resource code_resource = {
	.name = "Kernel code",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
	.name = "Kernel data",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
	.name = "Kernel bss",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
	&code_resource,
	&data_resource,
	&bss_resource,
};

static void __init setup_resources(void)
{
	struct resource *res, *std_res, *sub_res;
	phys_addr_t start, end;
	int j;
	u64 i;

	code_resource.start = (unsigned long) _text;
	code_resource.end = (unsigned long) _etext - 1;
	data_resource.start = (unsigned long) _etext;
	data_resource.end = (unsigned long) _edata - 1;
	bss_resource.start = (unsigned long) __bss_start;
	bss_resource.end = (unsigned long) __bss_stop - 1;

	for_each_mem_range(i, &start, &end) {
		res = memblock_alloc(sizeof(*res), 8);
		if (!res)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*res), 8);
		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

		res->name = "System RAM";
		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		request_resource(&iomem_resource, res);

		for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
			std_res = standard_resources[j];
			if (std_res->start < res->start ||
			    std_res->start > res->end)
				continue;
			if (std_res->end > res->end) {
				sub_res = memblock_alloc(sizeof(*sub_res), 8);
				if (!sub_res)
					panic("%s: Failed to allocate %zu bytes align=0x%x\n",
					      __func__, sizeof(*sub_res), 8);
				*sub_res = *std_res;
				sub_res->end = res->end;
				std_res->start = res->end + 1;
				request_resource(res, sub_res);
			} else {
				request_resource(res, std_res);
			}
		}
	}
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Re-add removed crash kernel memory as reserved memory. This makes
	 * sure it will be mapped with the identity mapping and struct pages
	 * will be created, so it can be resized later on.
	 * However add it later since the crash kernel resource should not be
	 * part of the System RAM resource.
	 */
	if (crashk_res.end) {
		memblock_add_node(crashk_res.start, resource_size(&crashk_res),
				  0, MEMBLOCK_NONE);
		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
		insert_resource(&iomem_resource, &crashk_res);
	}
#endif
}

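/*
 * Cut the usable memory off at ident_map_size and derive max_pfn from it.
 */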
static void __init setup_memory_end(void)
{
	memblock_remove(ident_map_size, ULONG_MAX);
	max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
	pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from the area
 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 * the crashkernel memory region when kdump is triggered. The crashkernel
 * memory region can never get offlined (pages are unmovable).
 */
static int kdump_mem_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	struct memory_notify *arg = data;

	if (action != MEM_GOING_OFFLINE)
		return NOTIFY_OK;
	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
		return NOTIFY_BAD;
	return NOTIFY_OK;
}

static struct notifier_block kdump_mem_nb = {
	.notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that the area above identity mapping is protected
 */
static void __init reserve_above_ident_map(void)
{
	memblock_reserve(ident_map_size, ULONG_MAX);
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
	unsigned long long crash_base, crash_size;
	phys_addr_t low, high;
	int rc;

	rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
			       &crash_base);

	crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
	crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
	if (rc || crash_size == 0)
		return;

	if (memblock.memory.regions[0].size < crash_size) {
		pr_info("crashkernel reservation failed: %s\n",
			"first memory chunk must be at least crashkernel size");
		return;
	}

	low = crash_base ?: oldmem_data.start;
	high = low + crash_size;
	if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
		/* The crashkernel fits into OLDMEM, reuse OLDMEM */
		crash_base = low;
	} else {
		/* Find suitable area in free memory */
		low = max_t(unsigned long, crash_size, sclp.hsa_size);
		high = crash_base ? crash_base + crash_size : ULONG_MAX;

		if (crash_base && crash_base < low) {
			pr_info("crashkernel reservation failed: %s\n",
				"crash_base too low");
			return;
		}
		low = crash_base ?: low;
		crash_base = memblock_phys_alloc_range(crash_size,
						       KEXEC_CRASH_MEM_ALIGN,
						       low, high);
	}

	if (!crash_base) {
		pr_info("crashkernel reservation failed: %s\n",
			"no suitable area found");
		return;
	}

	if (register_memory_notifier(&kdump_mem_nb)) {
		memblock_phys_free(crash_base, crash_size);
		return;
	}

	if (!oldmem_data.start && MACHINE_IS_VM)
		diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	memblock_remove(crash_base, crash_size);
	pr_info("Reserving %lluMB of memory at %lluMB "
		"for crashkernel (System RAM: %luMB)\n",
		crash_size >> 20, crash_base >> 20,
		(unsigned long)memblock.memory.total_size >> 20);
	os_info_crashkernel_add(crash_base, crash_size);
#endif
}

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!initrd_data.start || !initrd_data.size)
		return;
	initrd_start = (unsigned long)__va(initrd_data.start);
	initrd_end = initrd_start + initrd_data.size;
	memblock_reserve(initrd_data.start, initrd_data.size);
#endif
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
	if (ipl_cert_list_addr)
		memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}

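/*
 * The boot stage passes its memory detection results in a buffer that
 * must not be overwritten before it has been consumed; reserve it here
 * and free it again in free_mem_detect_info() below.
 */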
static void __init reserve_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_reserve(start, size);
}

static void __init free_mem_detect_info(void)
{
	unsigned long start, size;

	get_mem_detect_reserved(&start, &size);
	if (size)
		memblock_phys_free(start, size);
}

static const char * __init get_mem_info_source(void)
{
	switch (mem_detect.info_source) {
	case MEM_DETECT_SCLP_STOR_INFO:
		return "sclp storage info";
	case MEM_DETECT_DIAG260:
		return "diag260";
	case MEM_DETECT_SCLP_READ_INFO:
		return "sclp read info";
	case MEM_DETECT_BIN_SEARCH:
		return "binary search";
	}
	return "none";
}

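/*
 * Feed the detected memory blocks into memblock. Bottom-up allocation
 * is enabled temporarily so that early memblock allocations stay close
 * to the kernel image.
 */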
static void __init memblock_add_mem_detect_info(void)
{
	unsigned long start, end;
	int i;

	pr_debug("physmem info source: %s (%hhd)\n",
		 get_mem_info_source(), mem_detect.info_source);
	/* keep memblock lists close to the kernel */
	memblock_set_bottom_up(true);
	for_each_mem_detect_block(i, &start, &end) {
		memblock_add(start, end - start);
		memblock_physmem_add(start, end - start);
	}
	memblock_set_bottom_up(false);
	memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
	memblock_dump_all();
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_data.start && initrd_data.size &&
	    !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
		pr_err("The initial RAM disk does not fit into the memory\n");
		memblock_phys_free(initrd_data.start, initrd_data.size);
		initrd_start = initrd_end = 0;
	}
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
	memblock_reserve(0, STARTUP_NORMAL_OFFSET);
	memblock_reserve(__amode31_base, __eamode31 - __samode31);
	memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
	memblock_reserve(__pa(_stext), _end - _stext);
}

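/*
 * Initialize the storage keys of all present memory and set the PSW key
 * used by the kernel.
 */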
static void __init setup_memory(void)
{
	phys_addr_t start, end;
	u64 i;

	/*
	 * Init storage key for present memory
	 */
	for_each_mem_range(i, &start, &end)
		storage_key_init_range(start, end);

	psw_set_key(PAGE_DEFAULT_KEY);

	/* Only cosmetics */
	memblock_enforce_memory_limit(memblock_end_of_DRAM());
}

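/*
 * Move the .amode31 section to its final location below 2 GB and patch
 * all pointers kept in the .amode31.refs section accordingly.
 */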
static void __init relocate_amode31_section(void)
{
	unsigned long amode31_size = __eamode31 - __samode31;
	long amode31_offset = __amode31_base - __samode31;
	long *ptr;

	pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);

	/* Move original AMODE31 section to the new one */
	memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
	/* Zero out the old AMODE31 section to catch invalid accesses within it */
	memset((void *)__samode31, 0, amode31_size);

	/* Update all AMODE31 region references */
	for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
		*ptr += amode31_offset;
}

/* This must be called after AMODE31 relocation */
static void __init setup_cr(void)
{
	union ctlreg2 cr2;
	union ctlreg5 cr5;
	union ctlreg15 cr15;

	__ctl_duct[1] = (unsigned long)__ctl_aste;
	__ctl_duct[2] = (unsigned long)__ctl_aste;
	__ctl_duct[4] = (unsigned long)__ctl_duald;

	/* Update control registers CR2, CR5 and CR15 */
	__ctl_store(cr2.val, 2, 2);
	__ctl_store(cr5.val, 5, 5);
	__ctl_store(cr15.val, 15, 15);
	cr2.ducto = (unsigned long)__ctl_duct >> 6;
	cr5.pasteo = (unsigned long)__ctl_duct >> 6;
	cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3;
	__ctl_load(cr2.val, 2, 2);
	__ctl_load(cr5.val, 5, 5);
	__ctl_load(cr15.val, 15, 15);
}

/*
 * Add system information as device randomness
 */
static void __init setup_randomness(void)
{
	struct sysinfo_3_2_2 *vmms;

	vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!vmms)
		panic("Failed to allocate memory for sysinfo structure\n");
	if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
		add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
	memblock_free(vmms, PAGE_SIZE);
}

/*
 * Find the correct size for the task_struct. This depends on
 * the size of the struct fpu at the end of the thread_struct
 * which is embedded in the task_struct.
 */
static void __init setup_task_size(void)
{
	int task_size = sizeof(struct task_struct);

	if (!MACHINE_HAS_VX) {
		task_size -= sizeof(__vector128) * __NUM_VXRS;
		task_size += sizeof(freg_t) * __NUM_FPRS;
	}
	arch_task_struct_size = task_size;
}

/*
 * Issue diagnose 318 to set the control program name and
 * version codes.
 */
static void __init setup_control_program_code(void)
{
	union diag318_info diag318_info = {
		.cpnc = CPNC_LINUX,
		.cpvc = 0,
	};

	if (!sclp.has_diag318)
		return;

	diag_stat_inc(DIAG_STAT_X318);
	asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val));
}

/*
 * Print the component list from the IPL report
 */
static void __init log_component_list(void)
{
	struct ipl_rb_component_entry *ptr, *end;
	char *str;

	if (!early_ipl_comp_list_addr)
		return;
	if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL)
		pr_info("Linux is running with Secure-IPL enabled\n");
	else
		pr_info("Linux is running with Secure-IPL disabled\n");
	ptr = (void *) early_ipl_comp_list_addr;
	end = (void *) ptr + early_ipl_comp_list_size;
	pr_info("The IPL report contains the following components:\n");
	while (ptr < end) {
		if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) {
			if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED)
				str = "signed, verified";
			else
				str = "signed, verification failed";
		} else {
			str = "not signed";
		}
		pr_info("%016llx - %016llx (%s)\n",
			ptr->addr, ptr->addr + ptr->len, str);
		ptr++;
	}
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
	if (MACHINE_IS_VM)
		pr_info("Linux is running as a z/VM "
			"guest operating system in 64-bit mode\n");
	else if (MACHINE_IS_KVM)
		pr_info("Linux is running under KVM in 64-bit mode\n");
	else if (MACHINE_IS_LPAR)
		pr_info("Linux is running natively in 64-bit mode\n");
	else
		pr_info("Linux is running as a guest in 64-bit mode\n");

	log_component_list();

	/* Have one command line that is parsed and saved in /proc/cmdline */
	/* boot_command_line has been already set up in early.c */
	*cmdline_p = boot_command_line;

	ROOT_DEV = Root_RAM0;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
		nospec_auto_detect();

	jump_label_init();
	parse_early_param();
#ifdef CONFIG_CRASH_DUMP
	/* Deactivate elfcorehdr= kernel parameter */
	elfcorehdr_addr = ELFCORE_ADDR_MAX;
#endif

	os_info_init();
	setup_ipl();
	setup_task_size();
	setup_control_program_code();

	/* Do some memory reservations *before* memory is added to memblock */
	reserve_above_ident_map();
	reserve_kernel();
	reserve_initrd();
	reserve_certificate_list();
	reserve_mem_detect_info();
	memblock_allow_resize();

	/* Get information about *all* installed memory */
	memblock_add_mem_detect_info();

	free_mem_detect_info();

	relocate_amode31_section();
	setup_cr();

	setup_uv();
	setup_memory_end();
	setup_memory();
	dma_contiguous_reserve(ident_map_size);
	vmcp_cma_reserve();
	if (MACHINE_HAS_EDAT2)
		hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);

	check_initrd();
	reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
	/*
	 * Be aware that smp_save_dump_cpus() triggers a system reset.
	 * Therefore CPU and device initialization should be done afterwards.
	 */
	smp_save_dump_cpus();
#endif

	setup_resources();
	setup_lowcore_dat_off();
	smp_fill_possible_mask();
	cpu_detect_mhz_feature();
	cpu_init();
	numa_setup();
	smp_detect_cpus();
	topology_init_early();

	if (test_facility(193))
		static_branch_enable(&cpu_has_bear);

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/*
	 * After paging_init created the kernel page table, the new PSWs
	 * in lowcore can now run with DAT enabled.
	 */
	setup_lowcore_dat_on();

	/* Setup default console */
	conmode_default();
	set_preferred_console();

	apply_alternative_instructions();
	if (IS_ENABLED(CONFIG_EXPOLINE))
		nospec_init_branches();

	/* Setup zfcp/nvme dump support */
	setup_zfcpdump();

	/* Add system specific data to the random pool */
	setup_randomness();
}