// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>
#include <linux/start_kernel.h>
#include <linux/hugetlb.h>
#include <linux/kmemleak.h>

#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include <asm/stacktrace.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
#include "entry.h"

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

/*
 * Some code and data needs to stay below 2 GB, even when the kernel would be
 * relocated above 2 GB, because it has to use 31 bit addresses.
 * Such code and data is part of the .amode31 section.
 */
unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31;
unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31;
unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31;
unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;

/*
 * Control registers CR2, CR5 and CR15 are initialized with addresses
 * of tables that must be placed below 2G, which is handled by the AMODE31
 * sections.
 * Because the AMODE31 sections are relocated below 2G at startup,
 * the content of control registers CR2, CR5 and CR15 must be updated
 * with new addresses after the relocation. The initial setup of the
 * control registers occurs in head64.S, and they are updated again after
 * AMODE31 relocation. We must access the relevant AMODE31 tables
 * indirectly via pointers placed in the .amode31.refs linker section.
 * Those pointers get updated automatically during AMODE31 relocation and
 * always contain a valid address within the AMODE31 sections.
 */

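/*
 * The tables themselves. The names appear to correspond to architected
 * structures: DUCT (dispatchable-unit control table), ASTE (ASN-second-
 * table entry), DUALD (dispatchable-unit access-list designation) and
 * the linkage stack.
 */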
static __amode31_data u32 __ctl_duct_amode31[16] __aligned(64);

static __amode31_data u64 __ctl_aste_amode31[8] __aligned(64) = {
        [1] = 0xffffffffffffffff
};

static __amode31_data u32 __ctl_duald_amode31[32] __aligned(128) = {
        0x80000000, 0, 0, 0,
        0x80000000, 0, 0, 0,
        0x80000000, 0, 0, 0,
        0x80000000, 0, 0, 0,
        0x80000000, 0, 0, 0,
        0x80000000, 0, 0, 0,
        0x80000000, 0, 0, 0,
        0x80000000, 0, 0, 0
};

static __amode31_data u32 __ctl_linkage_stack_amode31[8] __aligned(64) = {
        0, 0, 0x89000000, 0,
        0, 0, 0x8a000000, 0
};

static u64 __amode31_ref *__ctl_aste = __ctl_aste_amode31;
static u32 __amode31_ref *__ctl_duald = __ctl_duald_amode31;
static u32 __amode31_ref *__ctl_linkage_stack = __ctl_linkage_stack_amode31;
static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;

int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
struct mem_detect_info __bootdata(mem_detect);
struct initrd_data __bootdata(initrd_data);

unsigned long __bootdata_preserved(__kaslr_offset);
unsigned long __bootdata(__amode31_base);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
EXPORT_SYMBOL(stfle_fac_list);
u64 __bootdata_preserved(alt_stfle_fac_list[16]);
struct oldmem_data __bootdata_preserved(oldmem_data);

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long vmemmap_size;

unsigned long MODULES_VADDR;
unsigned long MODULES_END;

/* An array with a pointer to the lowcore of every CPU. */
struct lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

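/* Set once it is known that the machine provides a breaking-event-address register (BEAR) */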
DEFINE_STATIC_KEY_FALSE(cpu_has_bear);

/*
 * The Write Back bit position in the physaddr is given by the SLPC PCI.
 * Leaving the mask zero always uses write-through, which is safe.
 */
unsigned long mio_wb_bit_mask __ro_after_init;

/*
 * This is set up by the setup routine at boot time. For S390 we need to
 * find out what has to be set up, using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameter.
 */

static int __init condev_setup(char *str)
{
        int vdev;

        vdev = simple_strtoul(str, &str, 0);
        if (vdev >= 0 && vdev < 65536) {
                console_devno = vdev;
                console_irq = -1;
        }
        return 1;
}

__setup("condev=", condev_setup);

static void __init set_preferred_console(void)
{
        if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
                add_preferred_console("ttyS", 0, NULL);
        else if (CONSOLE_IS_3270)
                add_preferred_console("tty3270", 0, NULL);
        else if (CONSOLE_IS_VT220)
                add_preferred_console("ttysclp", 0, NULL);
        else if (CONSOLE_IS_HVC)
                add_preferred_console("hvc", 0, NULL);
}

static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
        if (!strcmp(str, "hwc") || !strcmp(str, "sclp"))
                SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
        if (!strcmp(str, "3215"))
                SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
        if (!strcmp(str, "3270"))
                SET_CONSOLE_3270;
#endif
        set_preferred_console();
        return 1;
}

__setup("conmode=", conmode_setup);

static void __init conmode_default(void)
{
        char query_buffer[1024];
        char *ptr;

        if (MACHINE_IS_VM) {
                cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
                console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
                ptr = strstr(query_buffer, "SUBCHANNEL =");
                console_irq = simple_strtoul(ptr + 13, NULL, 16);
                cpcmd("QUERY TERM", query_buffer, 1024, NULL);
                ptr = strstr(query_buffer, "CONMODE");
                /*
                 * Set the conmode to 3215 so that the device recognition
                 * will set the cu_type of the console to 3215. If the
                 * conmode is 3270 and we don't set it back then both
                 * 3215 and the 3270 driver will try to access the console
                 * device (3215 as console and 3270 as normal tty).
                 */
                cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
                if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                        return;
                }
                if (str_has_prefix(ptr + 8, "3270")) {
#if defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                } else if (str_has_prefix(ptr + 8, "3215")) {
#if defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                }
        } else if (MACHINE_IS_KVM) {
                if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
                        SET_CONSOLE_VT220;
                else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
                        SET_CONSOLE_SCLP;
                else
                        SET_CONSOLE_HVC;
        } else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                SET_CONSOLE_SCLP;
#endif
        }
}

#ifdef CONFIG_CRASH_DUMP
static void __init setup_zfcpdump(void)
{
        if (!is_ipl_type_dump())
                return;
        if (oldmem_data.start)
                return;
        strcat(boot_command_line, " cio_ignore=all,!ipldev,!condev");
        console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(void) {}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
        if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_restart(command);
}

void machine_halt(void)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_halt();
}

void machine_power_off(void)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

void *restart_stack;

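/*
 * Allocate a kernel stack. With CONFIG_VMAP_STACK the stack comes from
 * vmalloc, so it is guarded by unmapped pages; kmemleak_not_leak()
 * suppresses a false positive, presumably because the only reference to
 * the stack ends up in places kmemleak does not scan. Without VMAP_STACK
 * a physically contiguous THREAD_SIZE allocation is used instead.
 */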
unsigned long stack_alloc(void)
{
#ifdef CONFIG_VMAP_STACK
        void *ret;

        ret = __vmalloc_node(THREAD_SIZE, THREAD_SIZE, THREADINFO_GFP,
                             NUMA_NO_NODE, __builtin_return_address(0));
        kmemleak_not_leak(ret);
        return (unsigned long)ret;
#else
        return __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
#endif
}

void stack_free(unsigned long stack)
{
#ifdef CONFIG_VMAP_STACK
        vfree((void *) stack);
#else
        free_pages(stack, THREAD_SIZE_ORDER);
#endif
}

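/*
 * Allocate the async (interrupt) stack for the boot CPU and store it in
 * lowcore, adjusted by STACK_INIT_OFFSET to point at the first usable
 * stack frame.
 */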
int __init arch_early_irq_init(void)
{
        unsigned long stack;

        stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
        if (!stack)
                panic("Couldn't allocate async stack");
        S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
        return 0;
}

void __init arch_call_rest_init(void)
{
        unsigned long stack;

        stack = stack_alloc();
        if (!stack)
                panic("Couldn't allocate kernel stack");
        current->stack = (void *) stack;
#ifdef CONFIG_VMAP_STACK
        current->stack_vm_area = (void *) stack;
#endif
        set_task_stack_end_magic(current);
        stack += STACK_INIT_OFFSET;
        S390_lowcore.kernel_stack = stack;
        call_on_stack_noreturn(rest_init, stack);
}

static void __init setup_lowcore_dat_off(void)
{
        unsigned long int_psw_mask = PSW_KERNEL_BITS;
        unsigned long mcck_stack;
        struct lowcore *lc;

        if (IS_ENABLED(CONFIG_KASAN))
                int_psw_mask |= PSW_MASK_DAT;

        /*
         * Setup lowcore for boot cpu
         */
        BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * PAGE_SIZE);
        lc = memblock_alloc_low(sizeof(*lc), sizeof(*lc));
        if (!lc)
                panic("%s: Failed to allocate %zu bytes align=%zx\n",
                      __func__, sizeof(*lc), sizeof(*lc));

        lc->restart_psw.mask = PSW_KERNEL_BITS;
        lc->restart_psw.addr = (unsigned long) restart_int_handler;
        lc->external_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
        lc->external_new_psw.addr = (unsigned long) ext_int_handler;
        lc->svc_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
        lc->svc_new_psw.addr = (unsigned long) system_call;
        lc->program_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
        lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
        lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
        lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
        lc->io_new_psw.mask = int_psw_mask | PSW_MASK_MCHECK;
        lc->io_new_psw.addr = (unsigned long) io_int_handler;
        lc->clock_comparator = clock_comparator_max;
        lc->nodat_stack = ((unsigned long) &init_thread_union)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
        lc->current_task = (unsigned long)&init_task;
        lc->lpp = LPP_MAGIC;
        lc->machine_flags = S390_lowcore.machine_flags;
        lc->preempt_count = S390_lowcore.preempt_count;
        nmi_alloc_boot_cpu(lc);
        lc->sys_enter_timer = S390_lowcore.sys_enter_timer;
        lc->exit_timer = S390_lowcore.exit_timer;
        lc->user_timer = S390_lowcore.user_timer;
        lc->system_timer = S390_lowcore.system_timer;
        lc->steal_timer = S390_lowcore.steal_timer;
        lc->last_update_timer = S390_lowcore.last_update_timer;
        lc->last_update_clock = S390_lowcore.last_update_clock;

        /*
         * Allocate the global restart stack which is the same for
         * all CPUs in case *one* of them does a PSW restart.
         */
        restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
        if (!restart_stack)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, THREAD_SIZE, THREAD_SIZE);
        restart_stack += STACK_INIT_OFFSET;

        /*
         * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
         * restart data to the absolute zero lowcore. This is necessary if
         * PSW restart is done on an offline CPU that has lowcore zero.
         */
        lc->restart_stack = (unsigned long) restart_stack;
        lc->restart_fn = (unsigned long) do_restart;
        lc->restart_data = 0;
        lc->restart_source = -1U;

        mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
        if (!mcck_stack)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, THREAD_SIZE, THREAD_SIZE);
        lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;

        /* Setup absolute zero lowcore */
        mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
        mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
        mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
        mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
        mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

        lc->spinlock_lockval = arch_spin_lockval(0);
        lc->spinlock_index = 0;
        arch_spin_lock_setup(0);
        lc->br_r1_trampoline = 0x07f1;  /* br %r1 */
        lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
        lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
        lc->preempt_count = PREEMPT_DISABLED;

        set_prefix((u32)(unsigned long) lc);
        lowcore_ptr[0] = lc;
}

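/*
 * The new PSW masks live in the protected low-address range of the
 * prefix page, so bit 28 of control register 0 (presumably low-address
 * protection) is cleared around the writes below and set again after.
 */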
static void __init setup_lowcore_dat_on(void)
{
        struct lowcore *lc = lowcore_ptr[0];

        __ctl_clear_bit(0, 28);
        S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
        S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
        __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
        __ctl_set_bit(0, 28);
        mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
        mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
        memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
                        sizeof(S390_lowcore.cregs_save_area));
}

static struct resource code_resource = {
        .name = "Kernel code",
        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource data_resource = {
        .name = "Kernel data",
        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource bss_resource = {
        .name = "Kernel bss",
        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource __initdata *standard_resources[] = {
        &code_resource,
        &data_resource,
        &bss_resource,
};

static void __init setup_resources(void)
{
        struct resource *res, *std_res, *sub_res;
        phys_addr_t start, end;
        int j;
        u64 i;

        code_resource.start = (unsigned long) _text;
        code_resource.end = (unsigned long) _etext - 1;
        data_resource.start = (unsigned long) _etext;
        data_resource.end = (unsigned long) _edata - 1;
        bss_resource.start = (unsigned long) __bss_start;
        bss_resource.end = (unsigned long) __bss_stop - 1;

        for_each_mem_range(i, &start, &end) {
                res = memblock_alloc(sizeof(*res), 8);
                if (!res)
                        panic("%s: Failed to allocate %zu bytes align=0x%x\n",
                              __func__, sizeof(*res), 8);
                res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;

                res->name = "System RAM";
                res->start = start;
                /*
                 * In memblock, end points to the first byte after the
                 * range while in resources, end points to the last byte in
                 * the range.
                 */
                res->end = end - 1;
                request_resource(&iomem_resource, res);

                for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
                        std_res = standard_resources[j];
                        if (std_res->start < res->start ||
                            std_res->start > res->end)
                                continue;
                        if (std_res->end > res->end) {
                                sub_res = memblock_alloc(sizeof(*sub_res), 8);
                                if (!sub_res)
                                        panic("%s: Failed to allocate %zu bytes align=0x%x\n",
                                              __func__, sizeof(*sub_res), 8);
                                *sub_res = *std_res;
                                sub_res->end = res->end;
                                std_res->start = res->end + 1;
                                request_resource(res, sub_res);
                        } else {
                                request_resource(res, std_res);
                        }
                }
        }
#ifdef CONFIG_CRASH_DUMP
        /*
         * Re-add removed crash kernel memory as reserved memory. This makes
         * sure it will be mapped with the identity mapping and struct pages
         * will be created, so it can be resized later on.
         * However add it later since the crash kernel resource should not be
         * part of the System RAM resource.
         */
        if (crashk_res.end) {
                memblock_add_node(crashk_res.start, resource_size(&crashk_res),
                                  0, MEMBLOCK_NONE);
                memblock_reserve(crashk_res.start, resource_size(&crashk_res));
                insert_resource(&iomem_resource, &crashk_res);
        }
#endif
}

static void __init setup_memory_end(void)
{
        memblock_remove(ident_map_size, ULONG_MAX);
        max_pfn = max_low_pfn = PFN_DOWN(ident_map_size);
        pr_notice("The maximum memory size is %luMB\n", ident_map_size >> 20);
}

#ifdef CONFIG_CRASH_DUMP

/*
 * When kdump is enabled, we have to ensure that no memory from the area
 * [0 - crashkernel memory size] is set offline - it will be exchanged with
 * the crashkernel memory region when kdump is triggered. The crashkernel
 * memory region can never get offlined (pages are unmovable).
 */
static int kdump_mem_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        struct memory_notify *arg = data;

        if (action != MEM_GOING_OFFLINE)
                return NOTIFY_OK;
        if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
                return NOTIFY_BAD;
        return NOTIFY_OK;
}

static struct notifier_block kdump_mem_nb = {
        .notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that the area above identity mapping is protected
 */
static void __init reserve_above_ident_map(void)
{
        memblock_reserve(ident_map_size, ULONG_MAX);
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
        unsigned long long crash_base, crash_size;
        phys_addr_t low, high;
        int rc;

        rc = parse_crashkernel(boot_command_line, ident_map_size, &crash_size,
                               &crash_base);

        crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
        crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
        if (rc || crash_size == 0)
                return;

        if (memblock.memory.regions[0].size < crash_size) {
                pr_info("crashkernel reservation failed: %s\n",
                        "first memory chunk must be at least crashkernel size");
                return;
        }

        low = crash_base ?: oldmem_data.start;
        high = low + crash_size;
        if (low >= oldmem_data.start && high <= oldmem_data.start + oldmem_data.size) {
                /* The crashkernel fits into OLDMEM, reuse OLDMEM */
                crash_base = low;
        } else {
                /* Find suitable area in free memory */
                low = max_t(unsigned long, crash_size, sclp.hsa_size);
                high = crash_base ? crash_base + crash_size : ULONG_MAX;

                if (crash_base && crash_base < low) {
                        pr_info("crashkernel reservation failed: %s\n",
                                "crash_base too low");
                        return;
                }
                low = crash_base ?: low;
                crash_base = memblock_phys_alloc_range(crash_size,
                                                       KEXEC_CRASH_MEM_ALIGN,
                                                       low, high);
        }

        if (!crash_base) {
                pr_info("crashkernel reservation failed: %s\n",
                        "no suitable area found");
                return;
        }

        if (register_memory_notifier(&kdump_mem_nb)) {
                memblock_phys_free(crash_base, crash_size);
                return;
        }

        if (!oldmem_data.start && MACHINE_IS_VM)
                diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        memblock_remove(crash_base, crash_size);
        pr_info("Reserving %lluMB of memory at %lluMB "
                "for crashkernel (System RAM: %luMB)\n",
                crash_size >> 20, crash_base >> 20,
                (unsigned long)memblock.memory.total_size >> 20);
        os_info_crashkernel_add(crash_base, crash_size);
#endif
}

/*
 * Reserve the initrd from being used by memblock
 */
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (!initrd_data.start || !initrd_data.size)
                return;
        initrd_start = (unsigned long)__va(initrd_data.start);
        initrd_end = initrd_start + initrd_data.size;
        memblock_reserve(initrd_data.start, initrd_data.size);
#endif
}

/*
 * Reserve the memory area used to pass the certificate lists
 */
static void __init reserve_certificate_list(void)
{
        if (ipl_cert_list_addr)
                memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}

static void __init reserve_mem_detect_info(void)
{
        unsigned long start, size;

        get_mem_detect_reserved(&start, &size);
        if (size)
                memblock_reserve(start, size);
}

static void __init free_mem_detect_info(void)
{
        unsigned long start, size;

        get_mem_detect_reserved(&start, &size);
        if (size)
                memblock_phys_free(start, size);
}

static const char * __init get_mem_info_source(void)
{
        switch (mem_detect.info_source) {
        case MEM_DETECT_SCLP_STOR_INFO:
                return "sclp storage info";
        case MEM_DETECT_DIAG260:
                return "diag260";
        case MEM_DETECT_SCLP_READ_INFO:
                return "sclp read info";
        case MEM_DETECT_BIN_SEARCH:
                return "binary search";
        }
        return "none";
}

static void __init memblock_add_mem_detect_info(void)
{
        unsigned long start, end;
        int i;

        pr_debug("physmem info source: %s (%hhd)\n",
                 get_mem_info_source(), mem_detect.info_source);
        /* keep memblock lists close to the kernel */
        memblock_set_bottom_up(true);
        for_each_mem_detect_block(i, &start, &end) {
                memblock_add(start, end - start);
                memblock_physmem_add(start, end - start);
        }
        memblock_set_bottom_up(false);
        memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
        memblock_dump_all();
}

/*
 * Check for initrd being in usable memory
 */
static void __init check_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_data.start && initrd_data.size &&
            !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
                pr_err("The initial RAM disk does not fit into the memory\n");
                memblock_phys_free(initrd_data.start, initrd_data.size);
                initrd_start = initrd_end = 0;
        }
#endif
}

/*
 * Reserve memory used for lowcore/command line/kernel image.
 */
static void __init reserve_kernel(void)
{
        memblock_reserve(0, STARTUP_NORMAL_OFFSET);
        memblock_reserve(__amode31_base, __eamode31 - __samode31);
        memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
        memblock_reserve(__pa(_stext), _end - _stext);
}

Martin Schwidefsky | 8b646bd | 2012-03-11 11:59:26 -0400 | [diff] [blame] | 817 | static void __init setup_memory(void) |
Heiko Carstens | c9e3735 | 2005-05-01 08:58:57 -0700 | [diff] [blame] | 818 | { |
Mike Rapoport | b10d6bc | 2020-10-13 16:58:08 -0700 | [diff] [blame] | 819 | phys_addr_t start, end; |
| 820 | u64 i; |
Heiko Carstens | c9e3735 | 2005-05-01 08:58:57 -0700 | [diff] [blame] | 821 | |
| 822 | /* |
Philipp Hachtmann | 50be634 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 823 | * Initialize storage keys for present memory |
Heiko Carstens | c9e3735 | 2005-05-01 08:58:57 -0700 | [diff] [blame] | 824 | */ |
Mike Rapoport | b10d6bc | 2020-10-13 16:58:08 -0700 | [diff] [blame] | 825 | for_each_mem_range(i, &start, &end) |
| 826 | storage_key_init_range(start, end); |
| 827 | |
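| | /* |
| |  * Storage keys implement z/Architecture key-controlled protection; |
| |  * once the keys of all present memory are initialized, the boot |
| |  * CPU's PSW access key can be set to the kernel default. |
| |  */ |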
Peter Oberparleiter | 0b642ed | 2005-05-01 08:58:58 -0700 | [diff] [blame] | 828 | psw_set_key(PAGE_DEFAULT_KEY); |
| 829 | |
Philipp Hachtmann | 50be634 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 830 | /* Only cosmetics: trim regions above the end of detected DRAM */ |
| 831 | memblock_enforce_memory_limit(memblock_end_of_DRAM()); |
Heiko Carstens | c9e3735 | 2005-05-01 08:58:57 -0700 | [diff] [blame] | 832 | } |
| 833 | |
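| | /* |
| |  * The AMODE31 section contains code and data that is executed or |
| |  * accessed in 31-bit addressing mode and must therefore stay below |
| |  * 2 GB; move it from its link-time location to __amode31_base. |
| |  */ |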
Heiko Carstens | c78d0c7 | 2021-08-04 13:40:31 +0200 | [diff] [blame] | 834 | static void __init relocate_amode31_section(void) |
Alexander Egorenkov | 6bda667 | 2021-06-15 19:17:36 +0200 | [diff] [blame] | 835 | { |
Alexander Gordeev | e3ec8e0 | 2021-09-27 14:18:26 +0200 | [diff] [blame] | 836 | unsigned long amode31_size = __eamode31 - __samode31; |
| 837 | long amode31_offset = __amode31_base - __samode31; |
Alexander Egorenkov | 6bda667 | 2021-06-15 19:17:36 +0200 | [diff] [blame] | 838 | long *ptr; |
| 839 | |
Heiko Carstens | c78d0c7 | 2021-08-04 13:40:31 +0200 | [diff] [blame] | 840 | pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size); |
Alexander Egorenkov | 6bda667 | 2021-06-15 19:17:36 +0200 | [diff] [blame] | 841 | |
Heiko Carstens | c78d0c7 | 2021-08-04 13:40:31 +0200 | [diff] [blame] | 842 | /* Move original AMODE31 section to the new one */ |
Alexander Gordeev | e3ec8e0 | 2021-09-27 14:18:26 +0200 | [diff] [blame] | 843 | memmove((void *)__amode31_base, (void *)__samode31, amode31_size); |
Heiko Carstens | c78d0c7 | 2021-08-04 13:40:31 +0200 | [diff] [blame] | 844 | /* Zero out the old AMODE31 section to catch invalid accesses within it */ |
| 845 | memset((void *)__samode31, 0, amode31_size); |
Alexander Egorenkov | 6bda667 | 2021-06-15 19:17:36 +0200 | [diff] [blame] | 846 | |
Heiko Carstens | c78d0c7 | 2021-08-04 13:40:31 +0200 | [diff] [blame] | 847 | /* Update all AMODE31 region references */ |
| 848 | for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++) |
| 849 | *ptr += amode31_offset; |
Alexander Egorenkov | 6bda667 | 2021-06-15 19:17:36 +0200 | [diff] [blame] | 850 | } |
| 851 | |
Heiko Carstens | c78d0c7 | 2021-08-04 13:40:31 +0200 | [diff] [blame] | 852 | /* This must be called after AMODE31 relocation */ |
Alexander Egorenkov | 6bda667 | 2021-06-15 19:17:36 +0200 | [diff] [blame] | 853 | static void __init setup_cr(void) |
| 854 | { |
| 855 | union ctlreg2 cr2; |
| 856 | union ctlreg5 cr5; |
| 857 | union ctlreg15 cr15; |
| 858 | |
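| | /* |
| |  * Wire up the dispatchable-unit control table (DUCT): entries 1 and |
| |  * 2 point at the ASN-second-table entry (ASTE), entry 4 at the |
| |  * dispatchable-unit access-list designation (DUALD). |
| |  */ |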
| 859 | __ctl_duct[1] = (unsigned long)__ctl_aste; |
| 860 | __ctl_duct[2] = (unsigned long)__ctl_aste; |
| 861 | __ctl_duct[4] = (unsigned long)__ctl_duald; |
| 862 | |
| 863 | /* Update control registers CR2, CR5 and CR15 */ |
| 864 | __ctl_store(cr2.val, 2, 2); |
| 865 | __ctl_store(cr5.val, 5, 5); |
| 866 | __ctl_store(cr15.val, 15, 15); |
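| | /* |
| |  * The origins are stored without their implied alignment bits: the |
| |  * DUCT and primary-ASTE origins are 64-byte aligned (shift by 6), |
| |  * the linkage-stack entry address is 8-byte aligned (shift by 3). |
| |  */ |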
| 867 | cr2.ducto = (unsigned long)__ctl_duct >> 6; |
| 868 | cr5.pasteo = (unsigned long)__ctl_duct >> 6; |
| 869 | cr15.lsea = (unsigned long)__ctl_linkage_stack >> 3; |
| 870 | __ctl_load(cr2.val, 2, 2); |
| 871 | __ctl_load(cr5.val, 5, 5); |
| 872 | __ctl_load(cr15.val, 15, 15); |
| 873 | } |
| 874 | |
Martin Schwidefsky | cf8ba7a | 2007-05-04 18:48:28 +0200 | [diff] [blame] | 875 | /* |
Martin Schwidefsky | bcfcbb6 | 2014-08-11 12:20:58 +0200 | [diff] [blame] | 876 | * Add system information as device randomness |
| 877 | */ |
| 878 | static void __init setup_randomness(void) |
| 879 | { |
| 880 | struct sysinfo_3_2_2 *vmms; |
| 881 | |
Alexander Gordeev | e035389 | 2021-10-07 12:14:09 +0200 | [diff] [blame] | 882 | vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE); |
Mike Rapoport | ecc3e77 | 2019-03-11 23:29:26 -0700 | [diff] [blame] | 883 | if (!vmms) |
| 884 | panic("Failed to allocate memory for sysinfo structure\n"); |
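| | /* |
| |  * STSI 3.2.2 describes the virtual machines this system runs in; |
| |  * their names and CPU counts differ per instance, so mix them into |
| |  * the random pool (add_device_randomness credits no entropy). |
| |  */ |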
Heiko Carstens | da8fd82 | 2017-02-04 11:40:36 +0100 | [diff] [blame] | 885 | if (stsi(vmms, 3, 2, 2) == 0 && vmms->count) |
Heiko Carstens | 4920e3c | 2017-02-05 23:03:18 +0100 | [diff] [blame] | 886 | add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count); |
Linus Torvalds | 0b707e5 | 2021-11-06 14:48:06 -0700 | [diff] [blame^] | 887 | memblock_free(vmms, PAGE_SIZE); |
Martin Schwidefsky | bcfcbb6 | 2014-08-11 12:20:58 +0200 | [diff] [blame] | 888 | } |
| 889 | |
| 890 | /* |
Martin Schwidefsky | 3f6813b | 2016-04-01 15:42:15 +0200 | [diff] [blame] | 891 | * Find the correct size for the task_struct. This depends on |
| 892 | * the size of the struct fpu at the end of the thread_struct, |
| 893 | * which is embedded in the task_struct. |
| 894 | */ |
| 895 | static void __init setup_task_size(void) |
| 896 | { |
| 897 | int task_size = sizeof(struct task_struct); |
| 898 | |
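| | /* |
| |  * Without the vector facility the register save area holds 16 |
| |  * eight-byte fprs instead of 32 sixteen-byte vector registers, |
| |  * i.e. task_struct shrinks by 32 * 16 - 16 * 8 = 384 bytes. |
| |  */ |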
| 899 | if (!MACHINE_HAS_VX) { |
| 900 | task_size -= sizeof(__vector128) * __NUM_VXRS; |
| 901 | task_size += sizeof(freg_t) * __NUM_FPRS; |
| 902 | } |
| 903 | arch_task_struct_size = task_size; |
| 904 | } |
| 905 | |
| 906 | /* |
Collin Walling | 4ad78b8 | 2018-12-06 17:30:04 -0500 | [diff] [blame] | 907 | * Issue diagnose 318 to set the control program name and |
| 908 | * version codes. |
| 909 | */ |
| 910 | static void __init setup_control_program_code(void) |
| 911 | { |
| 912 | union diag318_info diag318_info = { |
| 913 | .cpnc = CPNC_LINUX, |
Collin Walling | a23816f | 2020-06-22 11:46:35 -0400 | [diff] [blame] | 914 | .cpvc = 0, |
Collin Walling | 4ad78b8 | 2018-12-06 17:30:04 -0500 | [diff] [blame] | 915 | }; |
| 916 | |
| 917 | if (!sclp.has_diag318) |
| 918 | return; |
| 919 | |
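| | /* |
| |  * Report the codes to the hypervisor for problem determination; |
| |  * the sclp flag above guards against machines that do not |
| |  * implement diagnose 0x318. |
| |  */ |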
| 920 | diag_stat_inc(DIAG_STAT_X318); |
| 921 | asm volatile("diag %0,0,0x318\n" : : "d" (diag318_info.val)); |
| 922 | } |
| 923 | |
| 924 | /* |
Martin Schwidefsky | 9641b8c | 2019-02-21 14:23:04 +0100 | [diff] [blame] | 925 | * Print the component list from the IPL report |
| 926 | */ |
| 927 | static void __init log_component_list(void) |
| 928 | { |
| 929 | struct ipl_rb_component_entry *ptr, *end; |
| 930 | char *str; |
| 931 | |
| 932 | if (!early_ipl_comp_list_addr) |
| 933 | return; |
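| | /* |
| |  * The component list is part of the IPL report handed over by the |
| |  * boot loader; each entry describes one loaded item (e.g. kernel |
| |  * image or ramdisk) together with its signature state. |
| |  */ |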
Philipp Rudo | 40260b0 | 2019-12-18 11:24:43 +0100 | [diff] [blame] | 934 | if (ipl_block.hdr.flags & IPL_PL_FLAG_SIPL) |
Martin Schwidefsky | 9641b8c | 2019-02-21 14:23:04 +0100 | [diff] [blame] | 935 | pr_info("Linux is running with Secure-IPL enabled\n"); |
| 936 | else |
| 937 | pr_info("Linux is running with Secure-IPL disabled\n"); |
| 938 | ptr = (void *) early_ipl_comp_list_addr; |
| 939 | end = (void *) ptr + early_ipl_comp_list_size; |
| 940 | pr_info("The IPL report contains the following components:\n"); |
| 941 | while (ptr < end) { |
| 942 | if (ptr->flags & IPL_RB_COMPONENT_FLAG_SIGNED) { |
| 943 | if (ptr->flags & IPL_RB_COMPONENT_FLAG_VERIFIED) |
| 944 | str = "signed, verified"; |
| 945 | else |
| 946 | str = "signed, verification failed"; |
| 947 | } else { |
| 948 | str = "not signed"; |
| 949 | } |
| 950 | pr_info("%016llx - %016llx (%s)\n", |
| 951 | ptr->addr, ptr->addr + ptr->len, str); |
| 952 | ptr++; |
| 953 | } |
| 954 | } |
| 955 | |
| 956 | /* |
Heiko Carstens | c9e3735 | 2005-05-01 08:58:57 -0700 | [diff] [blame] | 957 | * Setup function called from init/main.c just after the banner |
| 958 | * was printed. |
| 959 | */ |
| 960 | |
Martin Schwidefsky | 8b646bd | 2012-03-11 11:59:26 -0400 | [diff] [blame] | 961 | void __init setup_arch(char **cmdline_p) |
Heiko Carstens | c9e3735 | 2005-05-01 08:58:57 -0700 | [diff] [blame] | 962 | { |
| 963 | /* |
| 964 | * Print what head.S has found out about the machine |
| 965 | */ |
Carsten Otte | fa58774 | 2008-03-25 18:47:44 +0100 | [diff] [blame] | 966 | if (MACHINE_IS_VM) |
Martin Schwidefsky | 3b6ed4a | 2008-12-25 13:39:40 +0100 | [diff] [blame] | 967 | pr_info("Linux is running as a z/VM " |
| 968 | "guest operating system in 64-bit mode\n"); |
Hendrik Brueckner | 637952c | 2009-08-23 18:09:06 +0200 | [diff] [blame] | 969 | else if (MACHINE_IS_KVM) |
Martin Schwidefsky | 3b6ed4a | 2008-12-25 13:39:40 +0100 | [diff] [blame] | 970 | pr_info("Linux is running under KVM in 64-bit mode\n"); |
Martin Schwidefsky | 27d7160 | 2010-02-26 22:37:38 +0100 | [diff] [blame] | 971 | else if (MACHINE_IS_LPAR) |
Martin Schwidefsky | 3b6ed4a | 2008-12-25 13:39:40 +0100 | [diff] [blame] | 972 | pr_info("Linux is running natively in 64-bit mode\n"); |
Christian Borntraeger | 03aa047 | 2018-11-09 09:21:47 +0100 | [diff] [blame] | 973 | else |
| 974 | pr_info("Linux is running as a guest in 64-bit mode\n"); |
Heiko Carstens | c9e3735 | 2005-05-01 08:58:57 -0700 | [diff] [blame] | 975 | |
Martin Schwidefsky | 9641b8c | 2019-02-21 14:23:04 +0100 | [diff] [blame] | 976 | log_component_list(); |
| 977 | |
Hendrik Brueckner | a0443fb | 2008-07-14 09:59:09 +0200 | [diff] [blame] | 978 | /* Have one command line that is parsed and saved in /proc/cmdline */ |
| 979 | /* boot_command_line has already been set up in early.c */ |
| 980 | *cmdline_p = boot_command_line; |
Heiko Carstens | 5968529 | 2006-03-24 03:15:15 -0800 | [diff] [blame] | 981 | |
Heiko Carstens | c9e3735 | 2005-05-01 08:58:57 -0700 | [diff] [blame] | 982 | ROOT_DEV = Root_RAM0; |
Heiko Carstens | 5968529 | 2006-03-24 03:15:15 -0800 | [diff] [blame] | 983 | |
Kefeng Wang | 638cd5a3 | 2021-07-07 18:08:57 -0700 | [diff] [blame] | 984 | setup_initial_init_mm(_text, _etext, _edata, _end); |
Heiko Carstens | 5968529 | 2006-03-24 03:15:15 -0800 | [diff] [blame] | 985 | |
Martin Schwidefsky | 6a3d1e8 | 2018-04-11 08:35:23 +0200 | [diff] [blame] | 986 | if (IS_ENABLED(CONFIG_EXPOLINE_AUTO)) |
| 987 | nospec_auto_detect(); |
| 988 | |
Vasily Gorbik | 95e61b1 | 2020-06-18 17:17:19 +0200 | [diff] [blame] | 989 | jump_label_init(); |
Heiko Carstens | 5968529 | 2006-03-24 03:15:15 -0800 | [diff] [blame] | 990 | parse_early_param(); |
Martin Schwidefsky | 8a07dd0 | 2015-10-14 15:53:06 +0200 | [diff] [blame] | 991 | #ifdef CONFIG_CRASH_DUMP |
| 992 | /* Deactivate elfcorehdr= kernel parameter */ |
| 993 | elfcorehdr_addr = ELFCORE_ADDR_MAX; |
| 994 | #endif |
| 995 | |
Michael Holzheu | 4857d4b | 2012-03-11 11:59:34 -0400 | [diff] [blame] | 996 | os_info_init(); |
Michael Holzheu | 99ca4e5 | 2008-01-26 14:11:11 +0100 | [diff] [blame] | 997 | setup_ipl(); |
Martin Schwidefsky | 3f6813b | 2016-04-01 15:42:15 +0200 | [diff] [blame] | 998 | setup_task_size(); |
Collin Walling | 4ad78b8 | 2018-12-06 17:30:04 -0500 | [diff] [blame] | 999 | setup_control_program_code(); |
Philipp Hachtmann | 50be634 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1000 | |
| 1001 | /* Do some memory reservations *before* memory is added to memblock */ |
Vasily Gorbik | 73045a0 | 2020-10-19 11:01:33 +0200 | [diff] [blame] | 1002 | reserve_above_ident_map(); |
Philipp Hachtmann | 50be634 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1003 | reserve_kernel(); |
| 1004 | reserve_initrd(); |
Martin Schwidefsky | 9641b8c | 2019-02-21 14:23:04 +0100 | [diff] [blame] | 1005 | reserve_certificate_list(); |
Vasily Gorbik | 6966d60 | 2018-04-11 11:56:55 +0200 | [diff] [blame] | 1006 | reserve_mem_detect_info(); |
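| | /* Allow memblock to reallocate its region arrays from now on */ |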
Philipp Hachtmann | 50be634 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1007 | memblock_allow_resize(); |
| 1008 | |
| 1009 | /* Get information about *all* installed memory */ |
Vasily Gorbik | 6966d60 | 2018-04-11 11:56:55 +0200 | [diff] [blame] | 1010 | memblock_add_mem_detect_info(); |
Philipp Hachtmann | 50be634 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1011 | |
Vasily Gorbik | 6966d60 | 2018-04-11 11:56:55 +0200 | [diff] [blame] | 1012 | free_mem_detect_info(); |
Philipp Hachtmann | 50be634 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1013 | |
Heiko Carstens | c78d0c7 | 2021-08-04 13:40:31 +0200 | [diff] [blame] | 1014 | relocate_amode31_section(); |
Alexander Egorenkov | 6bda667 | 2021-06-15 19:17:36 +0200 | [diff] [blame] | 1015 | setup_cr(); |
| 1016 | |
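| | /* Ultravisor (protected virtualization) setup, then the final memory limit */ |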
Vasily Gorbik | 1d6671a | 2020-09-11 11:38:21 +0200 | [diff] [blame] | 1017 | setup_uv(); |
Vasily Gorbik | 0c4f262 | 2020-10-06 22:12:39 +0200 | [diff] [blame] | 1018 | setup_memory_end(); |
Heiko Carstens | c9e3735 | 2005-05-01 08:58:57 -0700 | [diff] [blame] | 1019 | setup_memory(); |
Vasily Gorbik | 73045a0 | 2020-10-19 11:01:33 +0200 | [diff] [blame] | 1020 | dma_contiguous_reserve(ident_map_size); |
Heiko Carstens | 3f42984 | 2017-08-07 15:16:15 +0200 | [diff] [blame] | 1021 | vmcp_cma_reserve(); |
Gerald Schaefer | 343dbdb | 2020-12-08 19:47:15 +0100 | [diff] [blame] | 1022 | if (MACHINE_HAS_EDAT2) |
| 1023 | hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT); |
Philipp Hachtmann | 50be634 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1024 | |
| 1025 | check_initrd(); |
| 1026 | reserve_crashkernel(); |
Martin Schwidefsky | 1a36a39 | 2015-10-29 10:28:26 +0100 | [diff] [blame] | 1027 | #ifdef CONFIG_CRASH_DUMP |
Michael Holzheu | 1592a8e | 2015-05-26 19:05:23 +0200 | [diff] [blame] | 1028 | /* |
| 1029 | * Be aware that smp_save_dump_cpus() triggers a system reset. |
| 1030 | * Therefore CPU and device initialization should be done afterwards. |
| 1031 | */ |
| 1032 | smp_save_dump_cpus(); |
Martin Schwidefsky | 1a36a39 | 2015-10-29 10:28:26 +0100 | [diff] [blame] | 1033 | #endif |
Philipp Hachtmann | 50be634 | 2014-01-29 18:16:01 +0100 | [diff] [blame] | 1034 | |
Heiko Carstens | c9e3735 | 2005-05-01 08:58:57 -0700 | [diff] [blame] | 1035 | setup_resources(); |
Martin Schwidefsky | 8727638 | 2019-02-14 15:40:56 +0100 | [diff] [blame] | 1036 | setup_lowcore_dat_off(); |
Heiko Carstens | d80512f | 2013-12-16 14:31:26 +0100 | [diff] [blame] | 1037 | smp_fill_possible_mask(); |
Heiko Carstens | 097a116 | 2016-04-14 12:35:22 +0200 | [diff] [blame] | 1038 | cpu_detect_mhz_feature(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1039 | cpu_init(); |
Philipp Hachtmann | 3a368f7 | 2014-03-06 18:25:13 +0100 | [diff] [blame] | 1040 | numa_setup(); |
Heiko Carstens | af51160e | 2016-12-03 09:48:01 +0100 | [diff] [blame] | 1041 | smp_detect_cpus(); |
Heiko Carstens | 8c910580 | 2016-12-03 09:50:21 +0100 | [diff] [blame] | 1042 | topology_init_early(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | |
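| | /* Facility 193: BEAR-enhancement facility */ |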
Sven Schnelle | 3b051e8 | 2021-04-07 09:20:17 +0200 | [diff] [blame] | 1044 | if (test_facility(193)) |
| 1045 | static_branch_enable(&cpu_has_bear); |
| 1046 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1047 | /* |
| 1048 | * Create kernel page tables and switch to virtual addressing. |
| 1049 | */ |
| 1050 | paging_init(); |
| 1051 | |
Martin Schwidefsky | 8727638 | 2019-02-14 15:40:56 +0100 | [diff] [blame] | 1052 | /* |
| 1053 | * After paging_init has created the kernel page tables, the new PSWs |
| 1054 | * in lowcore can be used with DAT enabled. |
| 1055 | */ |
| 1056 | setup_lowcore_dat_on(); |
| 1057 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1058 | /* Setup default console */ |
| 1059 | conmode_default(); |
Hendrik Brueckner | 637952c | 2009-08-23 18:09:06 +0200 | [diff] [blame] | 1060 | set_preferred_console(); |
Michael Holzheu | 411ed32 | 2007-04-27 16:01:49 +0200 | [diff] [blame] | 1061 | |
Vasily Gorbik | 686140a | 2017-10-12 13:01:47 +0200 | [diff] [blame] | 1062 | apply_alternative_instructions(); |
Martin Schwidefsky | f19fbd5 | 2018-01-26 12:46:47 +0100 | [diff] [blame] | 1063 | if (IS_ENABLED(CONFIG_EXPOLINE)) |
| 1064 | nospec_init_branches(); |
Vasily Gorbik | 686140a | 2017-10-12 13:01:47 +0200 | [diff] [blame] | 1065 | |
Alexander Egorenkov | bd37b36 | 2020-09-29 20:24:55 +0200 | [diff] [blame] | 1066 | /* Set up zfcp/nvme dump support */ |
Sebastian Ott | fe72ffb | 2013-04-30 17:18:46 +0200 | [diff] [blame] | 1067 | setup_zfcpdump(); |
Martin Schwidefsky | bcfcbb6 | 2014-08-11 12:20:58 +0200 | [diff] [blame] | 1068 | |
| 1069 | /* Add system-specific data to the random pool */ |
| 1070 | setup_randomness(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | } |