// SPDX-License-Identifier: GPL-2.0
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/cc_platform.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/realmode.h>
#include <asm/tlbflush.h>
#include <asm/crash.h>
#include <asm/sev.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

/* Hold the pgd entry used on booting additional CPUs */
pgd_t trampoline_pgd_entry;

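/*
 * Switch to the trampoline page table, which identity-maps the
 * real-mode area; callers (e.g. the reboot path) invoke this right
 * before dropping back to real mode.
 */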
void load_trampoline_pgtable(void)
{
#ifdef CONFIG_X86_32
        load_cr3(initial_page_table);
#else
        /*
         * This function is called before exiting to real-mode and that will
         * fail with CR4.PCIDE still set.
         */
        if (boot_cpu_has(X86_FEATURE_PCID))
                cr4_clear_bits(X86_CR4_PCIDE);

        write_cr3(real_mode_header->trampoline_pgd);
#endif

        /*
         * The CR3 write above will not flush global TLB entries.
         * Stale, global entries from previous page tables may still be
         * present. Flush those stale entries.
         *
         * This ensures that memory accessed while running with
         * trampoline_pgd is *actually* mapped into trampoline_pgd.
         */
        __flush_tlb_all();
}

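/*
 * Reserve the trampoline memory while memblock is still the only
 * allocator around; the actual setup happens later, once the kernel
 * page tables and the slab allocator are available.
 */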
void __init reserve_real_mode(void)
{
        phys_addr_t mem;
        size_t size = real_mode_size_needed();

        if (!size)
                return;

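        /* Too early for slab: the allocation below has to come from memblock. */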
        WARN_ON(slab_is_available());

        /* Has to be under 1M so we can execute real-mode AP code. */
        mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
        if (!mem)
                pr_info("No sub-1M memory is available for the trampoline\n");
        else
                set_real_mode_mem(mem);
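
        /*
         * A failed allocation is not fatal at this point: real_mode_header
         * simply stays NULL and init_real_mode() will panic later if the
         * trampoline turns out to be required.
         */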

        /*
         * Unconditionally reserve the entire first 1M, see comment in
         * setup_arch().
         */
        memblock_reserve(0, SZ_1M);
}

static void sme_sev_setup_real_mode(struct trampoline_header *th)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
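        /*
         * TH_FLAGS_SME_ACTIVE tells the trampoline code that SME is
         * active, so the AP can enable memory encryption for itself
         * before jumping into the kernel proper (assumption based on
         * the flag name; the consumer is the real-mode assembly).
         */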
        if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                th->flags |= TH_FLAGS_SME_ACTIVE;

        if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
                /*
                 * Skip the call to verify_cpu() in secondary_startup_64 as it
                 * will cause #VC exceptions when the AP can't handle them yet.
                 */
                th->start = (u64) secondary_startup_64_no_verify;

                if (sev_es_setup_ap_jump_table(real_mode_header))
                        panic("Failed to get/update SEV-ES AP Jump Table");
        }
#endif
}

static void __init setup_real_mode(void)
{
        u16 real_mode_seg;
        const u32 *rel;
        u32 count;
        unsigned char *base;
        unsigned long phys_base;
        struct trampoline_header *trampoline_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
        u64 *trampoline_pgd;
        u64 efer;
        int i;
#endif

        base = (unsigned char *)real_mode_header;

        /*
         * If SME is active, the trampoline area will need to be in
         * decrypted memory in order to bring up other processors
         * successfully. This is not needed for SEV.
         */
        if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);

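        /* Copy the relocatable real-mode blob into the reserved area. */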
        memcpy(base, real_mode_blob, size);

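        /*
         * Real-mode segments address memory in 16-byte paragraphs, so
         * the segment value is simply the physical base shifted right
         * by four.
         */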
        phys_base = __pa(base);
        real_mode_seg = phys_base >> 4;

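        /*
         * The relocation table consists of two lists, each a 32-bit
         * entry count followed by that many 32-bit offsets into the
         * blob: first the 16-bit segment relocations, then the 32-bit
         * linear relocations handled below.
         */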
        rel = (u32 *) real_mode_relocs;

        /* 16-bit segment relocations. */
        count = *rel++;
        while (count--) {
                u16 *seg = (u16 *) (base + *rel++);
                *seg = real_mode_seg;
        }

        /* 32-bit linear relocations. */
        count = *rel++;
        while (count--) {
                u32 *ptr = (u32 *) (base + *rel++);
                *ptr += phys_base;
        }

        /* Must be performed *after* relocation. */
        trampoline_header = (struct trampoline_header *)
                __va(real_mode_header->trampoline_header);

#ifdef CONFIG_X86_32
        trampoline_header->start = __pa_symbol(startup_32_smp);
        trampoline_header->gdt_limit = __BOOT_DS + 7;
        trampoline_header->gdt_base = __pa_symbol(boot_gdt);
#else
        /*
         * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
         * so we need to mask it out.
         */
        rdmsrl(MSR_EFER, efer);
        trampoline_header->efer = efer & ~EFER_LMA;

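        /*
         * Hand the APs their 64-bit entry point and an initial CR4
         * image. trampoline_cr4_features is exported so the CR4 value
         * in the trampoline header can be refreshed later, e.g. from
         * setup_arch() (the consumer lives outside this file).
         */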
        trampoline_header->start = (u64) secondary_startup_64;
        trampoline_cr4_features = &trampoline_header->cr4;
        *trampoline_cr4_features = mmu_cr4_features;

        trampoline_header->flags = 0;

        trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);

        /* Map the real mode stub as virtual == physical */
        trampoline_pgd[0] = trampoline_pgd_entry.pgd;

        /*
         * Include the entirety of the kernel mapping into the trampoline
         * PGD. This way, all mappings present in the normal kernel page
         * tables are usable while running on trampoline_pgd.
         */
        for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
                trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif

        sme_sev_setup_real_mode(trampoline_header);
}

/*
 * reserve_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Also, the trampoline code will be executed by APs, so we
 * need to mark it executable no later than do_pre_smp_initcalls();
 * thus run it as an early_initcall().
 */
static void __init set_real_mode_permissions(void)
{
        unsigned char *base = (unsigned char *) real_mode_header;
        size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

        size_t ro_size =
                PAGE_ALIGN(real_mode_header->ro_end) -
                __pa(base);

        size_t text_size =
                PAGE_ALIGN(real_mode_header->ro_end) -
                real_mode_header->text_start;

        unsigned long text_start =
                (unsigned long) __va(real_mode_header->text_start);

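        /*
         * Order matters here: first strip execute permission from the
         * whole area and make the read-only part actually read-only,
         * then re-enable execute for the trampoline .text range only.
         */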
        set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
        set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
        set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
}

static int __init init_real_mode(void)
{
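        /*
         * real_mode_header is only set if reserve_real_mode() managed
         * to carve out the sub-1M area; without it, APs cannot be
         * brought up.
         */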
        if (!real_mode_header)
                panic("Real mode trampoline was not allocated");

        setup_real_mode();
        set_real_mode_permissions();

        return 0;
}
early_initcall(init_real_mode);