#include <linux/io.h>
#include <linux/memblock.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/realmode.h>

struct real_mode_header *real_mode_header;
u32 *trampoline_cr4_features;

void __init setup_real_mode(void)
{
	phys_addr_t mem;
	u16 real_mode_seg;
	u32 *rel;
	u32 count;
	u32 *ptr;
	u16 *seg;
	int i;
	unsigned char *base;
	struct trampoline_header *trampoline_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
	u64 *trampoline_pgd;
	u64 efer;
#endif

	/* Has to be in very low memory so we can execute real-mode AP code. */
	mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
	if (!mem)
		panic("Cannot allocate trampoline\n");

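	/*
	 * Note: memblock_find_in_range() returns 0 when no suitable range
	 * is found, so a zero physical address doubles as the failure case
	 * here.  The blob is then addressed through __va(), which works at
	 * this point because low memory is already covered by the kernel's
	 * direct mapping.
	 */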
	base = __va(mem);
	memblock_reserve(mem, size);
	real_mode_header = (struct real_mode_header *) base;
	printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
	       base, (unsigned long long)mem, size);

	memcpy(base, real_mode_blob, size);

	real_mode_seg = __pa(base) >> 4;
	rel = (u32 *) real_mode_relocs;

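	/*
	 * The relocation table (built together with the real-mode blob) is
	 * two count-prefixed arrays of byte offsets into the blob: the
	 * first set patches 16-bit segment fields with the blob's
	 * real-mode segment (physical base >> 4), the second patches
	 * 32-bit fields by adding the blob's physical base so they become
	 * linear addresses.
	 */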
	/* 16-bit segment relocations. */
	count = rel[0];
	rel = &rel[1];
	for (i = 0; i < count; i++) {
		seg = (u16 *) (base + rel[i]);
		*seg = real_mode_seg;
	}

	/* 32-bit linear relocations. */
	count = rel[i];
	rel = &rel[i + 1];
	for (i = 0; i < count; i++) {
		ptr = (u32 *) (base + rel[i]);
		*ptr += __pa(base);
	}

	/* Must be performed *after* relocation. */
	trampoline_header = (struct trampoline_header *)
		__va(real_mode_header->trampoline_header);

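	/*
	 * The trampoline header is the hand-off area read by the real-mode
	 * startup code: on 32-bit it carries the protected-mode entry
	 * point and a boot GDT, on 64-bit the entry point plus the EFER
	 * and CR4 values the trampoline should load before jumping into
	 * the kernel.
	 */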
#ifdef CONFIG_X86_32
	trampoline_header->start = __pa(startup_32_smp);
	trampoline_header->gdt_limit = __BOOT_DS + 7;
	trampoline_header->gdt_base = __pa(boot_gdt);
#else
	/*
	 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
	 * so we need to mask it out.
	 */
	rdmsrl(MSR_EFER, efer);
	trampoline_header->efer = efer & ~EFER_LMA;

	trampoline_header->start = (u64) secondary_startup_64;
	trampoline_cr4_features = &trampoline_header->cr4;
	*trampoline_cr4_features = read_cr4();

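	/*
	 * Minimal page table for the trampoline: PGD entry 0 reuses the
	 * kernel's identity-mapping PUD so the AP can keep running from
	 * its physical address right after enabling paging, and the top
	 * entry (511) reuses the kernel-text mapping so the jump to
	 * secondary_startup_64 at its kernel virtual address works.
	 */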
	trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
	trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
	trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
#endif
}

/*
 * setup_real_mode() gets called very early, to guarantee the
 * availability of low memory. This is before the proper kernel page
 * tables are set up, so we cannot set page permissions in that
 * function. Thus, we use an arch_initcall instead.
 */
static int __init set_real_mode_permissions(void)
{
	unsigned char *base = (unsigned char *) real_mode_header;
	size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);

	size_t ro_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		__pa(base);

	size_t text_size =
		PAGE_ALIGN(real_mode_header->ro_end) -
		real_mode_header->text_start;

	unsigned long text_start =
		(unsigned long) __va(real_mode_header->text_start);

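	/*
	 * Layered permissions: the whole blob starts out non-executable,
	 * everything up to ro_end is then made read-only, and execute
	 * permission is restored only for the text region.
	 */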
	set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
	set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
	set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);

	return 0;
}

arch_initcall(set_real_mode_permissions);