/*
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Enhanced CPU detection and feature setting code by Mike Jagdis
 * and Martin Mares, November 1997.
 */

.text
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/pgtable_types.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/setup.h>
#include <asm/processor-flags.h>
#include <asm/msr-index.h>
#include <asm/cpufeature.h>
#include <asm/percpu.h>
#include <asm/nops.h>

/* Physical address */
#define pa(X) ((X) - __PAGE_OFFSET)
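/*
 * Until paging is enabled, this code runs at the kernel's physical load
 * address while symbols are linked at __PAGE_OFFSET + physical, so pa()
 * recovers the physical address of a symbol.  For example, with the
 * default 3G/1G split (__PAGE_OFFSET == 0xC0000000), pa(0xC1000000)
 * evaluates to 0x01000000.
 */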

/*
 * References to members of the new_cpu_data structure.
 */

#define X86		new_cpu_data+CPUINFO_x86
#define X86_VENDOR	new_cpu_data+CPUINFO_x86_vendor
#define X86_MODEL	new_cpu_data+CPUINFO_x86_model
#define X86_MASK	new_cpu_data+CPUINFO_x86_mask
#define X86_HARD_MATH	new_cpu_data+CPUINFO_hard_math
#define X86_CPUID	new_cpu_data+CPUINFO_cpuid_level
#define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
#define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id

/*
 * This is how much memory in addition to the memory covered up to
 * and including _end we need mapped initially.
 * We need:
 *     (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
 *     (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
 *
 * Modulo rounding, each megabyte assigned here requires a kilobyte of
 * memory, which is currently unreclaimed.
 *
 * This should be a multiple of a page.
 *
 * KERNEL_IMAGE_SIZE should be greater than pa(_end)
 * and smaller than max_low_pfn, otherwise we will waste some page table entries.
 */

#if PTRS_PER_PMD > 1
#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
#else
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
#endif

/* Number of possible pages in the lowmem region */
LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)

/* Enough space to fit pagetables for the low memory linear map */
MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT

/*
 * Worst-case size of the kernel mapping we need to make:
 * a relocatable kernel can live anywhere in lowmem, so we need to be able
 * to map all of lowmem.
 */
KERNEL_PAGES = LOWMEM_PAGES

INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
RESERVE_BRK(pagetables, INIT_MAP_SIZE)
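/*
 * Worked example, assuming the default VMSPLIT_3G layout
 * (__PAGE_OFFSET == 0xC0000000, PAGE_SIZE == 4096):
 *   LOWMEM_PAGES = (4 GiB - 3 GiB) / 4 KiB = 262144 pages
 *   non-PAE: PAGE_TABLE_SIZE = 262144 / 1024     = 256 pages -> 1 MiB of brk
 *   PAE:     PAGE_TABLE_SIZE = 262144 / 512 + 4  = 516 pages -> ~2 MiB of brk
 * which is the amount RESERVE_BRK() sets aside above for the early page tables.
 */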

/*
 * 32-bit kernel entrypoint; only used by the boot CPU.  On entry,
 * %esi points to the real-mode code as a 32-bit pointer.
 * CS and DS must be 4 GB flat segments, but we don't depend on
 * any particular GDT layout, because we load our own as soon as we
 * can.
 */
__HEAD
ENTRY(startup_32)
	movl pa(stack_start),%ecx

	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
	   us to not reload segments */
	testb $(1<<6), BP_loadflags(%esi)
	jnz 2f

/*
 * Set segments to known values.
 */
	lgdt pa(boot_gdt_descr)
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl %eax,%ss
2:
	leal -__PAGE_OFFSET(%ecx),%esp
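	/*
	 * stack_start (see the .data section below) holds the virtual address
	 * of the top of the initial stack (init_thread_union+THREAD_SIZE);
	 * paging is still off, so subtract __PAGE_OFFSET to get the usable
	 * physical address.
	 */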

/*
 * Clear BSS first so that there are no surprises...
 */
	cld
	xorl %eax,%eax
	movl $pa(__bss_start),%edi
	movl $pa(__bss_stop),%ecx
	subl %edi,%ecx
	shrl $2,%ecx
	rep ; stosl
/*
 * Copy bootup parameters out of the way.
 * Note: %esi still has the pointer to the real-mode data.
 * With kexec as the boot loader, the parameter segment might be loaded beyond
 * the kernel image and might not even be addressable by the early boot page
 * tables (kexec-on-panic case). Hence copy out the parameters before
 * initializing the page tables.
 */
	movl $pa(boot_params),%edi
	movl $(PARAM_SIZE/4),%ecx
	cld
	rep
	movsl
	movl pa(boot_params) + NEW_CL_POINTER,%esi
	andl %esi,%esi
	jz 1f			# No command line
	movl $pa(boot_command_line),%edi
	movl $(COMMAND_LINE_SIZE/4),%ecx
	rep
	movsl
1:

#ifdef CONFIG_OLPC
	/* save OFW's pgdir table for later use when calling into OFW */
	movl %cr3, %eax
	movl %eax, pa(olpc_ofw_pgd)
#endif

#ifdef CONFIG_MICROCODE_EARLY
	/* Early load ucode on BSP. */
	call load_ucode_bsp
#endif

/*
 * Initialize page tables.  This creates a PDE and a set of page
 * tables, which are located immediately beyond __brk_base.  The variable
 * _brk_end is set up to point to the first "safe" location.
 * Mappings are created both at virtual address 0 (identity mapping)
 * and PAGE_OFFSET for up to _end.
 */
#ifdef CONFIG_X86_PAE

	/*
	 * In PAE mode initial_page_table is statically defined to contain
	 * enough entries to cover the VMSPLIT option (that is the top 1, 2 or 3
	 * entries). The identity mapping is handled by pointing two PGD entries
	 * to the first kernel PMD.
	 *
	 * Note the upper half of each PMD or PTE is always zero at this stage.
	 */

#define KPMDS (((-__PAGE_OFFSET) >> 30) & 3) /* Number of kernel PMDs */
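/*
 * (-__PAGE_OFFSET) is the size of the kernel's part of the address space and
 * each PAE PGD entry covers 1 GiB, so e.g. __PAGE_OFFSET == 0xC0000000 gives
 * KPMDS == 1 (one kernel PMD for the top 1 GiB), 0x80000000 gives 2, and
 * 0x40000000 gives 3.
 */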

	xorl %ebx,%ebx				/* %ebx is kept at zero */

	movl $pa(__brk_base), %edi
	movl $pa(initial_pg_pmd), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PMD entry */
	movl %ecx,(%edx)			/* Store PMD entry */
						/* Upper half already zero */
	addl $8,%edx
	movl $512,%ecx
11:
	stosl
	xchgl %eax,%ebx
	stosl
	xchgl %eax,%ebx
	addl $0x1000,%eax
	loop 11b
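	/*
	 * The inner loop above emits 512 8-byte PTEs per page table: %eax
	 * supplies the low dword (frame address | PTE_IDENT_ATTR), %ebx stays
	 * zero and supplies the high dword, and %eax then advances to the
	 * next 4 KiB frame.
	 */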

	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
1:
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
#else	/* Not PAE */

page_pde_offset = (__PAGE_OFFSET >> 20);
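/*
 * A non-PAE PDE is 4 bytes and is indexed by vaddr >> 22, so the byte offset
 * of the PDE mapping __PAGE_OFFSET is (__PAGE_OFFSET >> 22) * 4, which equals
 * __PAGE_OFFSET >> 20 since __PAGE_OFFSET is 4 MiB aligned; e.g. 0xC00
 * (entry 768) for the default 3G/1G split.  Each identity PDE written below
 * is therefore mirrored page_pde_offset bytes further into the page directory
 * to form the kernel mapping.
 */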

	movl $pa(__brk_base), %edi
	movl $pa(initial_page_table), %edx
	movl $PTE_IDENT_ATTR, %eax
10:
	leal PDE_IDENT_ATTR(%edi),%ecx		/* Create PDE entry */
	movl %ecx,(%edx)			/* Store identity PDE entry */
	movl %ecx,page_pde_offset(%edx)		/* Store kernel PDE entry */
	addl $4,%edx
	movl $1024, %ecx
11:
	stosl
	addl $0x1000,%eax
	loop 11b
	/*
	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
	 */
	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
	cmpl %ebp,%eax
	jb 10b
	addl $__PAGE_OFFSET, %edi
	movl %edi, pa(_brk_end)
	shrl $12, %eax
	movl %eax, pa(max_pfn_mapped)

	/* Do early initialization of the fixmap area */
	movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
	movl %eax,pa(initial_page_table+0xffc)
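	/*
	 * 0xffc is the byte offset of the last of the 1024 4-byte PDEs
	 * (entry 1023), i.e. the slot covering the top 4 MiB of virtual
	 * address space where the fixmap lives.
	 */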
#endif

#ifdef CONFIG_PARAVIRT
	/* This can only trip for a broken bootloader... */
	cmpw $0x207, pa(boot_params + BP_version)
	jb default_entry

	/* Paravirt-compatible boot parameters.  Look to see what architecture
	   we're booting under. */
	movl pa(boot_params + BP_hardware_subarch), %eax
	cmpl $num_subarch_entries, %eax
	jae bad_subarch

	movl pa(subarch_entries)(,%eax,4), %eax
	subl $__PAGE_OFFSET, %eax
	jmp *%eax
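	/*
	 * subarch_entries below holds link-time (virtual) addresses, which is
	 * why __PAGE_OFFSET is subtracted above to get a physical entry point
	 * while paging is still disabled.
	 */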

bad_subarch:
WEAK(lguest_entry)
WEAK(xen_entry)
	/* Unknown implementation; there's really
	   nothing we can do at this point. */
	ud2a

__INITDATA

subarch_entries:
	.long default_entry		/* normal x86/PC */
	.long lguest_entry		/* lguest hypervisor */
	.long xen_entry			/* Xen hypervisor */
	.long default_entry		/* Moorestown MID */
num_subarch_entries = (. - subarch_entries) / 4
.previous
#else
	jmp default_entry
#endif /* CONFIG_PARAVIRT */

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been
 * set up already except the stack. We just set up the stack here, then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movl stack_start, %ecx
	movl %ecx, %esp
	jmp *(initial_code)
ENDPROC(start_cpu0)
#endif

/*
 * Non-boot CPU entry point; entered from trampoline.S
 * We can't lgdt here, because lgdt itself uses a data segment, but
 * we know the trampoline has already loaded the boot_gdt for us.
 *
 * If CPU hotplug is not supported, this code can go in the init section,
 * which will be freed later.
 */
ENTRY(startup_32_smp)
	cld
	movl $(__BOOT_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	movl %eax,%fs
	movl %eax,%gs
	movl pa(stack_start),%ecx
	movl %eax,%ss
	leal -__PAGE_OFFSET(%ecx),%esp

#ifdef CONFIG_MICROCODE_EARLY
	/* Early load ucode on AP. */
	call load_ucode_ap
#endif


default_entry:
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
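/*
 * CR0_STATE collects PE (protected mode), MP (monitor coprocessor), ET
 * (extension type), NE (native FPU error reporting), WP (honour write
 * protect in ring 0), AM (alignment-check enable) and PG (paging).  PG is
 * masked off just below and only set at enable_paging, once the page
 * tables are ready.
 */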
	movl $(CR0_STATE & ~X86_CR0_PG),%eax
	movl %eax,%cr0

/*
 * We want to start out with EFLAGS unambiguously cleared. Some BIOSes leave
 * bits like NT set. This would confuse the debugger if this code is traced. So
 * initialize them properly now, before we go any further. That means DF in
 * particular (even though we have cleared it earlier after copying the
 * command line) because GCC expects it.
 */
	pushl $0
	popfl

/*
 * New page tables may be in 4Mbyte page mode and may be using global pages.
 *
 * NOTE! If we are on a 486 we may have no cr4 at all! Specifically, cr4 exists
 * if and only if CPUID exists and has flags other than the FPU flag set.
 */
	movl $-1,pa(X86_CPUID)		# preset CPUID level
	movl $X86_EFLAGS_ID,%ecx
	pushl %ecx
	popfl				# set EFLAGS=ID
	pushfl
	popl %eax			# get EFLAGS
	testl $X86_EFLAGS_ID,%eax	# did EFLAGS.ID remain set?
	jz enable_paging		# hw disallowed setting of ID bit
					# which means no CPUID and no CR4

	xorl %eax,%eax
	cpuid
	movl %eax,pa(X86_CPUID)		# save largest std CPUID function

	movl $1,%eax
	cpuid
	andl $~1,%edx			# Ignore CPUID.FPU
	jz enable_paging		# No flags or only CPUID.FPU = no CR4

	movl pa(mmu_cr4_features),%eax
	movl %eax,%cr4

	testb $X86_CR4_PAE, %al		# check if PAE is enabled
	jz enable_paging

	/* Check if extended functions are implemented */
	movl $0x80000000, %eax
	cpuid
	/* Value must be in the range 0x80000001 to 0x8000ffff */
	subl $0x80000001, %eax
	cmpl $(0x8000ffff-0x80000001), %eax
	ja enable_paging
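	/*
	 * Single unsigned compare for "0x80000001 <= eax <= 0x8000ffff":
	 * a max extended leaf below 0x80000001 wraps around on the subtract
	 * and a value above 0x8000ffff exceeds the bound, so either case
	 * takes the 'ja' above.
	 */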

	/* Clear bogus XD_DISABLE bits */
	call verify_cpu

	mov $0x80000001, %eax
	cpuid
	/* Execute Disable bit supported? */
	btl $(X86_FEATURE_NX & 31), %edx
	jnc enable_paging

	/* Setup EFER (Extended Feature Enable Register) */
	movl $MSR_EFER, %ecx
	rdmsr

	btsl $_EFER_NX, %eax
	/* Make changes effective */
	wrmsr

enable_paging:

/*
 * Enable paging
 */
	movl $pa(initial_page_table), %eax
	movl %eax,%cr3		/* set the page table pointer.. */
	movl $CR0_STATE,%eax
	movl %eax,%cr0		/* ..and set paging (PG) bit */
	ljmp $__BOOT_CS,$1f	/* Clear prefetch and normalize %eip */
1:
	/* Shift the stack pointer to a virtual address */
	addl $__PAGE_OFFSET, %esp

/*
 * start system 32-bit setup. We need to re-do some of the things done
 * in 16-bit mode for the "real" operations.
 */
	movl setup_once_ref,%eax
	andl %eax,%eax
	jz 1f				# Did we do this already?
	call *%eax
1:

/*
 * Check if it is a 486
 */
	movb $4,X86			# at least 486
	cmpl $-1,X86_CPUID
	je is486

	/* get vendor info */
	xorl %eax,%eax			# call CPUID with 0 -> return vendor ID
	cpuid
	movl %eax,X86_CPUID		# save CPUID level
	movl %ebx,X86_VENDOR_ID		# lo 4 chars
	movl %edx,X86_VENDOR_ID+4	# next 4 chars
	movl %ecx,X86_VENDOR_ID+8	# last 4 chars

	orl %eax,%eax			# do we have processor info as well?
	je is486

	movl $1,%eax		# Use the CPUID instruction to get CPU type
	cpuid
	movb %al,%cl		# save reg for future use
	andb $0x0f,%ah		# mask processor family
	movb %ah,X86
	andb $0xf0,%al		# mask model
	shrb $4,%al
	movb %al,X86_MODEL
	andb $0x0f,%cl		# mask mask revision
	movb %cl,X86_MASK
	movl %edx,X86_CAPABILITY
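	/*
	 * CPUID(1) returns the family in EAX[11:8], the model in EAX[7:4]
	 * and the stepping ("mask revision") in EAX[3:0]; e.g. EAX = 0x633
	 * means family 6, model 3, stepping 3.  The extended family/model
	 * fields are ignored here; the full identification is redone later
	 * in C code.
	 */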

is486:
	movl $0x50022,%ecx	# set AM, WP, NE and MP
	movl %cr0,%eax
	andl $0x80000011,%eax	# Save PG,PE,ET
	orl %ecx,%eax
	movl %eax,%cr0

	lgdt early_gdt_descr
	lidt idt_descr
	ljmp $(__KERNEL_CS),$1f
1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.

	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
	movl %eax,%ds
	movl %eax,%es

	movl $(__KERNEL_PERCPU), %eax
	movl %eax,%fs			# set this cpu's percpu

	movl $(__KERNEL_STACK_CANARY),%eax
	movl %eax,%gs

	xorl %eax,%eax			# Clear LDT
	lldt %ax

	pushl $0		# fake return address for unwinder
	jmp *(initial_code)

#include "verify_cpu.S"

/*
 * setup_once
 *
 * The setup work we only want to run on the BSP.
 *
 * Warning: %esi is live across this function.
 */
__INIT
setup_once:
	/*
	 * Set up an IDT with 256 entries pointing to ignore_int as
	 * interrupt gates. It doesn't actually load the IDT - that needs
	 * to be done on each CPU. Interrupts are enabled elsewhere,
	 * when we can be relatively sure everything is ok.
	 */

	movl $idt_table,%edi
	movl $early_idt_handlers,%eax
	movl $NUM_EXCEPTION_VECTORS,%ecx
1:
	movl %eax,(%edi)
	movl %eax,4(%edi)
	/* interrupt gate, dpl=0, present */
	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
	addl $9,%eax
	addl $8,%edi
	loop 1b
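	/*
	 * Each IDT entry is 8 bytes: offset[15:0] (bytes 0-1), selector
	 * (2-3), flags (4-5), offset[31:16] (6-7).  Storing the handler
	 * address at offsets 0 and 4 places offset[15:0] in bytes 0-1 and
	 * offset[31:16] in bytes 6-7; the 32-bit store at offset 2 then
	 * overwrites bytes 2-5 with __KERNEL_CS and the 0x8E gate type.
	 * The stride of 9 matches the size of each early_idt_handlers
	 * stub below (2 + 2 + 5 bytes).
	 */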

	movl $256 - NUM_EXCEPTION_VECTORS,%ecx
	movl $ignore_int,%edx
	movl $(__KERNEL_CS << 16),%eax
	movw %dx,%ax		/* selector = 0x0010 = cs */
	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
2:
	movl %eax,(%edi)
	movl %edx,4(%edi)
	addl $8,%edi
	loop 2b

#ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * Configure the stack canary. The linker can't handle this by
	 * relocation. Manually set the base address in the stack canary
	 * segment descriptor.
	 */
	movl $gdt_page,%eax
	movl $stack_canary,%ecx
	movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
	shrl $16, %ecx
	movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
	movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
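	/*
	 * The three stores above scatter the 32-bit address of stack_canary
	 * across the descriptor's base-address fields: base[15:0] at byte
	 * offset 2, base[23:16] at offset 4 and base[31:24] at offset 7.
	 */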
#endif

	andl $0,setup_once_ref	/* Once is enough, thanks */
	ret

ENTRY(early_idt_handlers)
	# 36(%esp) %eflags
	# 32(%esp) %cs
	# 28(%esp) %eip
	# 24(%esp) error code
i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if (EXCEPTION_ERRCODE_MASK >> i) & 1
	ASM_NOP2
	.else
	pushl $0		# Dummy error code, to make stack frame uniform
	.endif
	pushl $i		# 20(%esp) Vector number
	jmp early_idt_handler
	i = i + 1
	.endr
ENDPROC(early_idt_handlers)

	/* This is global to keep gas from relaxing the jumps */
ENTRY(early_idt_handler)
	cld

	cmpl $X86_TRAP_NMI,(%esp)
	je is_nmi		# Ignore NMI

	cmpl $2,%ss:early_recursion_flag
	je hlt_loop
	incl %ss:early_recursion_flag

	push %eax		# 16(%esp)
	push %ecx		# 12(%esp)
	push %edx		#  8(%esp)
	push %ds		#  4(%esp)
	push %es		#  0(%esp)
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es

	cmpl $(__KERNEL_CS),32(%esp)
	jne 10f

	leal 28(%esp),%eax	# Pointer to %eip
	call early_fixup_exception
	andl %eax,%eax
	jnz ex_entry		/* found an exception entry */

10:
#ifdef CONFIG_PRINTK
	xorl %eax,%eax
	movw %ax,2(%esp)	/* clean up the segment values on some cpus */
	movw %ax,6(%esp)
	movw %ax,34(%esp)
	leal 40(%esp),%eax
	pushl %eax		/* %esp before the exception */
	pushl %ebx
	pushl %ebp
	pushl %esi
	pushl %edi
	movl %cr2,%eax
	pushl %eax
	pushl (20+6*4)(%esp)	/* trapno */
	pushl $fault_msg
	call printk
#endif
	call dump_stack
hlt_loop:
	hlt
	jmp hlt_loop

ex_entry:
	pop %es
	pop %ds
	pop %edx
	pop %ecx
	pop %eax
	decl %ss:early_recursion_flag
is_nmi:
	addl $8,%esp		/* drop vector number and error code */
	iret
ENDPROC(early_idt_handler)

/* This is the default interrupt "handler" :-) */
	ALIGN
ignore_int:
	cld
#ifdef CONFIG_PRINTK
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl %es
	pushl %ds
	movl $(__KERNEL_DS),%eax
	movl %eax,%ds
	movl %eax,%es
	cmpl $2,early_recursion_flag
	je hlt_loop
	incl early_recursion_flag
	pushl 16(%esp)
	pushl 24(%esp)
	pushl 32(%esp)
	pushl 40(%esp)
	pushl $int_msg
	call printk

	call dump_stack

	addl $(5*4),%esp
	popl %ds
	popl %es
	popl %edx
	popl %ecx
	popl %eax
#endif
	iret
ENDPROC(ignore_int)
__INITDATA
	.align 4
early_recursion_flag:
	.long 0

__REFDATA
	.align 4
ENTRY(initial_code)
	.long i386_start_kernel
ENTRY(setup_once_ref)
	.long setup_once

/*
 * BSS section
 */
__PAGE_ALIGNED_BSS
	.align PAGE_SIZE
#ifdef CONFIG_X86_PAE
initial_pg_pmd:
	.fill 1024*KPMDS,4,0
#else
ENTRY(initial_page_table)
	.fill 1024,4,0
#endif
initial_pg_fixmap:
	.fill 1024,4,0
ENTRY(empty_zero_page)
	.fill 4096,1,0
ENTRY(swapper_pg_dir)
	.fill 1024,4,0

/*
 * This starts the data section.
 */
#ifdef CONFIG_X86_PAE
__PAGE_ALIGNED_DATA
	/* Page-aligned for the benefit of paravirt? */
	.align PAGE_SIZE
ENTRY(initial_page_table)
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0	/* low identity map */
# if KPMDS == 3
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x2000),0
# elif KPMDS == 2
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR+0x1000),0
# elif KPMDS == 1
	.long	0,0
	.long	0,0
	.long	pa(initial_pg_pmd+PGD_IDENT_ATTR),0
# else
#  error "Kernel PMDs should be 1, 2 or 3"
# endif
	.align PAGE_SIZE		/* needs to be page-sized too */
#endif

.data
.balign 4
ENTRY(stack_start)
	.long init_thread_union+THREAD_SIZE

__INITRODATA
int_msg:
	.asciz "Unknown interrupt or fault at: %p %p %p\n"

fault_msg:
/* fault info: */
	.ascii "BUG: Int %d: CR2 %p\n"
/* regs pushed in early_idt_handler: */
	.ascii " EDI %p ESI %p EBP %p EBX %p\n"
	.ascii " ESP %p ES %p DS %p\n"
	.ascii " EDX %p ECX %p EAX %p\n"
/* fault frame: */
	.ascii " vec %p err %p EIP %p CS %p flg %p\n"
	.ascii "Stack: %p %p %p %p %p %p %p %p\n"
	.ascii " %p %p %p %p %p %p %p %p\n"
	.asciz " %p %p %p %p %p %p %p %p\n"

#include "../../x86/xen/xen-head.S"

/*
 * The IDT and GDT 'descriptors' are strange 48-bit objects
 * only used by the lidt and lgdt instructions. They are not
 * like usual segment descriptors - they consist of a 16-bit
 * segment size and a 32-bit linear address value:
 */

.data
.globl boot_gdt_descr
.globl idt_descr

	ALIGN
# early boot GDT descriptor (must use 1:1 address mapping)
	.word 0				# 32 bit align gdt_desc.address
boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

	.word 0				# 32-bit align idt_desc.address
idt_descr:
	.word IDT_ENTRIES*8-1		# idt contains 256 entries
	.long idt_table

# boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
	.word GDT_ENTRIES*8-1
	.long gdt_page			/* Overwritten for secondary CPUs */

/*
 * The boot_gdt must mirror the equivalent in setup.S and is
 * used only for booting.
 */
	.align L1_CACHE_BYTES
ENTRY(boot_gdt)
	.fill GDT_ENTRY_BOOT_CS,8,0
	.quad 0x00cf9a000000ffff	/* kernel 4GB code at 0x00000000 */
	.quad 0x00cf92000000ffff	/* kernel 4GB data at 0x00000000 */
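/*
 * Decoding 0x00cf9a000000ffff: base 0, limit 0xfffff, flags 0xc (4 KiB
 * granularity, 32-bit), access byte 0x9a (present, DPL 0, execute/read
 * code) - i.e. a flat 4 GiB code segment.  The data descriptor differs
 * only in the access byte (0x92, read/write data).
 */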