/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1991,1992 Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>

#include "calling.h"

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPTION
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

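/*
 * With PTI enabled, the kernel and user copies of the page-table root
 * live in an adjacent pair of pages, so switching between them only
 * requires toggling bit PAGE_SHIFT (bit 12) of %cr3:
 */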
#define PTI_SWITCH_MASK		(1 << PAGE_SHIFT)

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS and the kernel only uses it for the
 * stack canary, which is required to be at %gs:20 by gcc.  Read the
 * comment at the top of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
#ifdef CONFIG_X86_32_LAZY_GS

/* unfortunately push/pop can't be no-ops */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

/* all the rest are no-ops */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
  .if \pop <> 0
	add	$\pop, %esp
  .endif
.endm
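/*
 * If the popl %gs in POP_GS faults (e.g. on a selector that went stale
 * while saved on the stack), the fixup below rewrites the saved value
 * to 0 and retries the pop, so a NULL selector gets loaded instead of
 * the fault being fatal.
 */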
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif /* CONFIG_X86_32_LAZY_GS */

/* Unconditionally switch to user cr3 */
.macro SWITCH_TO_USER_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI

	movl	%cr3, \scratch_reg
	orl	$PTI_SWITCH_MASK, \scratch_reg
	movl	\scratch_reg, %cr3
.Lend_\@:
.endm

.macro BUG_IF_WRONG_CR3 no_user_check=0
#ifdef CONFIG_DEBUG_ENTRY
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	.if \no_user_check == 0
	/* coming from usermode? */
	testl	$USER_SEGMENT_RPL_MASK, PT_CS(%esp)
	jz	.Lend_\@
	.endif
	/* On user-cr3? */
	movl	%cr3, %eax
	testl	$PTI_SWITCH_MASK, %eax
	jnz	.Lend_\@
	/* From userspace with kernel cr3 - BUG */
	ud2
.Lend_\@:
#endif
.endm

/*
 * Switch to kernel cr3 if not already loaded and return current cr3 in
 * \scratch_reg
 */
.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
	ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
	movl	%cr3, \scratch_reg
	/* Test if we are already on kernel CR3 */
	testl	$PTI_SWITCH_MASK, \scratch_reg
	jz	.Lend_\@
	andl	$(~PTI_SWITCH_MASK), \scratch_reg
	movl	\scratch_reg, %cr3
	/* Return original CR3 in \scratch_reg */
	orl	$PTI_SWITCH_MASK, \scratch_reg
.Lend_\@:
.endm

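/*
 * The CS slot of the iret frame is a full 32-bit word of which only
 * the low 16 bits hold the segment selector; the entry code borrows
 * the high bits to carry state markers across the entry/exit paths:
 */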
#define CS_FROM_ENTRY_STACK	(1 << 31)
#define CS_FROM_USER_CR3	(1 << 30)
#define CS_FROM_KERNEL		(1 << 29)
#define CS_FROM_ESPFIX		(1 << 28)

.macro FIXUP_FRAME
	/*
	 * The high bits of the CS dword (__csh) are used for CS_FROM_*.
	 * Clear them in case hardware didn't do this for us.
	 */
	andl	$0x0000ffff, 4*4(%esp)

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, 5*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@
#endif
	testl	$USER_SEGMENT_RPL_MASK, 4*4(%esp)
	jnz	.Lfrom_usermode_no_fixup_\@

	orl	$CS_FROM_KERNEL, 4*4(%esp)

	/*
	 * When we're here from kernel mode, the (exception) stack looks like:
	 *
	 *  6*4(%esp) - <previous context>
	 *  5*4(%esp) - flags
	 *  4*4(%esp) - cs
	 *  3*4(%esp) - ip
	 *  2*4(%esp) - orig_eax
	 *  1*4(%esp) - gs / function
	 *  0*4(%esp) - fs
	 *
	 * Let's build a 5-entry IRET frame after that, such that struct pt_regs
	 * is complete and in particular regs->sp is correct. This gives us
	 * the original 6 entries as gap:
	 *
	 * 14*4(%esp) - <previous context>
	 * 13*4(%esp) - gap / flags
	 * 12*4(%esp) - gap / cs
	 * 11*4(%esp) - gap / ip
	 * 10*4(%esp) - gap / orig_eax
	 *  9*4(%esp) - gap / gs / function
	 *  8*4(%esp) - gap / fs
	 *  7*4(%esp) - ss
	 *  6*4(%esp) - sp
	 *  5*4(%esp) - flags
	 *  4*4(%esp) - cs
	 *  3*4(%esp) - ip
	 *  2*4(%esp) - orig_eax
	 *  1*4(%esp) - gs / function
	 *  0*4(%esp) - fs
	 */

	pushl	%ss		# ss
	pushl	%esp		# sp (points at ss)
	addl	$7*4, (%esp)	# point sp back at the previous context
	pushl	7*4(%esp)	# flags
	pushl	7*4(%esp)	# cs
	pushl	7*4(%esp)	# ip
	pushl	7*4(%esp)	# orig_eax
	pushl	7*4(%esp)	# gs / function
	pushl	7*4(%esp)	# fs
.Lfrom_usermode_no_fixup_\@:
.endm

.macro IRET_FRAME
	/*
	 * We're called with %ds, %es, %fs, and %gs from the interrupted
	 * frame, so we shouldn't use them.  Also, we may be in ESPFIX
	 * mode and therefore have a nonzero SS base and an offset ESP,
	 * so any attempt to access the stack needs to use SS (except for
	 * accesses through %esp, which automatically use SS).
	 */
	testl	$CS_FROM_KERNEL, 1*4(%esp)
	jz	.Lfinished_frame_\@

	/*
	 * Reconstruct the 3-entry IRET frame right after the (modified)
	 * regs->sp without lowering %esp in between, such that an NMI in the
	 * middle doesn't scribble our stack.
	 */
	pushl	%eax
	pushl	%ecx
	movl	5*4(%esp), %eax		# (modified) regs->sp

	movl	4*4(%esp), %ecx		# flags
	movl	%ecx, %ss:-1*4(%eax)

	movl	3*4(%esp), %ecx		# cs
	andl	$0x0000ffff, %ecx
	movl	%ecx, %ss:-2*4(%eax)

	movl	2*4(%esp), %ecx		# ip
	movl	%ecx, %ss:-3*4(%eax)

	movl	1*4(%esp), %ecx		# eax
	movl	%ecx, %ss:-4*4(%eax)

	popl	%ecx
	lea	-4*4(%eax), %esp
	popl	%eax
.Lfinished_frame_\@:
.endm

.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
	cld
.if \skip_gs == 0
	PUSH_GS
.endif
	pushl	%fs

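	/*
	 * %fs is pointed at the kernel per-cpu segment straight away
	 * because UNWIND_ESPFIX_STACK below uses PER_CPU_VAR(), which
	 * is addressed through %fs on 32-bit.
	 */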
	pushl	%eax
	movl	$(__KERNEL_PERCPU), %eax
	movl	%eax, %fs
.if \unwind_espfix > 0
	UNWIND_ESPFIX_STACK
.endif
	popl	%eax

	FIXUP_FRAME
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
.if \skip_gs == 0
	SET_KERNEL_GS %edx
.endif
	/* Switch to kernel stack if necessary */
.if \switch_stacks > 0
	SWITCH_TO_KERNEL_STACK
.endif
.endm

.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
	SAVE_ALL unwind_espfix=\unwind_espfix

	BUG_IF_WRONG_CR3

	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We can enter with either user or kernel cr3; the code will
	 * store the old cr3 in \cr3_reg and switch to the kernel cr3
	 * if necessary.
	 */
	SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg

.Lend_\@:
.endm

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
	IRET_FRAME
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

.macro RESTORE_ALL_NMI cr3_reg:req pop=0
	/*
	 * Now switch the CR3 when PTI is enabled.
	 *
	 * We enter with kernel cr3 and switch the cr3 to the value
	 * stored in \cr3_reg, which is either a user or a kernel cr3.
	 */
	ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI

	testl	$PTI_SWITCH_MASK, \cr3_reg
	jz	.Lswitched_\@

	/* User cr3 in \cr3_reg - write it to hardware cr3 */
	movl	\cr3_reg, %cr3

.Lswitched_\@:

	BUG_IF_WRONG_CR3

	RESTORE_REGS pop=\pop
.endm

.macro CHECK_AND_APPLY_ESPFIX
#ifdef CONFIG_X86_ESPFIX32
#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET

	ALTERNATIVE	"jmp .Lend_\@", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	jne	.Lend_\@	# returning to user-space with LDT SS

	/*
	 * Setup and switch to ESPFIX stack
	 *
	 * We're returning to userspace with a 16 bit stack. The CPU will not
	 * restore the high word of ESP for us on executing iret... This is an
	 * "official" bug of all the x86-compatible CPUs, which we can work
	 * around to make dosemu and wine happy. We do this by preloading the
	 * high word of ESP with the high word of the userspace ESP while
	 * switching to the ESPFIX segment, whose base address makes up for
	 * the difference.
	 */
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
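	/*
	 * Bytes 4 and 7 of a GDT descriptor hold bits 16..23 and 24..31
	 * of the segment base, so the two stores below set the ESPFIX
	 * segment base to the kernel/user esp difference computed above.
	 */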
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
.Lend_\@:
#endif /* CONFIG_X86_ESPFIX32 */
.endm

/*
 * Called with pt_regs fully populated and kernel segments loaded,
 * so we can access PER_CPU and use the integer registers.
 *
 * We need to be very careful here with the %esp switch, because an NMI
 * can happen anywhere. If the NMI handler finds itself on the
 * entry-stack, it will overwrite the task-stack and everything we
 * copied there. So allocate the stack-frame on the task-stack and
 * switch to it before we do any copying.
 */

.macro SWITCH_TO_KERNEL_STACK

	ALTERNATIVE	"", "jmp .Lend_\@", X86_FEATURE_XENPV

	BUG_IF_WRONG_CR3

	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax

	/*
	 * %eax now contains the entry cr3 and we carry it forward in
	 * that register for the time this macro runs
	 */

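	/*
	 * The unsigned comparison below does both bounds checks at
	 * once: if %esp is below the entry stack, end - %esp exceeds
	 * SIZEOF_entry_stack; if %esp is above it, the subtraction
	 * wraps to a huge unsigned value. Either way jae is taken.
	 */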
	/* Are we on the entry stack? Bail out if not! */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%esp, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jae	.Lend_\@

	/* Load stack pointer into %esi and %edi */
	movl	%esp, %esi
	movl	%esi, %edi

	/* Move %edi to the top of the entry stack */
	andl	$(MASK_entry_stack), %edi
	addl	$(SIZEOF_entry_stack), %edi

	/* Load top of task-stack into %edi */
	movl	TSS_entry2task_stack(%edi), %edi

	/* Special case - entry from kernel mode via entry stack */
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %ecx		# mix EFLAGS and CS
	movb	PT_CS(%esp), %cl
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
#else
	movl	PT_CS(%esp), %ecx
	andl	$SEGMENT_RPL_MASK, %ecx
#endif
	cmpl	$USER_RPL, %ecx
	jb	.Lentry_from_kernel_\@

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$X86_EFLAGS_VM, PT_EFLAGS(%esi)
	jz	.Lcopy_pt_regs_\@

	/*
	 * Stack-frame contains 4 additional segment registers when
	 * coming from VM86 mode
	 */
	addl	$(4 * 4), %ecx

#endif
.Lcopy_pt_regs_\@:

	/* Allocate frame on task-stack */
	subl	%ecx, %edi

	/* Switch to task-stack */
	movl	%edi, %esp

	/*
	 * We are now on the task-stack and can safely copy over the
	 * stack-frame
	 */
	shrl	$2, %ecx
	cld
	rep movsl

	jmp	.Lend_\@

.Lentry_from_kernel_\@:

	/*
	 * This handles the case when we enter the kernel from
	 * kernel-mode and %esp points to the entry-stack. When this
	 * happens we need to switch to the task-stack to run C code,
	 * but switch back to the entry-stack again when we approach
	 * iret and return to the interrupted code-path. This usually
	 * happens when we hit an exception while restoring user-space
	 * segment registers on the way back to user-space or when the
	 * sysenter handler runs with eflags.tf set.
	 *
	 * When we switch to the task-stack here, we can't trust the
	 * contents of the entry-stack anymore, as the exception handler
	 * might be scheduled out or moved to another CPU. Therefore we
	 * copy the complete entry-stack to the task-stack and set a
	 * marker in the iret-frame (bit 31 of the CS dword) to detect
	 * what we've done on the iret path.
	 *
	 * On the iret path we copy everything back and switch to the
	 * entry-stack, so that the interrupted kernel code-path
	 * continues on the same stack it was interrupted with.
	 *
	 * Be aware that an NMI can happen anytime in this code.
	 *
	 * %esi: Entry-Stack pointer (same as %esp)
	 * %edi: Top of the task stack
	 * %eax: CR3 on kernel entry
	 */

	/* Calculate number of bytes on the entry stack in %ecx */
	movl	%esi, %ecx

	/* %ecx to the top of entry-stack */
	andl	$(MASK_entry_stack), %ecx
	addl	$(SIZEOF_entry_stack), %ecx

	/* Number of bytes on the entry stack to %ecx */
	sub	%esi, %ecx

	/* Mark stackframe as coming from entry stack */
	orl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)

	/*
	 * Test the cr3 used to enter the kernel and add a marker
	 * so that we can switch back to it before iret.
	 */
	testl	$PTI_SWITCH_MASK, %eax
	jz	.Lcopy_pt_regs_\@
	orl	$CS_FROM_USER_CR3, PT_CS(%esp)

	/*
	 * %esi and %edi are unchanged, %ecx contains the number of
	 * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
	 * the stack-frame on task-stack and copy everything over
	 */
	jmp	.Lcopy_pt_regs_\@

.Lend_\@:
.endm

/*
 * Switch back from the kernel stack to the entry stack.
 *
 * The %esp register must point to pt_regs on the task stack. It will
 * first calculate the size of the stack-frame to copy, depending on
 * whether we return to VM86 mode or not. With that it uses 'rep movsl'
 * to copy the contents of the stack over to the entry stack.
 *
 * We must be very careful here, as we can't trust the contents of the
 * task-stack once we switched to the entry-stack. When an NMI happens
 * while on the entry-stack, the NMI handler will switch back to the top
 * of the task stack, overwriting the stack-frame we are about to copy.
 * Therefore we switch the stack only after everything is copied over.
 */
.macro SWITCH_TO_ENTRY_STACK

	ALTERNATIVE	"", "jmp .Lend_\@", X86_FEATURE_XENPV

	/* Bytes to copy */
	movl	$PTREGS_SIZE, %ecx

#ifdef CONFIG_VM86
	testl	$(X86_EFLAGS_VM), PT_EFLAGS(%esp)
	jz	.Lcopy_pt_regs_\@

	/* Additional 4 registers to copy when returning to VM86 mode */
	addl	$(4 * 4), %ecx

.Lcopy_pt_regs_\@:
#endif

	/* Initialize source and destination for movsl */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
	subl	%ecx, %edi
	movl	%esp, %esi

	/* Save future stack pointer in %ebx */
	movl	%edi, %ebx

	/* Copy over the stack-frame */
	shrl	$2, %ecx
	cld
	rep movsl

	/*
	 * Switch to entry-stack - needs to happen after everything is
	 * copied because the NMI handler will overwrite the task-stack
	 * when on entry-stack
	 */
	movl	%ebx, %esp

.Lend_\@:
.endm

/*
 * This macro handles the case when we return to kernel-mode on the iret
 * path and have to switch back to the entry stack and/or user-cr3.
 *
 * See the comments below the .Lentry_from_kernel_\@ label in the
 * SWITCH_TO_KERNEL_STACK macro for more details.
 */
.macro PARANOID_EXIT_TO_KERNEL_MODE

	/*
	 * Test if we entered the kernel with the entry-stack. Most
	 * likely we did not, because this code only runs on the
	 * return-to-kernel path.
	 */
	testl	$CS_FROM_ENTRY_STACK, PT_CS(%esp)
	jz	.Lend_\@

	/* Unlikely slow-path */

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_ENTRY_STACK), PT_CS(%esp)

	/* Copy the remaining task-stack contents to entry-stack */
	movl	%esp, %esi
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi

	/* Bytes on the task-stack to ecx */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
	subl	%esi, %ecx

	/* Allocate stack-frame on entry-stack */
	subl	%ecx, %edi

	/*
	 * Save future stack-pointer, we must not switch until the
	 * copy is done, otherwise the NMI handler could destroy the
	 * contents of the task-stack we are about to copy.
	 */
	movl	%edi, %ebx

	/* Do the copy */
	shrl	$2, %ecx
	cld
	rep movsl

	/* Safe to switch to entry-stack now */
	movl	%ebx, %esp

	/*
	 * We came from entry-stack and need to check if we also need to
	 * switch back to user cr3.
	 */
	testl	$CS_FROM_USER_CR3, PT_CS(%esp)
	jz	.Lend_\@

	/* Clear marker from stack-frame */
	andl	$(~CS_FROM_USER_CR3), PT_CS(%esp)

	SWITCH_TO_USER_CR3 scratch_reg=%eax

.Lend_\@:
.endm
| 729 | /* |
Brian Gerst | 0100301 | 2016-08-13 12:38:19 -0400 | [diff] [blame] | 730 | * %eax: prev task |
| 731 | * %edx: next task |
| 732 | */ |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 733 | SYM_CODE_START(__switch_to_asm) |
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
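	/*
	 * For reference, the 32-bit layout of that struct (declared in
	 * asm/switch_to.h) is, from low to high address: flags, si, di,
	 * bx, bp, ret_addr - the mirror image of the five pushes below
	 * plus the return address of our caller.
	 */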
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi
	/*
	 * Flags are saved to prevent AC leakage. This could go
	 * away if objtool had 32-bit support to verify
	 * the STAC/CLAC correctness.
	 */
	pushfl

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* Restore flags of the incoming task to restore AC state. */
	popfl
	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

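	/*
	 * Note the jmp rather than call: __switch_to() will return
	 * through the ret_addr slot of the *new* task's frame, resuming
	 * it right where it switched out.
	 */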
	jmp	__switch_to
SYM_CODE_END(__switch_to_asm)

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function, so its argument has to be pushed on the stack. This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
SYM_FUNC_START(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
SYM_FUNC_END(schedule_tail_wrapper)
/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
SYM_CODE_START(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	STACKLEAK_ERASE
	jmp	restore_all

	/* kernel thread */
1:	movl	%edi, %eax
	CALL_NOSPEC %ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
SYM_CODE_END(ret_from_fork)

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
SYM_CODE_START_LOCAL(ret_from_exception)
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from a child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	restore_all_kernel		# not returning to v8086 or userspace

	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
SYM_CODE_END(ret_from_exception)

SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN_PV
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
SYM_CODE_START(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
SYM_CODE_END(xen_sysenter_target)
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available.  This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax	system call number
 * ebx	arg1
 * ecx	arg2
 * edx	arg3
 * esi	arg4
 * edi	arg5
 * ebp	user stack
 * 0(%ebp) arg6
 */
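/*
 * For illustration only (a sketch, not the literal vDSO source), the
 * vDSO typically reaches this entry point with a sequence like:
 *
 *	pushl	%ecx		# SYSEXIT clobbers ecx/edx, save them
 *	pushl	%edx
 *	pushl	%ebp		# caller's arg6 lives in %ebp
 *	movl	%esp, %ebp	# kernel recovers the user stack from %ebp
 *	sysenter
 *
 * which is why %ebp carries the user stack pointer and arg6 is found
 * at 0(%ebp).
 */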
SYM_FUNC_START(entry_SYSENTER_32)
	/*
	 * On entry-stack with all userspace-regs live - save and
	 * restore eflags and %eax to use it as scratch-reg for the cr3
	 * switch.
	 */
	pushfl
	pushl	%eax
	BUG_IF_WRONG_CR3 no_user_check=1
	SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
	popl	%eax
	popfl

	/* Stack empty again, switch to task stack */
	movl	TSS_entry2task_stack(%esp), %esp

.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest, stack already switched */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	STACKLEAK_ERASE

	/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */

	/*
	 * Setup entry stack - we keep the pointer in %eax and do the
	 * switch after almost all user-state is restored.
	 */

	/* Load entry stack pointer and allocate frame for eflags/eax */
	movl	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
	subl	$(2*4), %eax

	/* Copy eflags and eax to entry stack */
	movl	PT_EFLAGS(%esp), %edi
	movl	PT_EAX(%esp), %esi
	movl	%edi, (%eax)
	movl	%esi, 4(%eax)

	/* Restore user registers and segments */
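	/*
	 * SYSEXIT hard-codes its destination registers: it loads %eip
	 * from %edx and %esp from %ecx, which is why ip and sp are
	 * staged in exactly those two registers below.
	 */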
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS

	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */

	/* Switch to entry stack */
	movl	%eax, %esp

	/* Now ready to switch the cr3 */
	SWITCH_TO_USER_CR3 scratch_reg=%eax

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	btrl	$X86_EFLAGS_IF_BIT, (%esp)
	BUG_IF_WRONG_CR3 no_user_check=1
	popfl
	popl	%eax

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
SYM_FUNC_END(entry_SYSENTER_32)

Andy Lutomirski | fda57b2 | 2016-03-09 19:00:35 -0800 | [diff] [blame] | 1044 | /* |
| 1045 | * 32-bit legacy system call entry. |
| 1046 | * |
| 1047 | * 32-bit x86 Linux system calls traditionally used the INT $0x80 |
| 1048 | * instruction. INT $0x80 lands here. |
| 1049 | * |
| 1050 |  * This entry point can be used by any 32-bit program to perform system calls. |
| 1051 | * Instances of INT $0x80 can be found inline in various programs and |
| 1052 | * libraries. It is also used by the vDSO's __kernel_vsyscall |
| 1053 | * fallback for hardware that doesn't support a faster entry method. |
| 1054 | * Restarted 32-bit system calls also fall back to INT $0x80 |
| 1055 | * regardless of what instruction was originally used to do the system |
| 1056 | * call. (64-bit programs can use INT $0x80 as well, but they can |
| 1057 | * only run on 64-bit kernels and therefore land in |
| 1058 | * entry_INT80_compat.) |
| 1059 | * |
| 1060 | * This is considered a slow path. It is not used by most libc |
| 1061 | * implementations on modern hardware except during process startup. |
| 1062 | * |
| 1063 | * Arguments: |
| 1064 | * eax system call number |
| 1065 | * ebx arg1 |
| 1066 | * ecx arg2 |
| 1067 | * edx arg3 |
| 1068 | * esi arg4 |
| 1069 | * edi arg5 |
| 1070 | * ebp arg6 |
| 1071 | */ |
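
The register convention above maps directly to inline assembly. A minimal userspace sketch (not kernel code; it assumes a 32-bit build and uses i386's __NR_write == 4) that drives this entry point:

	#include <unistd.h>

	int main(void)
	{
		static const char msg[] = "hello via int $0x80\n";
		long ret;

		/* %eax = syscall number, %ebx/%ecx/%edx = args 1-3, per the table above */
		asm volatile ("int $0x80"
			      : "=a" (ret)
			      : "a" (4),			/* __NR_write on i386 */
				"b" (STDOUT_FILENO),		/* fd */
				"c" (msg),			/* buf */
				"d" (sizeof(msg) - 1)		/* count */
			      : "memory");

		return ret < 0 ? 1 : 0;
	}

Compiled with gcc -m32, this lands in entry_INT80_32 on a 32-bit kernel (and in entry_INT80_compat on a 64-bit one, as noted above).
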
Jiri Slaby | 6d685e5 | 2019-10-11 13:51:07 +0200 | [diff] [blame] | 1072 | SYM_FUNC_START(entry_INT80_32) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1073 | ASM_CLAC |
Andy Lutomirski | 150ac78 | 2015-10-05 17:48:14 -0700 | [diff] [blame] | 1074 | pushl %eax /* pt_regs->orig_ax */ |
Joerg Roedel | 45d7b25 | 2018-07-18 11:40:44 +0200 | [diff] [blame] | 1075 | |
| 1076 | SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1 /* save rest */ |
Andy Lutomirski | 150ac78 | 2015-10-05 17:48:14 -0700 | [diff] [blame] | 1077 | |
| 1078 | /* |
Andy Lutomirski | a798f09 | 2016-03-09 13:24:32 -0800 | [diff] [blame] | 1079 | * User mode is traced as though IRQs are on, and the interrupt gate |
| 1080 | * turned them off. |
Andy Lutomirski | 150ac78 | 2015-10-05 17:48:14 -0700 | [diff] [blame] | 1081 | */ |
Andy Lutomirski | a798f09 | 2016-03-09 13:24:32 -0800 | [diff] [blame] | 1082 | TRACE_IRQS_OFF |
Andy Lutomirski | 150ac78 | 2015-10-05 17:48:14 -0700 | [diff] [blame] | 1083 | |
| 1084 | movl %esp, %eax |
Andy Lutomirski | a798f09 | 2016-03-09 13:24:32 -0800 | [diff] [blame] | 1085 | call do_int80_syscall_32 |
Andy Lutomirski | 5f310f7 | 2015-10-05 17:48:15 -0700 | [diff] [blame] | 1086 | .Lsyscall_32_done: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1087 | |
Alexander Popov | afaef01 | 2018-08-17 01:16:58 +0300 | [diff] [blame] | 1088 | STACKLEAK_ERASE |
| 1089 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1090 | restore_all: |
Thomas Gleixner | 810f80a | 2020-03-08 23:24:03 +0100 | [diff] [blame] | 1091 | TRACE_IRQS_ON |
Joerg Roedel | e5862d0 | 2018-07-18 11:40:45 +0200 | [diff] [blame] | 1092 | SWITCH_TO_ENTRY_STACK |
Joerg Roedel | 46eabca | 2018-07-18 11:40:41 +0200 | [diff] [blame] | 1093 | CHECK_AND_APPLY_ESPFIX |
Thomas Gleixner | 74a4882 | 2020-03-08 23:24:02 +0100 | [diff] [blame] | 1094 | |
Joerg Roedel | e464fb9 | 2018-07-18 11:40:49 +0200 | [diff] [blame] | 1095 | /* Switch back to user CR3 */ |
| 1096 | SWITCH_TO_USER_CR3 scratch_reg=%eax |
| 1097 | |
Joerg Roedel | 9719370 | 2018-07-18 11:41:16 +0200 | [diff] [blame] | 1098 | BUG_IF_WRONG_CR3 |
| 1099 | |
Joerg Roedel | e464fb9 | 2018-07-18 11:40:49 +0200 | [diff] [blame] | 1100 | /* Restore user state */ |
| 1101 | RESTORE_REGS pop=4 # skip orig_eax/error_code |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1102 | .Lirq_return: |
Mathieu Desnoyers | 10bcc80 | 2018-01-29 15:20:18 -0500 | [diff] [blame] | 1103 | /* |
| 1104 | 	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization |
| 1105 | 	 * when returning from an IPI handler and when returning from the |
| 1106 | 	 * scheduler to user-space. |
| 1107 | */ |
Ingo Molnar | 3701d863 | 2008-02-09 23:24:08 +0100 | [diff] [blame] | 1108 | INTERRUPT_RETURN |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1109 | |
Joerg Roedel | 0d2eb73 | 2018-07-18 11:40:43 +0200 | [diff] [blame] | 1110 | restore_all_kernel: |
Thomas Gleixner | 4859397 | 2019-07-26 23:19:42 +0200 | [diff] [blame] | 1111 | #ifdef CONFIG_PREEMPTION |
Peter Zijlstra | 5e1246f | 2019-05-07 23:25:54 +0200 | [diff] [blame] | 1112 | DISABLE_INTERRUPTS(CLBR_ANY) |
| 1113 | cmpl $0, PER_CPU_VAR(__preempt_count) |
| 1114 | jnz .Lno_preempt |
| 1115 | testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? |
| 1116 | jz .Lno_preempt |
| 1117 | call preempt_schedule_irq |
| 1118 | .Lno_preempt: |
| 1119 | #endif |
Joerg Roedel | 0d2eb73 | 2018-07-18 11:40:43 +0200 | [diff] [blame] | 1120 | TRACE_IRQS_IRET |
Joerg Roedel | b92a165 | 2018-07-18 11:40:47 +0200 | [diff] [blame] | 1121 | PARANOID_EXIT_TO_KERNEL_MODE |
Joerg Roedel | 9719370 | 2018-07-18 11:41:16 +0200 | [diff] [blame] | 1122 | BUG_IF_WRONG_CR3 |
Joerg Roedel | 0d2eb73 | 2018-07-18 11:40:43 +0200 | [diff] [blame] | 1123 | RESTORE_REGS 4 |
| 1124 | jmp .Lirq_return |
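
A hedged C model of the CONFIG_PREEMPTION gate above (standalone sketch; X86_EFLAGS_IF is the real EFLAGS bit, everything else is illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	#define X86_EFLAGS_IF (1u << 9)	/* EFLAGS interrupt-enable flag, bit 9 */

	/* Mirrors the cmpl/testl pair above: never preempt while the per-CPU
	 * preempt count is non-zero, or when the interrupted frame had IRQs off. */
	static bool may_preempt(unsigned int preempt_count, unsigned long pt_eflags)
	{
		return preempt_count == 0 && (pt_eflags & X86_EFLAGS_IF);
	}

	int main(void)
	{
		printf("%d\n", may_preempt(0, X86_EFLAGS_IF));	/* 1: call preempt_schedule_irq */
		printf("%d\n", may_preempt(1, X86_EFLAGS_IF));	/* 0: atomic context */
		printf("%d\n", may_preempt(0, 0));		/* 0: interrupted an irqs-off region */
		return 0;
	}
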
| 1125 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1126 | .section .fixup, "ax" |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1127 | SYM_CODE_START(iret_exc) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1128 | pushl $0 # no error code |
| 1129 | pushl $do_iret_error |
Joerg Roedel | 9719370 | 2018-07-18 11:41:16 +0200 | [diff] [blame] | 1130 | |
| 1131 | #ifdef CONFIG_DEBUG_ENTRY |
| 1132 | /* |
| 1133 | 	 * The stack frame here is the one that iret faulted on, so it's a |
| 1134 | * return-to-user frame. We are on kernel-cr3 because we come here from |
| 1135 | * the fixup code. This confuses the CR3 checker, so switch to user-cr3 |
| 1136 | * as the checker expects it. |
| 1137 | */ |
| 1138 | pushl %eax |
| 1139 | SWITCH_TO_USER_CR3 scratch_reg=%eax |
| 1140 | popl %eax |
| 1141 | #endif |
| 1142 | |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1143 | jmp common_exception |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1144 | SYM_CODE_END(iret_exc) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | .previous |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1146 | _ASM_EXTABLE(.Lirq_return, iret_exc) |
Jiri Slaby | 6d685e5 | 2019-10-11 13:51:07 +0200 | [diff] [blame] | 1147 | SYM_FUNC_END(entry_INT80_32) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1148 | |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1149 | .macro FIXUP_ESPFIX_STACK |
Alexander van Heukelum | dc4c2a0 | 2009-06-18 00:35:58 +0200 | [diff] [blame] | 1150 | /* |
| 1151 |  * Switch back from the ESPFIX stack to the normal zero-based stack |
| 1152 |  * |
| 1153 |  * We can't call C functions using the ESPFIX stack. This code reads the |
| 1154 |  * high word of the segment base from the GDT, switches to the normal |
| 1155 |  * stack, and adjusts ESP with the matching offset. (A C model of this math follows the macro.) |
Andy Lutomirski | 4a13b0e | 2019-11-24 08:50:03 -0800 | [diff] [blame] | 1156 | * |
| 1157 | * We might be on user CR3 here, so percpu data is not mapped and we can't |
| 1158 | * access the GDT through the percpu segment. Instead, use SGDT to find |
| 1159 | * the cpu_entry_area alias of the GDT. |
Alexander van Heukelum | dc4c2a0 | 2009-06-18 00:35:58 +0200 | [diff] [blame] | 1160 | */ |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1161 | #ifdef CONFIG_X86_ESPFIX32 |
Alexander van Heukelum | dc4c2a0 | 2009-06-18 00:35:58 +0200 | [diff] [blame] | 1162 | /* fixup the stack */ |
Andy Lutomirski | 4a13b0e | 2019-11-24 08:50:03 -0800 | [diff] [blame] | 1163 | pushl %ecx |
| 1164 | subl $2*4, %esp |
| 1165 | sgdt (%esp) |
| 1166 | movl 2(%esp), %ecx /* GDT address */ |
| 1167 | /* |
| 1168 | * Careful: ECX is a linear pointer, so we need to force base |
| 1169 | * zero. %cs is the only known-linear segment we have right now. |
| 1170 | */ |
| 1171 | mov %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al /* bits 16..23 */ |
| 1172 | mov %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah /* bits 24..31 */ |
Denys Vlasenko | 9b47feb | 2015-06-08 22:35:33 +0200 | [diff] [blame] | 1173 | shl $16, %eax |
Andy Lutomirski | 4a13b0e | 2019-11-24 08:50:03 -0800 | [diff] [blame] | 1174 | addl $2*4, %esp |
| 1175 | popl %ecx |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1176 | addl %esp, %eax /* the adjusted stack pointer */ |
| 1177 | pushl $__KERNEL_DS |
| 1178 | pushl %eax |
| 1179 | lss (%esp), %esp /* switch to the normal stack segment */ |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1180 | #endif |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1181 | .endm |
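
As a standalone illustration of the FIXUP_ESPFIX_STACK address math (the descriptor layout is real x86 - base bits 16..23 in byte 4, bits 24..31 in byte 7 - while the descriptor contents and addresses below are made up):

	#include <stdint.h>
	#include <stdio.h>

	/* Rebuild the high word of the segment base from a GDT descriptor and add
	 * the current ESP, exactly as the mov/mov/shl/addl sequence above does. */
	static uint32_t espfix_unwound_sp(const uint8_t desc[8], uint32_t esp)
	{
		uint32_t base_hi = ((uint32_t)desc[4] << 16) |	/* base bits 16..23 */
				   ((uint32_t)desc[7] << 24);	/* base bits 24..31 */
		return base_hi + esp;				/* adjusted stack pointer */
	}

	int main(void)
	{
		const uint8_t desc[8] = { 0, 0, 0, 0, 0x34, 0, 0, 0x12 };	/* hypothetical */
		printf("%#x\n", espfix_unwound_sp(desc, 0x1000));		/* prints 0x12341000 */
		return 0;
	}
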
Andy Lutomirski | a1a338e | 2019-11-20 10:10:49 +0100 | [diff] [blame] | 1182 | |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1183 | .macro UNWIND_ESPFIX_STACK |
Andy Lutomirski | a1a338e | 2019-11-20 10:10:49 +0100 | [diff] [blame] | 1184 | /* It's safe to clobber %eax, all other regs need to be preserved */ |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1185 | #ifdef CONFIG_X86_ESPFIX32 |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1186 | movl %ss, %eax |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1187 | /* see if on espfix stack */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1188 | cmpw $__ESPFIX_SS, %ax |
Andy Lutomirski | a1a338e | 2019-11-20 10:10:49 +0100 | [diff] [blame] | 1189 | jne .Lno_fixup_\@ |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1190 | /* switch to normal stack */ |
| 1191 | FIXUP_ESPFIX_STACK |
Andy Lutomirski | a1a338e | 2019-11-20 10:10:49 +0100 | [diff] [blame] | 1192 | .Lno_fixup_\@: |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1193 | #endif |
Tejun Heo | f0d9611 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1194 | .endm |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1195 | |
| 1196 | /* |
Denys Vlasenko | 3304c9c | 2015-04-03 21:49:13 +0200 | [diff] [blame] | 1197 | * Build the entry stubs with some assembler magic. |
| 1198 |  * We pack one stub into every 8-byte block. (The pushed vector encoding is modeled in C below.) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1199 | */ |
Denys Vlasenko | 3304c9c | 2015-04-03 21:49:13 +0200 | [diff] [blame] | 1200 | .align 8 |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1201 | SYM_CODE_START(irq_entries_start) |
Denys Vlasenko | 3304c9c | 2015-04-03 21:49:13 +0200 | [diff] [blame] | 1202 | vector=FIRST_EXTERNAL_VECTOR |
| 1203 | .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1204 | pushl $(~vector+0x80) /* Note: always in signed byte range */ |
Denys Vlasenko | 3304c9c | 2015-04-03 21:49:13 +0200 | [diff] [blame] | 1205 | vector=vector+1 |
| 1206 | jmp common_interrupt |
Denys Vlasenko | 3304c9c | 2015-04-03 21:49:13 +0200 | [diff] [blame] | 1207 | .align 8 |
| 1208 | .endr |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1209 | SYM_CODE_END(irq_entries_start) |
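
The $(~vector+0x80) immediate always fits a signed byte, so the short pushl keeps each stub within its 8-byte slot. A standalone check of the encode/decode roundtrip over all external and system vectors (assuming the conventional decode on the C side, vector = ~orig_ax):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		for (unsigned int vector = 0x20 /* FIRST_EXTERNAL_VECTOR */; vector < 256; vector++) {
			int32_t imm = ~(int32_t)vector + 0x80;	/* what the stub pushes */
			assert(imm >= -128 && imm <= 127);	/* always in signed byte range */

			int32_t orig_ax = imm - 0x80;		/* addl $-0x80 in common_interrupt */
			assert((uint8_t)~orig_ax == vector);	/* the handler's recovery */
		}
		printf("all vectors roundtrip\n");
		return 0;
	}
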
Jan Beulich | 47a55cd | 2007-02-13 13:26:24 +0100 | [diff] [blame] | 1210 | |
Thomas Gleixner | f8a8fe6 | 2019-06-28 13:11:54 +0200 | [diff] [blame] | 1211 | #ifdef CONFIG_X86_LOCAL_APIC |
| 1212 | .align 8 |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1213 | SYM_CODE_START(spurious_entries_start) |
Thomas Gleixner | f8a8fe6 | 2019-06-28 13:11:54 +0200 | [diff] [blame] | 1214 | vector=FIRST_SYSTEM_VECTOR |
| 1215 | .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR) |
| 1216 | pushl $(~vector+0x80) /* Note: always in signed byte range */ |
| 1217 | vector=vector+1 |
| 1218 | jmp common_spurious |
| 1219 | .align 8 |
| 1220 | .endr |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1221 | SYM_CODE_END(spurious_entries_start) |
Thomas Gleixner | f8a8fe6 | 2019-06-28 13:11:54 +0200 | [diff] [blame] | 1222 | |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1223 | SYM_CODE_START_LOCAL(common_spurious) |
Thomas Gleixner | f8a8fe6 | 2019-06-28 13:11:54 +0200 | [diff] [blame] | 1224 | ASM_CLAC |
| 1225 | addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ |
| 1226 | SAVE_ALL switch_stacks=1 |
| 1227 | ENCODE_FRAME_POINTER |
| 1228 | TRACE_IRQS_OFF |
| 1229 | movl %esp, %eax |
| 1230 | call smp_spurious_interrupt |
| 1231 | jmp ret_from_intr |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1232 | SYM_CODE_END(common_spurious) |
Thomas Gleixner | f8a8fe6 | 2019-06-28 13:11:54 +0200 | [diff] [blame] | 1233 | #endif |
| 1234 | |
Ingo Molnar | 55f327f | 2006-07-03 00:24:43 -0700 | [diff] [blame] | 1235 | /* |
| 1236 | * the CPU automatically disables interrupts when executing an IRQ vector, |
| 1237 | * so IRQ-flags tracing has to follow that: |
| 1238 | */ |
H. Peter Anvin | b7c6244 | 2008-11-11 13:24:58 -0800 | [diff] [blame] | 1239 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1240 | SYM_CODE_START_LOCAL(common_interrupt) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1241 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1242 | addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ |
Joerg Roedel | 45d7b25 | 2018-07-18 11:40:44 +0200 | [diff] [blame] | 1243 | |
| 1244 | SAVE_ALL switch_stacks=1 |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1245 | ENCODE_FRAME_POINTER |
Ingo Molnar | 55f327f | 2006-07-03 00:24:43 -0700 | [diff] [blame] | 1246 | TRACE_IRQS_OFF |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1247 | movl %esp, %eax |
| 1248 | call do_IRQ |
| 1249 | jmp ret_from_intr |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1250 | SYM_CODE_END(common_interrupt) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1251 | |
Joerg Roedel | 45d7b25 | 2018-07-18 11:40:44 +0200 | [diff] [blame] | 1252 | #define BUILD_INTERRUPT3(name, nr, fn) \ |
Jiri Slaby | 6d685e5 | 2019-10-11 13:51:07 +0200 | [diff] [blame] | 1253 | SYM_FUNC_START(name) \ |
Joerg Roedel | 45d7b25 | 2018-07-18 11:40:44 +0200 | [diff] [blame] | 1254 | ASM_CLAC; \ |
| 1255 | pushl $~(nr); \ |
| 1256 | SAVE_ALL switch_stacks=1; \ |
| 1257 | ENCODE_FRAME_POINTER; \ |
| 1258 | TRACE_IRQS_OFF \ |
| 1259 | movl %esp, %eax; \ |
| 1260 | call fn; \ |
| 1261 | jmp ret_from_intr; \ |
Jiri Slaby | 6d685e5 | 2019-10-11 13:51:07 +0200 | [diff] [blame] | 1262 | SYM_FUNC_END(name) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1264 | #define BUILD_INTERRUPT(name, nr) \ |
| 1265 | BUILD_INTERRUPT3(name, nr, smp_##name); \ |
Tejun Heo | 02cf94c | 2009-01-21 17:26:06 +0900 | [diff] [blame] | 1266 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | /* The include is where all of the SMP etc. interrupts come from */ |
Ingo Molnar | 1164dd0 | 2009-01-28 19:34:09 +0100 | [diff] [blame] | 1268 | #include <asm/entry_arch.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1270 | SYM_CODE_START(coprocessor_error) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1271 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1272 | pushl $0 |
| 1273 | pushl $do_coprocessor_error |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1274 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1275 | SYM_CODE_END(coprocessor_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1276 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1277 | SYM_CODE_START(simd_coprocessor_error) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1278 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1279 | pushl $0 |
Brian Gerst | 40d2e76 | 2010-03-21 09:00:43 -0400 | [diff] [blame] | 1280 | #ifdef CONFIG_X86_INVD_BUG |
| 1281 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1282 | ALTERNATIVE "pushl $do_general_protection", \ |
| 1283 | "pushl $do_simd_coprocessor_error", \ |
Borislav Petkov | 8e65f6e | 2015-01-18 12:35:55 +0100 | [diff] [blame] | 1284 | X86_FEATURE_XMM |
Brian Gerst | 40d2e76 | 2010-03-21 09:00:43 -0400 | [diff] [blame] | 1285 | #else |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1286 | pushl $do_simd_coprocessor_error |
Brian Gerst | 40d2e76 | 2010-03-21 09:00:43 -0400 | [diff] [blame] | 1287 | #endif |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1288 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1289 | SYM_CODE_END(simd_coprocessor_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1290 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1291 | SYM_CODE_START(device_not_available) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1292 | ASM_CLAC |
Thomas Gleixner | e441a2a | 2020-02-27 15:24:29 +0100 | [diff] [blame] | 1293 | pushl $0 |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1294 | pushl $do_device_not_available |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1295 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1296 | SYM_CODE_END(device_not_available) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1297 | |
Rusty Russell | d3561b7 | 2006-12-07 02:14:07 +0100 | [diff] [blame] | 1298 | #ifdef CONFIG_PARAVIRT |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1299 | SYM_CODE_START(native_iret) |
Ingo Molnar | 3701d863 | 2008-02-09 23:24:08 +0100 | [diff] [blame] | 1300 | iret |
H. Peter Anvin | 6837a54 | 2012-04-20 12:19:50 -0700 | [diff] [blame] | 1301 | _ASM_EXTABLE(native_iret, iret_exc) |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1302 | SYM_CODE_END(native_iret) |
Rusty Russell | d3561b7 | 2006-12-07 02:14:07 +0100 | [diff] [blame] | 1303 | #endif |
| 1304 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1305 | SYM_CODE_START(overflow) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1306 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1307 | pushl $0 |
| 1308 | pushl $do_overflow |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1309 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1310 | SYM_CODE_END(overflow) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1311 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1312 | SYM_CODE_START(bounds) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1313 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1314 | pushl $0 |
| 1315 | pushl $do_bounds |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1316 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1317 | SYM_CODE_END(bounds) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1318 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1319 | SYM_CODE_START(invalid_op) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1320 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1321 | pushl $0 |
| 1322 | pushl $do_invalid_op |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1323 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1324 | SYM_CODE_END(invalid_op) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1325 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1326 | SYM_CODE_START(coprocessor_segment_overrun) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1327 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1328 | pushl $0 |
| 1329 | pushl $do_coprocessor_segment_overrun |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1330 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1331 | SYM_CODE_END(coprocessor_segment_overrun) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1332 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1333 | SYM_CODE_START(invalid_TSS) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1334 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1335 | pushl $do_invalid_TSS |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1336 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1337 | SYM_CODE_END(invalid_TSS) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1338 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1339 | SYM_CODE_START(segment_not_present) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1340 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1341 | pushl $do_segment_not_present |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1342 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1343 | SYM_CODE_END(segment_not_present) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1344 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1345 | SYM_CODE_START(stack_segment) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1346 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1347 | pushl $do_stack_segment |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1348 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1349 | SYM_CODE_END(stack_segment) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1350 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1351 | SYM_CODE_START(alignment_check) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1352 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1353 | pushl $do_alignment_check |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1354 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1355 | SYM_CODE_END(alignment_check) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1356 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1357 | SYM_CODE_START(divide_error) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1358 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1359 | pushl $0 # no error code |
| 1360 | pushl $do_divide_error |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1361 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1362 | SYM_CODE_END(divide_error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1363 | |
| 1364 | #ifdef CONFIG_X86_MCE |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1365 | SYM_CODE_START(machine_check) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1366 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1367 | pushl $0 |
Thomas Gleixner | 840371b | 2020-02-25 22:36:39 +0100 | [diff] [blame] | 1368 | pushl $do_mce |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1369 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1370 | SYM_CODE_END(machine_check) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1371 | #endif |
| 1372 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1373 | SYM_CODE_START(spurious_interrupt_bug) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1374 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1375 | pushl $0 |
| 1376 | pushl $do_spurious_interrupt_bug |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1377 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1378 | SYM_CODE_END(spurious_interrupt_bug) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1379 | |
Juergen Gross | 28c11b0 | 2018-08-28 09:40:12 +0200 | [diff] [blame] | 1380 | #ifdef CONFIG_XEN_PV |
Jiri Slaby | 6d685e5 | 2019-10-11 13:51:07 +0200 | [diff] [blame] | 1381 | SYM_FUNC_START(xen_hypervisor_callback) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1382 | /* |
| 1383 | * Check to see if we got the event in the critical |
| 1384 | * region in xen_iret_direct, after we've reenabled |
| 1385 | * events and checked for pending events. This simulates |
| 1386 | 	 * the iret instruction's behaviour, where it delivers a |
| 1387 | * pending interrupt when enabling interrupts: |
| 1388 | */ |
Jan Beulich | 29b810f | 2019-11-11 15:32:12 +0100 | [diff] [blame] | 1389 | cmpl $xen_iret_start_crit, (%esp) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1390 | jb 1f |
Jan Beulich | 29b810f | 2019-11-11 15:32:12 +0100 | [diff] [blame] | 1391 | cmpl $xen_iret_end_crit, (%esp) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1392 | jae 1f |
Jan Beulich | 29b810f | 2019-11-11 15:32:12 +0100 | [diff] [blame] | 1393 | call xen_iret_crit_fixup |
| 1394 | 1: |
| 1395 | pushl $-1 /* orig_ax = -1 => not a system call */ |
| 1396 | SAVE_ALL |
| 1397 | ENCODE_FRAME_POINTER |
| 1398 | TRACE_IRQS_OFF |
| 1399 | mov %esp, %eax |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1400 | call xen_evtchn_do_upcall |
Thomas Gleixner | 4859397 | 2019-07-26 23:19:42 +0200 | [diff] [blame] | 1401 | #ifndef CONFIG_PREEMPTION |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1402 | call xen_maybe_preempt_hcall |
David Vrabel | fdfd811 | 2015-02-19 15:23:17 +0000 | [diff] [blame] | 1403 | #endif |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1404 | jmp ret_from_intr |
Jiri Slaby | 6d685e5 | 2019-10-11 13:51:07 +0200 | [diff] [blame] | 1405 | SYM_FUNC_END(xen_hypervisor_callback) |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1406 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1407 | /* |
| 1408 | * Hypervisor uses this for application faults while it executes. |
| 1409 | * We get here for two reasons: |
| 1410 | * 1. Fault while reloading DS, ES, FS or GS |
| 1411 | * 2. Fault while executing IRET |
| 1412 | * Category 1 we fix up by reattempting the load, and zeroing the segment |
| 1413 | * register if the load fails. |
| 1414 | * Category 2 we fix up by jumping to do_iret_error. We cannot use the |
| 1415 | * normal Linux return path in this case because if we use the IRET hypercall |
| 1416 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. |
| 1417 | * We distinguish between categories by maintaining a status value in EAX. |
| 1418 | */ |
Jiri Slaby | 6d685e5 | 2019-10-11 13:51:07 +0200 | [diff] [blame] | 1419 | SYM_FUNC_START(xen_failsafe_callback) |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1420 | pushl %eax |
| 1421 | movl $1, %eax |
| 1422 | 1: mov 4(%esp), %ds |
| 1423 | 2: mov 8(%esp), %es |
| 1424 | 3: mov 12(%esp), %fs |
| 1425 | 4: mov 16(%esp), %gs |
David Vrabel | a349e23d1 | 2012-10-19 17:29:07 +0100 | [diff] [blame] | 1426 | /* EAX == 0 => Category 1 (Bad segment) |
| 1427 | EAX != 0 => Category 2 (Bad IRET) */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1428 | testl %eax, %eax |
| 1429 | popl %eax |
| 1430 | lea 16(%esp), %esp |
| 1431 | jz 5f |
| 1432 | jmp iret_exc |
| 1433 | 5: pushl $-1 /* orig_ax = -1 => not a system call */ |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1434 | SAVE_ALL |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1435 | ENCODE_FRAME_POINTER |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1436 | jmp ret_from_exception |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1437 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1438 | .section .fixup, "ax" |
| 1439 | 6: xorl %eax, %eax |
| 1440 | movl %eax, 4(%esp) |
| 1441 | jmp 1b |
| 1442 | 7: xorl %eax, %eax |
| 1443 | movl %eax, 8(%esp) |
| 1444 | jmp 2b |
| 1445 | 8: xorl %eax, %eax |
| 1446 | movl %eax, 12(%esp) |
| 1447 | jmp 3b |
| 1448 | 9: xorl %eax, %eax |
| 1449 | movl %eax, 16(%esp) |
| 1450 | jmp 4b |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1451 | .previous |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1452 | _ASM_EXTABLE(1b, 6b) |
| 1453 | _ASM_EXTABLE(2b, 7b) |
| 1454 | _ASM_EXTABLE(3b, 8b) |
| 1455 | _ASM_EXTABLE(4b, 9b) |
Jiri Slaby | 6d685e5 | 2019-10-11 13:51:07 +0200 | [diff] [blame] | 1456 | SYM_FUNC_END(xen_failsafe_callback) |
Juergen Gross | 28c11b0 | 2018-08-28 09:40:12 +0200 | [diff] [blame] | 1457 | #endif /* CONFIG_XEN_PV */ |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1458 | |
Juergen Gross | 28c11b0 | 2018-08-28 09:40:12 +0200 | [diff] [blame] | 1459 | #ifdef CONFIG_XEN_PVHVM |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1460 | BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
Thomas Gleixner | 4b9a8dc | 2017-08-28 08:47:31 +0200 | [diff] [blame] | 1461 | xen_evtchn_do_upcall) |
Juergen Gross | 28c11b0 | 2018-08-28 09:40:12 +0200 | [diff] [blame] | 1462 | #endif |
Sheng Yang | 38e20b0 | 2010-05-14 12:40:51 +0100 | [diff] [blame] | 1463 | |
Jeremy Fitzhardinge | 5ead97c | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 1464 | |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1465 | #if IS_ENABLED(CONFIG_HYPERV) |
| 1466 | |
| 1467 | BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
Thomas Gleixner | 4b9a8dc | 2017-08-28 08:47:31 +0200 | [diff] [blame] | 1468 | hyperv_vector_handler) |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1469 | |
Vitaly Kuznetsov | 9328626 | 2018-01-24 14:23:33 +0100 | [diff] [blame] | 1470 | BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR, |
| 1471 | hyperv_reenlightenment_intr) |
| 1472 | |
Michael Kelley | 248e742 | 2018-03-04 22:17:18 -0700 | [diff] [blame] | 1473 | BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR, |
| 1474 | hv_stimer0_vector_handler) |
| 1475 | |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1476 | #endif /* CONFIG_HYPERV */ |
| 1477 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1478 | SYM_CODE_START(page_fault) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1479 | ASM_CLAC |
Matt Mullins | b8f7095 | 2019-07-23 21:20:58 -0700 | [diff] [blame] | 1480 | pushl $do_page_fault |
| 1481 | jmp common_exception_read_cr2 |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1482 | SYM_CODE_END(page_fault) |
Peter Zijlstra | a0d14b8 | 2019-07-11 13:40:59 +0200 | [diff] [blame] | 1483 | |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1484 | SYM_CODE_START_LOCAL_NOALIGN(common_exception_read_cr2) |
Matt Mullins | b8f7095 | 2019-07-23 21:20:58 -0700 | [diff] [blame] | 1485 | /* the function address is in %gs's slot on the stack */ |
Andy Lutomirski | a1a338e | 2019-11-20 10:10:49 +0100 | [diff] [blame] | 1486 | SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 |
Peter Zijlstra | a0d14b8 | 2019-07-11 13:40:59 +0200 | [diff] [blame] | 1487 | |
| 1488 | ENCODE_FRAME_POINTER |
Peter Zijlstra | a0d14b8 | 2019-07-11 13:40:59 +0200 | [diff] [blame] | 1489 | |
| 1490 | /* fixup %gs */ |
| 1491 | GS_TO_REG %ecx |
Matt Mullins | b8f7095 | 2019-07-23 21:20:58 -0700 | [diff] [blame] | 1492 | movl PT_GS(%esp), %edi |
Peter Zijlstra | a0d14b8 | 2019-07-11 13:40:59 +0200 | [diff] [blame] | 1493 | REG_TO_PTGS %ecx |
| 1494 | SET_KERNEL_GS %ecx |
| 1495 | |
| 1496 | GET_CR2_INTO(%ecx) # might clobber %eax |
| 1497 | |
| 1498 | /* fixup orig %eax */ |
| 1499 | movl PT_ORIG_EAX(%esp), %edx # get the error code |
| 1500 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart |
| 1501 | |
| 1502 | TRACE_IRQS_OFF |
| 1503 | movl %esp, %eax # pt_regs pointer |
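| 	 | 	/* |
| 	 | 	 * With the 32-bit kernel's regparm(3) convention, %eax, %edx and |
| 	 | 	 * %ecx carry the handler's three arguments: pt_regs, the error |
| 	 | 	 * code and the CR2 value read above. |
| 	 | 	 */ |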
Matt Mullins | b8f7095 | 2019-07-23 21:20:58 -0700 | [diff] [blame] | 1504 | CALL_NOSPEC %edi |
Peter Zijlstra | a0d14b8 | 2019-07-11 13:40:59 +0200 | [diff] [blame] | 1505 | jmp ret_from_exception |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1506 | SYM_CODE_END(common_exception_read_cr2) |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1507 | |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1508 | SYM_CODE_START_LOCAL_NOALIGN(common_exception) |
Tejun Heo | ccbeed3 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1509 | /* the function address is in %gs's slot on the stack */ |
Andy Lutomirski | a1a338e | 2019-11-20 10:10:49 +0100 | [diff] [blame] | 1510 | SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1 |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1511 | ENCODE_FRAME_POINTER |
Peter Zijlstra | e67f1c1 | 2019-07-11 13:40:56 +0200 | [diff] [blame] | 1512 | |
| 1513 | /* fixup %gs */ |
Tejun Heo | ccbeed3 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1514 | GS_TO_REG %ecx |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1515 | movl PT_GS(%esp), %edi # get the function address |
Tejun Heo | ccbeed3 | 2009-02-09 22:17:40 +0900 | [diff] [blame] | 1516 | REG_TO_PTGS %ecx |
| 1517 | SET_KERNEL_GS %ecx |
Peter Zijlstra | e67f1c1 | 2019-07-11 13:40:56 +0200 | [diff] [blame] | 1518 | |
| 1519 | /* fixup orig %eax */ |
| 1520 | movl PT_ORIG_EAX(%esp), %edx # get the error code |
| 1521 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart |
| 1522 | |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1523 | TRACE_IRQS_OFF |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1524 | movl %esp, %eax # pt_regs pointer |
David Woodhouse | 2641f08 | 2018-01-11 21:46:28 +0000 | [diff] [blame] | 1525 | CALL_NOSPEC %edi |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1526 | jmp ret_from_exception |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1527 | SYM_CODE_END(common_exception) |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1528 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1529 | SYM_CODE_START(debug) |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1530 | /* |
Joerg Roedel | 929b44e | 2018-07-18 11:40:48 +0200 | [diff] [blame] | 1531 | * Entry from sysenter is now handled in common_exception |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1532 | */ |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1533 | ASM_CLAC |
Thomas Gleixner | e441a2a | 2020-02-27 15:24:29 +0100 | [diff] [blame] | 1534 | pushl $0 |
Joerg Roedel | 929b44e | 2018-07-18 11:40:48 +0200 | [diff] [blame] | 1535 | pushl $do_debug |
| 1536 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1537 | SYM_CODE_END(debug) |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1538 | |
Andy Lutomirski | 7d8d8cf | 2019-11-20 23:06:41 -0800 | [diff] [blame] | 1539 | #ifdef CONFIG_DOUBLEFAULT |
| 1540 | SYM_CODE_START(double_fault) |
| 1541 | 1: |
| 1542 | /* |
| 1543 | * This is a task gate handler, not an interrupt gate handler. |
| 1544 | * The error code is on the stack, but the stack is otherwise |
| 1545 | * empty. Interrupts are off. Our state is sane with the following |
| 1546 | * exceptions: |
| 1547 | * |
| 1548 | * - CR0.TS is set. "TS" literally means "task switched". |
| 1549 | * - EFLAGS.NT is set because we're a "nested task". |
| 1550 | * - The doublefault TSS has back_link set and has been marked busy. |
| 1551 | * - TR points to the doublefault TSS and the normal TSS is busy. |
| 1552 | * - CR3 is the normal kernel PGD. This would be delightful, except |
| 1553 | * that the CPU didn't bother to save the old CR3 anywhere. This |
| 1554 | 	 *   would make it very awkward to return to the context we came |
| 1555 | * from. |
| 1556 | * |
| 1557 | * The rest of EFLAGS is sanitized for us, so we don't need to |
| 1558 | * worry about AC or DF. |
| 1559 | * |
| 1560 | * Don't even bother popping the error code. It's always zero, |
| 1561 | * and ignoring it makes us a bit more robust against buggy |
| 1562 | * hypervisor task gate implementations. |
| 1563 | * |
| 1564 | * We will manually undo the task switch instead of doing a |
| 1565 | * task-switching IRET. |
| 1566 | */ |
| 1567 | |
| 1568 | clts /* clear CR0.TS */ |
| 1569 | pushl $X86_EFLAGS_FIXED |
| 1570 | popfl /* clear EFLAGS.NT */ |
| 1571 | |
| 1572 | call doublefault_shim |
| 1573 | |
| 1574 | /* We don't support returning, so we have no IRET here. */ |
| 1575 | 1: |
| 1576 | hlt |
| 1577 | jmp 1b |
| 1578 | SYM_CODE_END(double_fault) |
| 1579 | #endif |
| 1580 | |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1581 | /* |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1582 | * NMI is doubly nasty. It can happen on the first instruction of |
| 1583 | * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning |
| 1584 | * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32 |
| 1585 | * switched stacks. We handle both conditions by simply checking whether we |
| 1586 | * interrupted kernel code running on the SYSENTER stack. |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1587 | */ |
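
A hedged standalone model of the entry-stack check at the top of the NMI body below (the unsigned-compare trick is the code's; the addresses are made up):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors: ecx = (end of entry_stack) - esp; cmpl $SIZEOF_entry_stack; jb.
	 * One unsigned compare covers both bounds: pointers above the end wrap to
	 * huge values, pointers below the start exceed the size. */
	static bool on_entry_stack(uint32_t esp, uint32_t stack_end, uint32_t size)
	{
		return (uint32_t)(stack_end - esp) < size;
	}

	int main(void)
	{
		const uint32_t end = 0xc0001000u, size = 0x1000u;	/* hypothetical */
		printf("%d\n", on_entry_stack(0xc0000800u, end, size));	/* 1: inside */
		printf("%d\n", on_entry_stack(0xb0000000u, end, size));	/* 0: below */
		printf("%d\n", on_entry_stack(0xc0002000u, end, size));	/* 0: above */
		return 0;
	}
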
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1588 | SYM_CODE_START(nmi) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1589 | ASM_CLAC |
Joerg Roedel | 45d7b25 | 2018-07-18 11:40:44 +0200 | [diff] [blame] | 1590 | |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1591 | #ifdef CONFIG_X86_ESPFIX32 |
Peter Zijlstra | 8954290 | 2019-11-20 15:02:26 +0100 | [diff] [blame] | 1592 | /* |
| 1593 | * ESPFIX_SS is only ever set on the return to user path |
| 1594 | * after we've switched to the entry stack. |
| 1595 | */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1596 | pushl %eax |
| 1597 | movl %ss, %eax |
| 1598 | cmpw $__ESPFIX_SS, %ax |
| 1599 | popl %eax |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1600 | je .Lnmi_espfix_stack |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1601 | #endif |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1602 | |
| 1603 | pushl %eax # pt_regs->orig_ax |
Joerg Roedel | b65bef4 | 2018-07-18 11:40:50 +0200 | [diff] [blame] | 1604 | SAVE_ALL_NMI cr3_reg=%edi |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1605 | ENCODE_FRAME_POINTER |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1606 | xorl %edx, %edx # zero error code |
| 1607 | movl %esp, %eax # pt_regs pointer |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1608 | |
| 1609 | /* Are we currently on the SYSENTER stack? */ |
Andy Lutomirski | 72f5e08 | 2017-12-04 15:07:20 +0100 | [diff] [blame] | 1610 | movl PER_CPU_VAR(cpu_entry_area), %ecx |
Dave Hansen | 4fe2d8b | 2017-12-04 17:25:07 -0800 | [diff] [blame] | 1611 | addl $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx |
| 1612 | subl %eax, %ecx /* ecx = (end of entry_stack) - esp */ |
| 1613 | cmpl $SIZEOF_entry_stack, %ecx |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1614 | jb .Lnmi_from_sysenter_stack |
| 1615 | |
| 1616 | /* Not on SYSENTER stack. */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1617 | call do_nmi |
Joerg Roedel | 8e676ce | 2018-07-18 11:40:42 +0200 | [diff] [blame] | 1618 | jmp .Lnmi_return |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1619 | |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1620 | .Lnmi_from_sysenter_stack: |
| 1621 | /* |
| 1622 | * We're on the SYSENTER stack. Switch off. No one (not even debug) |
| 1623 | * is using the thread stack right now, so it's safe for us to use it. |
| 1624 | */ |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1625 | movl %esp, %ebx |
Andy Lutomirski | 7536656 | 2016-03-09 19:00:32 -0800 | [diff] [blame] | 1626 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esp |
| 1627 | call do_nmi |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1628 | movl %ebx, %esp |
Joerg Roedel | 8e676ce | 2018-07-18 11:40:42 +0200 | [diff] [blame] | 1629 | |
| 1630 | .Lnmi_return: |
Peter Zijlstra | 8954290 | 2019-11-20 15:02:26 +0100 | [diff] [blame] | 1631 | #ifdef CONFIG_X86_ESPFIX32 |
| 1632 | testl $CS_FROM_ESPFIX, PT_CS(%esp) |
| 1633 | jnz .Lnmi_from_espfix |
| 1634 | #endif |
| 1635 | |
Joerg Roedel | 8e676ce | 2018-07-18 11:40:42 +0200 | [diff] [blame] | 1636 | CHECK_AND_APPLY_ESPFIX |
Joerg Roedel | b65bef4 | 2018-07-18 11:40:50 +0200 | [diff] [blame] | 1637 | RESTORE_ALL_NMI cr3_reg=%edi pop=4 |
Joerg Roedel | 8e676ce | 2018-07-18 11:40:42 +0200 | [diff] [blame] | 1638 | jmp .Lirq_return |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1639 | |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1640 | #ifdef CONFIG_X86_ESPFIX32 |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1641 | .Lnmi_espfix_stack: |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 1642 | /* |
Peter Zijlstra | 8954290 | 2019-11-20 15:02:26 +0100 | [diff] [blame] | 1643 | 	 * Create the %ss:%esp far pointer for LSS to switch back to the ESPFIX stack |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1644 | */ |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1645 | pushl %ss |
| 1646 | pushl %esp |
| 1647 | addl $4, (%esp) |
Peter Zijlstra | 8954290 | 2019-11-20 15:02:26 +0100 | [diff] [blame] | 1648 | |
| 1649 | /* Copy the (short) IRET frame */ |
| 1650 | pushl 4*4(%esp) # flags |
| 1651 | pushl 4*4(%esp) # cs |
| 1652 | pushl 4*4(%esp) # ip |
| 1653 | |
| 1654 | pushl %eax # orig_ax |
| 1655 | |
| 1656 | SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1 |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1657 | ENCODE_FRAME_POINTER |
Peter Zijlstra | 8954290 | 2019-11-20 15:02:26 +0100 | [diff] [blame] | 1658 | |
| 1659 | /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */ |
| 1660 | xorl $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp) |
| 1661 | |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1662 | xorl %edx, %edx # zero error code |
Peter Zijlstra | 8954290 | 2019-11-20 15:02:26 +0100 | [diff] [blame] | 1663 | movl %esp, %eax # pt_regs pointer |
| 1664 | jmp .Lnmi_from_sysenter_stack |
| 1665 | |
| 1666 | .Lnmi_from_espfix: |
Joerg Roedel | b65bef4 | 2018-07-18 11:40:50 +0200 | [diff] [blame] | 1667 | RESTORE_ALL_NMI cr3_reg=%edi |
Peter Zijlstra | 8954290 | 2019-11-20 15:02:26 +0100 | [diff] [blame] | 1668 | /* |
| 1669 | * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to |
| 1670 | * fix up the gap and long frame: |
| 1671 | * |
| 1672 | * 3 - original frame (exception) |
| 1673 | * 2 - ESPFIX block (above) |
| 1674 | * 6 - gap (FIXUP_FRAME) |
| 1675 | * 5 - long frame (FIXUP_FRAME) |
| 1676 | * 1 - orig_ax |
| 1677 | */ |
| 1678 | 	lss	(1+5+6)*4(%esp), %esp			# skip 1+5+6 = 12 dwords to the saved %ss:%esp, back to espfix stack |
Josh Poimboeuf | 1b00255 | 2016-09-21 16:03:59 -0500 | [diff] [blame] | 1679 | jmp .Lirq_return |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 1680 | #endif |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1681 | SYM_CODE_END(nmi) |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1682 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1683 | SYM_CODE_START(int3) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1684 | ASM_CLAC |
Thomas Gleixner | e441a2a | 2020-02-27 15:24:29 +0100 | [diff] [blame] | 1685 | pushl $0 |
Thomas Gleixner | ac3607f | 2020-02-25 22:36:45 +0100 | [diff] [blame] | 1686 | pushl $do_int3 |
| 1687 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1688 | SYM_CODE_END(int3) |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1689 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1690 | SYM_CODE_START(general_protection) |
Thomas Gleixner | 3d51507 | 2020-02-25 22:36:37 +0100 | [diff] [blame] | 1691 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1692 | pushl $do_general_protection |
Josh Poimboeuf | 7252c4c | 2016-09-21 16:04:00 -0500 | [diff] [blame] | 1693 | jmp common_exception |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1694 | SYM_CODE_END(general_protection) |
Alexander van Heukelum | d211af0 | 2008-11-24 15:38:45 +0100 | [diff] [blame] | 1695 | |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 1696 | #ifdef CONFIG_KVM_GUEST |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1697 | SYM_CODE_START(async_page_fault) |
H. Peter Anvin | e59d1b0 | 2012-09-21 13:58:10 -0700 | [diff] [blame] | 1698 | ASM_CLAC |
Ingo Molnar | a49976d | 2015-06-08 09:49:11 +0200 | [diff] [blame] | 1699 | pushl $do_async_page_fault |
Matt Mullins | b8f7095 | 2019-07-23 21:20:58 -0700 | [diff] [blame] | 1700 | jmp common_exception_read_cr2 |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1701 | SYM_CODE_END(async_page_fault) |
Gleb Natapov | 631bc48 | 2010-10-14 11:22:52 +0200 | [diff] [blame] | 1702 | #endif |
Andy Lutomirski | 2deb4be | 2016-07-14 13:22:55 -0700 | [diff] [blame] | 1703 | |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1704 | SYM_CODE_START(rewind_stack_do_exit) |
Andy Lutomirski | 2deb4be | 2016-07-14 13:22:55 -0700 | [diff] [blame] | 1705 | /* Prevent any naive code from trying to unwind to our caller. */ |
| 1706 | xorl %ebp, %ebp |
| 1707 | |
| 1708 | movl PER_CPU_VAR(cpu_current_top_of_stack), %esi |
| 1709 | leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp |
| 1710 | |
| 1711 | call do_exit |
| 1712 | 1: jmp 1b |
Jiri Slaby | 5e63306 | 2019-10-11 13:51:06 +0200 | [diff] [blame] | 1713 | SYM_CODE_END(rewind_stack_do_exit) |