/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - SYM_FUNC_START/END: Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
SYM_CODE_START(native_usergs_sysret64)
	UNWIND_HINT_EMPTY
	swapgs
	sysretq
SYM_CODE_END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
	btl	$9, \flags		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

.macro TRACE_IRQS_IRETQ
	TRACE_IRQS_FLAGS EFLAGS(%rsp)
.endm

/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. During this time,
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the IST-protected update window (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	btl	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls. The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries. There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is because
 * IRET deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
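/*
 * A minimal user-space sketch (not kernel code; msg/len are hypothetical
 * symbols) of what reaches this entry point, following the register
 * convention above:
 *
 *	movq	$1, %rax		# __NR_write
 *	movq	$1, %rdi		# arg0: fd = stdout
 *	leaq	msg(%rip), %rsi		# arg1: buffer
 *	movq	$len, %rdx		# arg2: count
 *	syscall				# rcx := rip, r11 := rflags
 */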

SYM_CODE_START(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */

	swapgs
	/* tss.sp2 is scratch space. */
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	/* IRQs are off. */
	movq	%rax, %rdi
	movq	%rsp, %rsi
	call	do_syscall_64		/* returns with IRQs disabled */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context. If we're not,
	 * go to the slow exit path.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space. This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif
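	/*
	 * Worked example for the 4-level case (__VIRTUAL_MASK_SHIFT == 47,
	 * so both shifts are by 16): the canonical address
	 * 0xffff800000000000 survives shl/sar unchanged, while the
	 * non-canonical 0x0000800000000000 becomes 0xffff800000000000,
	 * so the cmpq below sees %rcx != %r11 and falls back to IRET.
	 */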

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET. This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions. For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack. All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	USERGS_SYSRET64
SYM_CODE_END(entry_SYSCALL_64)

/*
 * %rdi: prev task
 * %rsi: next task
 */
.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif
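	/*
	 * Conceptually (a sketch, not the real macro expansion), the RSB
	 * fill above is a loop of calls whose return addresses are benign
	 * capture targets:
	 *
	 *	movl	$RSB_CLEAR_LOOPS, %r12d
	 * 1:	call	2f		# push a safe return address onto the RSB
	 *	pause; lfence		# speculation trap for the matching ret
	 * 2:	decl	%r12d
	 *	jnz	1b
	 *				# (the real macro also rebalances %rsp)
	 */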

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
SYM_FUNC_END(__switch_to_asm)
.popsection
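
/*
 * A sketch of the C-side call (the real call site is the switch_to()
 * macro in asm/switch_to.h): the compiler places 'prev' in %rdi and
 * 'next' in %rsi per the C ABI, matching the comment above, and
 * __switch_to_asm tail-jumps into __switch_to() on the new stack:
 *
 *	struct task_struct *__switch_to_asm(struct task_struct *prev,
 *					    struct task_struct *next);
 */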

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
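/*
 * Example: for vector 32 (FIRST_EXTERNAL_VECTOR) the stub below pushes
 * $(~32 + 0x80) = 0x5f, which fits in a signed byte and keeps the stub
 * small; common_interrupt later undoes the +0x80 bias with
 * 'addq $-0x80, (%rsp)' to recover ~vector in pt_regs->orig_ax.
 */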
	.align 8
SYM_CODE_START(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_interrupt
	.align	8
	vector=vector+1
    .endr
SYM_CODE_END(irq_entries_start)

	.align 8
SYM_CODE_START(spurious_entries_start)
    vector=FIRST_SYSTEM_VECTOR
    .rept (NR_VECTORS - FIRST_SYSTEM_VECTOR)
	UNWIND_HINT_IRET_REGS
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
	jmp	common_spurious
	.align	8
	vector=vector+1
    .endr
SYM_CODE_END(spurious_entries_start)

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS(CLBR_RAX)
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/*
 * Enters the IRQ stack if we're not already using it. NMI-safe. Clobbers
 * flags and puts the old RSP into \old_rsp, and leaves all other GPRs alone.
 * Requires kernel GSBASE.
 *
 * The invariant is that, if irq_count != -1, then the IRQ stack is in use.
 */
.macro ENTER_IRQ_STACK regs=1 old_rsp save_ret=0
	DEBUG_ENTRY_ASSERT_IRQS_OFF

	.if \save_ret
	/*
	 * If save_ret is set, the original stack contains one additional
	 * entry -- the return address. Therefore, move the address one
	 * entry below %rsp to \old_rsp.
	 */
	leaq	8(%rsp), \old_rsp
	.else
	movq	%rsp, \old_rsp
	.endif

	.if \regs
	UNWIND_HINT_REGS base=\old_rsp
	.endif

	incl	PER_CPU_VAR(irq_count)
	jnz	.Lirq_stack_push_old_rsp_\@

	/*
	 * Right now, if we just incremented irq_count to zero, we've
	 * claimed the IRQ stack but we haven't switched to it yet.
	 *
	 * If anything is added that can interrupt us here without using IST,
	 * it must be *extremely* careful to limit its stack usage. This
	 * could include kprobes and a hypothetical future IST-less #DB
	 * handler.
	 *
	 * The OOPS unwinder relies on the word at the top of the IRQ
	 * stack linking back to the previous RSP for the entire time we're
	 * on the IRQ stack. For this to work reliably, we need to write
	 * it before we actually move ourselves to the IRQ stack.
	 */

	movq	\old_rsp, PER_CPU_VAR(irq_stack_backing_store + IRQ_STACK_SIZE - 8)
	movq	PER_CPU_VAR(hardirq_stack_ptr), %rsp

#ifdef CONFIG_DEBUG_ENTRY
	/*
	 * If the first movq above becomes wrong due to IRQ stack layout
	 * changes, the only way we'll notice is if we try to unwind right
	 * here. Assert that we set up the stack right to catch this type
	 * of bug quickly.
	 */
	cmpq	-8(%rsp), \old_rsp
	je	.Lirq_stack_okay\@
	ud2
	.Lirq_stack_okay\@:
#endif

.Lirq_stack_push_old_rsp_\@:
	pushq	\old_rsp

	.if \regs
	UNWIND_HINT_REGS indirect=1
	.endif

	.if \save_ret
	/*
	 * Push the return address to the stack. This return address can
	 * be found at the "real" original RSP, which was offset by 8 at
	 * the beginning of this macro.
	 */
	pushq	-8(\old_rsp)
	.endif
.endm

/*
 * Undoes ENTER_IRQ_STACK.
 */
.macro LEAVE_IRQ_STACK regs=1
	DEBUG_ENTRY_ASSERT_IRQS_OFF
	/* We need to be off the IRQ stack before decrementing irq_count. */
	popq	%rsp

	.if \regs
	UNWIND_HINT_REGS
	.endif

	/*
	 * As in ENTER_IRQ_STACK, if irq_count == 0 we are still claiming
	 * the IRQ stack but we're not on it.
	 */

	decl	PER_CPU_VAR(irq_count)
.endm
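
/*
 * Typical paired usage (a sketch; the real call sites are
 * interrupt_entry and ret_from_intr below):
 *
 *	ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
 *	...				# run the handler on the IRQ stack
 *	LEAVE_IRQ_STACK
 */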

/**
 * idtentry_body - Macro to emit code calling the C function
 * @vector:		Vector number
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 * @sane:		Sane variant which handles irq tracing, context tracking in C
 */
.macro idtentry_body vector cfunc has_error_code:req sane=0

	call	error_entry
	UNWIND_HINT_REGS

	.if \vector == X86_TRAP_PF
	/*
	 * Store CR2 early so subsequent faults cannot clobber it. Use R12 as
	 * intermediate storage as RDX can be clobbered in enter_from_user_mode().
	 * GET_CR2_INTO can clobber RAX.
	 */
	GET_CR2_INTO(%r12);
	.endif

	.if \sane == 0
	TRACE_IRQS_OFF

#ifdef CONFIG_CONTEXT_TRACKING
	testb	$3, CS(%rsp)
	jz	.Lfrom_kernel_no_ctxt_tracking_\@
	CALL_enter_from_user_mode
.Lfrom_kernel_no_ctxt_tracking_\@:
#endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer into 1st argument */

	.if \has_error_code == 1
	movq	ORIG_RAX(%rsp), %rsi		/* get error code into 2nd argument */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* Clear the error code */
	.endif

	.if \vector == X86_TRAP_PF
	movq	%r12, %rdx			/* Move CR2 into 3rd argument */
	.endif

	call	\cfunc

	.if \sane == 0
	jmp	error_exit
	.else
	jmp	error_return
	.endif
.endm

/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 * @sane:		Sane variant which handles irq tracing, context tracking in C
 *
 * The macro emits code to set up the kernel context for straightforward
 * and simple IDT entries. No IST stack, no paranoid entry checks.
 */
.macro idtentry vector asmsym cfunc has_error_code:req sane=0
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
	ASM_CLAC

	.if \has_error_code == 0
	pushq	$-1			/* ORIG_RAX: no syscall to restart */
	.endif

	.if \vector == X86_TRAP_BP
	/*
	 * If coming from kernel space, create a 6-word gap to allow the
	 * int3 handler to emulate a call instruction.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_no_gap_\@
	.rept	6
	pushq	5*8(%rsp)
	.endr
	UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif

	idtentry_body \vector \cfunc \has_error_code \sane

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
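
/*
 * Hypothetical instantiation (the real ones are emitted via
 * asm/idtentry.h, included further down): for #DE this would expand
 * into an asm_exc_divide_error stub that calls exc_divide_error()
 * with a pt_regs pointer and a zeroed error code:
 *
 *	idtentry X86_TRAP_DE asm_exc_divide_error exc_divide_error has_error_code=0 sane=1
 */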

/*
 * MCE and DB exceptions
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + (x) * 8)

/**
 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #MC and #DB
 *
 * If the entry comes from user space it uses the normal entry path
 * including the return to user space work and preemption checks on
 * exit.
 *
 * If it hits in kernel mode then it needs to go through the paranoid
 * entry as the exception can hit any random state. No preemption
 * check on exit to keep the paranoid path simple.
 *
 * If the trap is #DB then the interrupt stack entry in the IST is
 * moved to the second stack, so a potential recursion will have a
 * fresh IST.
 */
.macro idtentry_mce_db vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC

	pushq	$-1			/* ORIG_RAX: no syscall to restart */

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/*
	 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
	 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
	 */
	call	paranoid_entry

	UNWIND_HINT_REGS

	.if \vector == X86_TRAP_DB
	TRACE_IRQS_OFF_DEBUG
	.else
	TRACE_IRQS_OFF
	.endif

	movq	%rsp, %rdi		/* pt_regs pointer */
	xorl	%esi, %esi		/* Clear the error code */

	.if \vector == X86_TRAP_DB
	subq	$DB_STACK_OFFSET, CPU_TSS_IST(IST_INDEX_DB)
	.endif

	call	\cfunc

	.if \vector == X86_TRAP_DB
	addq	$DB_STACK_OFFSET, CPU_TSS_IST(IST_INDEX_DB)
	.endif

	jmp	paranoid_exit

	/* Switch to the regular task stack and use the noist entry point */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body \vector \cfunc, has_error_code=0

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
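
/*
 * Hypothetical instantiation (the real one comes from asm/idtentry.h):
 *
 *	idtentry_mce_db X86_TRAP_DB asm_exc_debug exc_debug
 *
 * From user mode this behaves like a normal idtentry; from kernel mode
 * it takes the paranoid path and shifts the #DB IST slot as above.
 */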

/*
 * Double fault entry. Straight paranoid. No checks from which context
 * this comes because for the espfix induced #DF this would do the wrong
 * thing.
 */
.macro idtentry_df vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=8
	ASM_CLAC

	/*
	 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
	 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
	 */
	call	paranoid_entry
	UNWIND_HINT_REGS

	/* Read CR2 early */
	GET_CR2_INTO(%r12);

	TRACE_IRQS_OFF

	movq	%rsp, %rdi		/* pt_regs pointer into first argument */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument */
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	movq	%r12, %rdx		/* Move CR2 into 3rd argument */
	call	\cfunc

	jmp	paranoid_exit

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm

/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit.
 */
#include <asm/idtentry.h>

/*
 * Interrupt entry helper function.
 *
 * Entry runs with interrupts off. Stack layout at entry:
 * +----------------------------------------------------+
 * | regs->ss						|
 * | regs->rsp						|
 * | regs->eflags					|
 * | regs->cs						|
 * | regs->ip						|
 * +----------------------------------------------------+
 * | regs->orig_ax = ~(interrupt number)		|
 * +----------------------------------------------------+
 * | return address					|
 * +----------------------------------------------------+
 */
SYM_CODE_START(interrupt_entry)
	UNWIND_HINT_IRET_REGS offset=16
	ASM_CLAC
	cld

	testb	$3, CS-ORIG_RAX+8(%rsp)
	jz	1f
	SWAPGS
	FENCE_SWAPGS_USER_ENTRY
	/*
	 * Switch to the thread stack. The IRET frame and orig_ax are
	 * on the stack, as well as the return address. RDI..R12 are
	 * not (yet) on the stack and space has not (yet) been
	 * allocated for them.
	 */
	pushq	%rdi

	/* Need to switch before accessing the thread stack. */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/*
	 * We have RDI, return address, and orig_ax on the stack on
	 * top of the IRET frame. That means offset=24
	 */
	UNWIND_HINT_IRET_REGS base=%rdi offset=24

	pushq	7*8(%rdi)		/* regs->ss */
	pushq	6*8(%rdi)		/* regs->rsp */
	pushq	5*8(%rdi)		/* regs->eflags */
	pushq	4*8(%rdi)		/* regs->cs */
	pushq	3*8(%rdi)		/* regs->ip */
	UNWIND_HINT_IRET_REGS
	pushq	2*8(%rdi)		/* regs->orig_ax */
	pushq	8(%rdi)			/* return address */

	movq	(%rdi), %rdi
	jmp	2f
1:
	FENCE_SWAPGS_KERNEL_ENTRY
2:
	PUSH_AND_CLEAR_REGS save_ret=1
	ENCODE_FRAME_POINTER 8

	testb	$3, CS+8(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.
	 *
	 * We need to tell lockdep that IRQs are off. We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode. There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	ret
SYM_CODE_END(interrupt_entry)
_ASM_NOKPROBE(interrupt_entry)


/* Interrupt entry/exit. */

/*
 * The interrupt stubs push (~vector+0x80) onto the stack and
 * then jump to common_spurious/interrupt.
 */
SYM_CODE_START_LOCAL(common_spurious)
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	smp_spurious_interrupt		/* rdi points to pt_regs */
	jmp	ret_from_intr
SYM_CODE_END(common_spurious)
_ASM_NOKPROBE(common_spurious)

/* common_interrupt is a hotpath. Align it */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
SYM_CODE_START_LOCAL(common_interrupt)
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	call	interrupt_entry
	UNWIND_HINT_REGS indirect=1
	call	do_IRQ				/* rdi points to pt_regs */
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF

	LEAVE_IRQ_STACK

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
.Lretint_user:
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode

SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack. All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPTION
	/* Interrupts are off */
	/* Check if we need preemption */
	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN

SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
	/*
	 * This may fault. Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE. All GPRs contain their user
	 * values. We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code. We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP <-- RSP points here when we're done
	 * RAX <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
Andy Lutomirski | 85063fa | 2016-09-12 15:05:51 -0700 | [diff] [blame] | 964 | movq (4*8)(%rsp), %rax /* user RSP */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 965 | movq %rax, (4*8)(%rdi) |
Andy Lutomirski | 85063fa | 2016-09-12 15:05:51 -0700 | [diff] [blame] | 966 | /* Now RAX == RSP. */ |
| 967 | |
| 968 | andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */ |
Andy Lutomirski | 85063fa | 2016-09-12 15:05:51 -0700 | [diff] [blame] | 969 | |
| 970 | /* |
| 971 | * espfix_stack[31:16] == 0. The page tables are set up such that |
| 972 | * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of |
| 973 | * espfix_waddr for any X. That is, there are 65536 RO aliases of |
| 974 | * the same page. Set up RSP so that RSP[31:16] contains the |
| 975 | * respective 16 bits of the /userspace/ RSP and RSP nonetheless |
| 976 | * still points to an RO alias of the ESPFIX stack. |
| 977 | */ |
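	/*
	 * Worked example (hypothetical values): if the user RSP was
	 * 0x12345678, the andl above left RAX = 0x12340000, and the orq
	 * below yields RAX = espfix_stack | 0x12340000. Since
	 * espfix_stack[31:16] == 0, the result has RSP[31:16] = 0x1234
	 * while still pointing into one of the 65536 RO aliases of the
	 * ESPFIX page.
	 */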
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 978 | orq PER_CPU_VAR(espfix_stack), %rax |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 979 | |
Peter Zijlstra | 6fd166a | 2017-12-04 15:07:59 +0100 | [diff] [blame] | 980 | SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 981 | SWAPGS /* to user GS */ |
| 982 | popq %rdi /* Restore user RDI */ |
| 983 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 984 | movq %rax, %rsp |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 985 | UNWIND_HINT_IRET_REGS offset=8 |
Andy Lutomirski | 85063fa | 2016-09-12 15:05:51 -0700 | [diff] [blame] | 986 | |
| 987 | /* |
| 988 | * At this point, we cannot write to the stack any more, but we can |
| 989 | * still read. |
| 990 | */ |
| 991 | popq %rax /* Restore user RAX */ |
| 992 | |
| 993 | /* |
| 994 | * RSP now points to an ordinary IRET frame, except that the page |
| 995 | * is read-only and RSP[31:16] are preloaded with the userspace |
| 996 | * values. We can now IRET back to userspace. |
| 997 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 998 | jmp native_irq_return_iret |
H. Peter Anvin | 34273f4 | 2014-05-04 10:36:22 -0700 | [diff] [blame] | 999 | #endif |
Jiri Slaby | cc66936 | 2019-10-11 13:50:50 +0200 | [diff] [blame] | 1000 | SYM_CODE_END(common_interrupt) |
Andrea Righi | a50480c | 2018-12-06 10:56:48 +0100 | [diff] [blame] | 1001 | _ASM_NOKPROBE(common_interrupt) |
H. Peter Anvin | 3891a04 | 2014-04-29 16:46:09 -0700 | [diff] [blame] | 1002 | |
Masami Hiramatsu | 8222d71 | 2009-08-27 13:23:25 -0400 | [diff] [blame] | 1003 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1004 | * APIC interrupts. |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 1005 | */ |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 1006 | .macro apicinterrupt3 num sym do_sym |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1007 | SYM_CODE_START(\sym) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1008 | UNWIND_HINT_IRET_REGS |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1009 | pushq $~(\num) |
Dominik Brodowski | 3aa99fc | 2018-02-20 22:01:11 +0100 | [diff] [blame] | 1010 | call interrupt_entry |
| 1011 | UNWIND_HINT_REGS indirect=1 |
| 1012 | call \do_sym /* rdi points to pt_regs */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1013 | jmp ret_from_intr |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1014 | SYM_CODE_END(\sym) |
Andrea Righi | a50480c | 2018-12-06 10:56:48 +0100 | [diff] [blame] | 1015 | _ASM_NOKPROBE(\sym) |
Alexander van Heukelum | 322648d | 2008-11-23 10:08:28 +0100 | [diff] [blame] | 1016 | .endm |
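/*
 * Usage (cf. the instances below): apicinterrupt3 takes the vector
 * number, the asm stub name to define and the C handler to call, e.g.:
 *
 *	apicinterrupt3 REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt
 *
 * The vector is pushed complemented (~vector) so that the stored
 * orig_ax stays negative and cannot be confused with a syscall number,
 * the same convention as the $-1 "not a system call" pushes elsewhere
 * in this file.
 */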
Jacob Shin | 89b831e | 2005-11-05 17:25:53 +0100 | [diff] [blame] | 1017 | |
Alexander Potapenko | 469f002 | 2016-07-15 11:42:43 +0200 | [diff] [blame] | 1018 | /* Make sure APIC interrupt handlers end up in the irqentry section: */ |
Masami Hiramatsu | 229a718 | 2017-08-03 11:38:21 +0900 | [diff] [blame] | 1019 | #define PUSH_SECTION_IRQENTRY .pushsection .irqentry.text, "ax" |
| 1020 | #define POP_SECTION_IRQENTRY .popsection |
Alexander Potapenko | 469f002 | 2016-07-15 11:42:43 +0200 | [diff] [blame] | 1021 | |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 1022 | .macro apicinterrupt num sym do_sym |
Alexander Potapenko | 469f002 | 2016-07-15 11:42:43 +0200 | [diff] [blame] | 1023 | PUSH_SECTION_IRQENTRY |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 1024 | apicinterrupt3 \num \sym \do_sym |
Alexander Potapenko | 469f002 | 2016-07-15 11:42:43 +0200 | [diff] [blame] | 1025 | POP_SECTION_IRQENTRY |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 1026 | .endm |
| 1027 | |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 1028 | #ifdef CONFIG_SMP |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1029 | apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt |
| 1030 | apicinterrupt3 REBOOT_VECTOR reboot_interrupt smp_reboot_interrupt |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1031 | #endif |
| 1032 | |
Nick Piggin | 03b4863 | 2009-01-20 04:36:04 +0100 | [diff] [blame] | 1033 | #ifdef CONFIG_X86_UV |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1034 | apicinterrupt3 UV_BAU_MESSAGE uv_bau_message_intr1 uv_bau_message_interrupt |
Nick Piggin | 03b4863 | 2009-01-20 04:36:04 +0100 | [diff] [blame] | 1035 | #endif |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1036 | |
| 1037 | apicinterrupt LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt |
| 1038 | apicinterrupt X86_PLATFORM_IPI_VECTOR x86_platform_ipi smp_x86_platform_ipi |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1039 | |
Yang Zhang | d78f266 | 2013-04-11 19:25:11 +0800 | [diff] [blame] | 1040 | #ifdef CONFIG_HAVE_KVM |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1041 | apicinterrupt3 POSTED_INTR_VECTOR kvm_posted_intr_ipi smp_kvm_posted_intr_ipi |
| 1042 | apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR kvm_posted_intr_wakeup_ipi smp_kvm_posted_intr_wakeup_ipi |
Wincy Van | 210f84b | 2017-04-28 13:13:58 +0800 | [diff] [blame] | 1043 | apicinterrupt3 POSTED_INTR_NESTED_VECTOR kvm_posted_intr_nested_ipi smp_kvm_posted_intr_nested_ipi |
Yang Zhang | d78f266 | 2013-04-11 19:25:11 +0800 | [diff] [blame] | 1044 | #endif |
| 1045 | |
Seiji Aguchi | 33e5ff6 | 2013-06-22 07:33:30 -0400 | [diff] [blame] | 1046 | #ifdef CONFIG_X86_MCE_THRESHOLD |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1047 | apicinterrupt THRESHOLD_APIC_VECTOR threshold_interrupt smp_threshold_interrupt |
Seiji Aguchi | 33e5ff6 | 2013-06-22 07:33:30 -0400 | [diff] [blame] | 1048 | #endif |
| 1049 | |
Aravind Gopalakrishnan | 24fd78a | 2015-05-06 06:58:56 -0500 | [diff] [blame] | 1050 | #ifdef CONFIG_X86_MCE_AMD |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1051 | apicinterrupt DEFERRED_ERROR_VECTOR deferred_error_interrupt smp_deferred_error_interrupt |
Aravind Gopalakrishnan | 24fd78a | 2015-05-06 06:58:56 -0500 | [diff] [blame] | 1052 | #endif |
| 1053 | |
Seiji Aguchi | 33e5ff6 | 2013-06-22 07:33:30 -0400 | [diff] [blame] | 1054 | #ifdef CONFIG_X86_THERMAL_VECTOR |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1055 | apicinterrupt THERMAL_APIC_VECTOR thermal_interrupt smp_thermal_interrupt |
Seiji Aguchi | 33e5ff6 | 2013-06-22 07:33:30 -0400 | [diff] [blame] | 1056 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1057 | |
Alexander van Heukelum | 322648d | 2008-11-23 10:08:28 +0100 | [diff] [blame] | 1058 | #ifdef CONFIG_SMP |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1059 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR call_function_single_interrupt smp_call_function_single_interrupt |
| 1060 | apicinterrupt CALL_FUNCTION_VECTOR call_function_interrupt smp_call_function_interrupt |
| 1061 | apicinterrupt RESCHEDULE_VECTOR reschedule_interrupt smp_reschedule_interrupt |
Alexander van Heukelum | 322648d | 2008-11-23 10:08:28 +0100 | [diff] [blame] | 1062 | #endif |
| 1063 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1064 | apicinterrupt ERROR_APIC_VECTOR error_interrupt smp_error_interrupt |
| 1065 | apicinterrupt SPURIOUS_APIC_VECTOR spurious_interrupt smp_spurious_interrupt |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 1066 | |
Peter Zijlstra | e360adb | 2010-10-14 14:01:34 +0800 | [diff] [blame] | 1067 | #ifdef CONFIG_IRQ_WORK |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1068 | apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt |
Ingo Molnar | 241771e | 2008-12-03 10:39:53 +0100 | [diff] [blame] | 1069 | #endif |
| 1070 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1071 | /* |
| 1072 | * Exception entry points. |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 1073 | */ |
Andy Lutomirski | 577ed45 | 2014-05-21 15:07:09 -0700 | [diff] [blame] | 1074 | |
Thomas Gleixner | cfa82a0 | 2020-02-25 23:16:10 +0100 | [diff] [blame] | 1075 | idtentry X86_TRAP_PF page_fault do_page_fault has_error_code=1 |
Thomas Gleixner | 67f1386 | 2020-02-25 23:16:09 +0100 | [diff] [blame] | 1076 | |
Thomas Gleixner | cfa82a0 | 2020-02-25 23:16:10 +0100 | [diff] [blame] | 1077 | idtentry_mce_db X86_TRAP_DB debug do_debug |
| 1078 | idtentry_df X86_TRAP_DF double_fault do_double_fault |
Thomas Gleixner | 67f1386 | 2020-02-25 23:16:09 +0100 | [diff] [blame] | 1079 | |
| 1080 | #ifdef CONFIG_XEN_PV |
Thomas Gleixner | cfa82a0 | 2020-02-25 23:16:10 +0100 | [diff] [blame] | 1081 | idtentry 512 /* dummy */ hypervisor_callback xen_do_hypervisor_callback has_error_code=0 |
Thomas Gleixner | cfa82a0 | 2020-02-25 23:16:10 +0100 | [diff] [blame] | 1082 | idtentry X86_TRAP_DB xendebug do_debug has_error_code=0 |
Thomas Gleixner | 67f1386 | 2020-02-25 23:16:09 +0100 | [diff] [blame] | 1083 | #endif |
Ingo Molnar | 2601e64 | 2006-07-03 00:24:45 -0700 | [diff] [blame] | 1084 | |
Thomas Gleixner | b9f6976 | 2020-03-25 19:45:26 +0100 | [diff] [blame] | 1085 | /* |
| 1086 | * Reload gs selector with exception handling |
| 1087 | * edi: new selector |
| 1088 | * |
| 1089 | * It is placed in entry.text as it shouldn't be instrumented. |
| 1090 | */ |
Thomas Gleixner | 410367e | 2020-03-04 23:32:15 +0100 | [diff] [blame] | 1091 | SYM_FUNC_START(asm_load_gs_index) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1092 | FRAME_BEGIN |
Thomas Gleixner | c931720 | 2020-05-12 14:54:14 +0200 | [diff] [blame] | 1093 | swapgs |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 1094 | .Lgs_change: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1095 | movl %edi, %gs |
Borislav Petkov | 96e5d28 | 2016-04-07 17:31:49 -0700 | [diff] [blame] | 1096 | 2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE |
Thomas Gleixner | c931720 | 2020-05-12 14:54:14 +0200 | [diff] [blame] | 1097 | swapgs |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1098 | FRAME_END |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 1099 | ret |
Thomas Gleixner | 410367e | 2020-03-04 23:32:15 +0100 | [diff] [blame] | 1100 | SYM_FUNC_END(asm_load_gs_index) |
| 1101 | EXPORT_SYMBOL(asm_load_gs_index) |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 1102 | |
Jiri Slaby | 98ededb | 2019-09-06 09:55:50 +0200 | [diff] [blame] | 1103 | _ASM_EXTABLE(.Lgs_change, .Lbad_gs) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1104 | .section .fixup, "ax" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1105 | /* running with kernelgs */ |
Jiri Slaby | ef77e68 | 2019-10-11 13:50:45 +0200 | [diff] [blame] | 1106 | SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs) |
Thomas Gleixner | c931720 | 2020-05-12 14:54:14 +0200 | [diff] [blame] | 1107 | swapgs /* switch back to user gs */ |
Andy Lutomirski | b038c84 | 2016-04-26 12:23:27 -0700 | [diff] [blame] | 1108 | .macro ZAP_GS |
| 1109 | /* This can't be a string because the preprocessor needs to see it. */ |
| 1110 | movl $__USER_DS, %eax |
| 1111 | movl %eax, %gs |
| 1112 | .endm |
| 1113 | ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1114 | xorl %eax, %eax |
| 1115 | movl %eax, %gs |
| 1116 | jmp 2b |
Jiri Slaby | ef77e68 | 2019-10-11 13:50:45 +0200 | [diff] [blame] | 1117 | SYM_CODE_END(.Lbad_gs) |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 1118 | .previous |
Alexander van Heukelum | 0bd7b79 | 2008-11-16 15:29:00 +0100 | [diff] [blame] | 1119 | |
Andi Kleen | 2699500 | 2006-08-02 22:37:28 +0200 | [diff] [blame] | 1120 | /* Call softirq on interrupt stack. Interrupts are off. */ |
Thomas Gleixner | b9f6976 | 2020-03-25 19:45:26 +0100 | [diff] [blame] | 1121 | .pushsection .text, "ax" |
Jiri Slaby | 6dcc562 | 2019-10-11 13:51:04 +0200 | [diff] [blame] | 1122 | SYM_FUNC_START(do_softirq_own_stack) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1123 | pushq %rbp |
| 1124 | mov %rsp, %rbp |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1125 | ENTER_IRQ_STACK regs=0 old_rsp=%r11 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1126 | call __do_softirq |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1127 | LEAVE_IRQ_STACK regs=0 |
Andi Kleen | 2699500 | 2006-08-02 22:37:28 +0200 | [diff] [blame] | 1128 | leaveq |
Andi Kleen | ed6b676 | 2005-07-28 21:15:49 -0700 | [diff] [blame] | 1129 | ret |
Jiri Slaby | 6dcc562 | 2019-10-11 13:51:04 +0200 | [diff] [blame] | 1130 | SYM_FUNC_END(do_softirq_own_stack) |
Thomas Gleixner | b9f6976 | 2020-03-25 19:45:26 +0100 | [diff] [blame] | 1131 | .popsection |
Andi Kleen | 75154f4 | 2007-06-23 02:29:25 +0200 | [diff] [blame] | 1132 | |
Juergen Gross | 28c11b0 | 2018-08-28 09:40:12 +0200 | [diff] [blame] | 1133 | #ifdef CONFIG_XEN_PV |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1134 | /* |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 1135 | * A note on the "critical region" in our callback handler. |
| 1136 | * We want to avoid stacking callback handlers due to events occurring |
| 1137 | * during handling of the last event. To do this, we keep events disabled |
| 1138 | * until we've done all processing. HOWEVER, we must enable events before |
| 1139 | * popping the stack frame (can't be done atomically) and so it would still |
| 1140 | * be possible to get enough handler activations to overflow the stack. |
| 1141 | * Although unlikely, bugs of that kind are hard to track down, so we'd |
| 1142 | * like to avoid the possibility. |
| 1143 | * So, on entry to the handler we detect whether we interrupted an |
| 1144 | * existing activation in its critical region -- if so, we pop the current |
| 1145 | * activation and restart the handler using the previous one. |
| 1146 | */ |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1147 | /* xen_do_hypervisor_callback(struct pt_regs *) */ |
| 1148 | SYM_CODE_START_LOCAL(xen_do_hypervisor_callback) |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1149 | |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 1150 | /* |
| 1151 | * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will |
| 1152 | * see the correct pointer to the pt_regs |
| 1153 | */ |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1154 | UNWIND_HINT_FUNC |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1155 | movq %rdi, %rsp /* we don't return, adjust the stack frame */ |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1156 | UNWIND_HINT_REGS |
Andy Lutomirski | 1d3e53e | 2017-07-11 10:33:38 -0500 | [diff] [blame] | 1157 | |
| 1158 | ENTER_IRQ_STACK old_rsp=%r10 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1159 | call xen_evtchn_do_upcall |
Andy Lutomirski | 1d3e53e | 2017-07-11 10:33:38 -0500 | [diff] [blame] | 1160 | LEAVE_IRQ_STACK |
| 1161 | |
Thomas Gleixner | 4859397 | 2019-07-26 23:19:42 +0200 | [diff] [blame] | 1162 | #ifndef CONFIG_PREEMPTION |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1163 | call xen_maybe_preempt_hcall |
David Vrabel | fdfd811 | 2015-02-19 15:23:17 +0000 | [diff] [blame] | 1164 | #endif |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1165 | jmp error_exit |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1166 | SYM_CODE_END(xen_do_hypervisor_callback) |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1167 | |
| 1168 | /* |
Cyrill Gorcunov | 9f1e87e | 2008-11-27 21:10:08 +0300 | [diff] [blame] | 1169 | * Hypervisor uses this for application faults while it executes. |
| 1170 | * We get here for two reasons: |
| 1171 | * 1. Fault while reloading DS, ES, FS or GS |
| 1172 | * 2. Fault while executing IRET |
| 1173 | * Category 1 we do not need to fix up as Xen has already reloaded all segment |
| 1174 | * registers that could be reloaded and zeroed the others. |
| 1175 | * Category 2 we fix up by killing the current process. We cannot use the |
| 1176 | * normal Linux return path in this case because if we use the IRET hypercall |
| 1177 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. |
| 1178 | * We distinguish between categories by comparing each saved segment register |
| 1179 | * with its current contents: any discrepancy means we are in category 1. |
| 1180 | */ |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1181 | SYM_CODE_START(xen_failsafe_callback) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1182 | UNWIND_HINT_EMPTY |
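	/*
	 * Frame layout, inferred from the offsets used below: the
	 * hypervisor hands us RCX at 0x0 and R11 at 0x8, followed by the
	 * saved DS, ES, FS and GS selectors at 0x10, 0x18, 0x20 and
	 * 0x28 -- 0x30 bytes in total, which is why both exit paths pop
	 * the frame with addq $0x30, %rsp.
	 */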
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1183 | movl %ds, %ecx |
| 1184 | cmpw %cx, 0x10(%rsp) |
| 1185 | jne 1f |
| 1186 | movl %es, %ecx |
| 1187 | cmpw %cx, 0x18(%rsp) |
| 1188 | jne 1f |
| 1189 | movl %fs, %ecx |
| 1190 | cmpw %cx, 0x20(%rsp) |
| 1191 | jne 1f |
| 1192 | movl %gs, %ecx |
| 1193 | cmpw %cx, 0x28(%rsp) |
| 1194 | jne 1f |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1195 | /* All segments match their saved values => Category 2 (Bad IRET). */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1196 | movq (%rsp), %rcx |
| 1197 | movq 8(%rsp), %r11 |
| 1198 | addq $0x30, %rsp |
| 1199 | pushq $0 /* RIP */ |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1200 | UNWIND_HINT_IRET_REGS offset=8 |
Thomas Gleixner | be4c11a | 2020-02-25 23:16:25 +0100 | [diff] [blame] | 1201 | jmp asm_exc_general_protection |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1202 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1203 | movq (%rsp), %rcx |
| 1204 | movq 8(%rsp), %r11 |
| 1205 | addq $0x30, %rsp |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1206 | UNWIND_HINT_IRET_REGS |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1207 | pushq $-1 /* orig_ax = -1 => not a system call */ |
Dominik Brodowski | 3f01dae | 2018-02-11 11:49:45 +0100 | [diff] [blame] | 1208 | PUSH_AND_CLEAR_REGS |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1209 | ENCODE_FRAME_POINTER |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1210 | jmp error_exit |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1211 | SYM_CODE_END(xen_failsafe_callback) |
Juergen Gross | 28c11b0 | 2018-08-28 09:40:12 +0200 | [diff] [blame] | 1212 | #endif /* CONFIG_XEN_PV */ |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 1213 | |
Juergen Gross | 28c11b0 | 2018-08-28 09:40:12 +0200 | [diff] [blame] | 1214 | #ifdef CONFIG_XEN_PVHVM |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 1215 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
Sheng Yang | 38e20b0 | 2010-05-14 12:40:51 +0100 | [diff] [blame] | 1216 | xen_hvm_callback_vector xen_evtchn_do_upcall |
Juergen Gross | 28c11b0 | 2018-08-28 09:40:12 +0200 | [diff] [blame] | 1217 | #endif |
Sheng Yang | 38e20b0 | 2010-05-14 12:40:51 +0100 | [diff] [blame] | 1218 | |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1219 | |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1220 | #if IS_ENABLED(CONFIG_HYPERV) |
Seiji Aguchi | cf910e8 | 2013-06-20 11:46:53 -0400 | [diff] [blame] | 1221 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1222 | hyperv_callback_vector hyperv_vector_handler |
Vitaly Kuznetsov | 9328626 | 2018-01-24 14:23:33 +0100 | [diff] [blame] | 1223 | |
| 1224 | apicinterrupt3 HYPERV_REENLIGHTENMENT_VECTOR \ |
| 1225 | hyperv_reenlightenment_vector hyperv_reenlightenment_intr |
Michael Kelley | 248e742 | 2018-03-04 22:17:18 -0700 | [diff] [blame] | 1226 | |
| 1227 | apicinterrupt3 HYPERV_STIMER0_VECTOR \ |
| 1228 | hv_stimer0_callback_vector hv_stimer0_vector_handler |
K. Y. Srinivasan | bc2b033 | 2013-02-03 17:22:39 -0800 | [diff] [blame] | 1229 | #endif /* CONFIG_HYPERV */ |
| 1230 | |
Zhao Yakui | 498ad39 | 2019-04-30 11:45:25 +0800 | [diff] [blame] | 1231 | #if IS_ENABLED(CONFIG_ACRN_GUEST) |
| 1232 | apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ |
| 1233 | acrn_hv_callback_vector acrn_hv_vector_handler |
| 1234 | #endif |
| 1235 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1236 | /* |
Dominik Brodowski | 9e809d1 | 2018-02-14 18:59:23 +0100 | [diff] [blame] | 1237 | * Save all registers in pt_regs, and switch gs if needed. |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1238 | * Use slow, but surefire "are we in kernel?" check. |
| 1239 | * Return: ebx=0: need swapgs on exit, ebx=1: otherwise |
| 1240 | */ |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1241 | SYM_CODE_START_LOCAL(paranoid_entry) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1242 | UNWIND_HINT_FUNC |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1243 | cld |
Dominik Brodowski | 9e809d1 | 2018-02-14 18:59:23 +0100 | [diff] [blame] | 1244 | PUSH_AND_CLEAR_REGS save_ret=1 |
| 1245 | ENCODE_FRAME_POINTER 8 |
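	/*
	 * The rdmsr below implements the "surefire" check: on entry from
	 * kernel context MSR_GS_BASE holds the per-CPU base, a kernel
	 * (negative) address, while on entry from user context it holds
	 * a user (non-negative) address. rdmsr returns the high 32 bits
	 * in %edx, so the sign of %edx tells us whether SWAPGS is needed.
	 */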
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1246 | movl $1, %ebx |
| 1247 | movl $MSR_GS_BASE, %ecx |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1248 | rdmsr |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1249 | testl %edx, %edx |
| 1250 | js 1f /* negative -> in kernel */ |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1251 | SWAPGS |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1252 | xorl %ebx, %ebx |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1253 | |
| 1254 | 1: |
Dave Hansen | 16561f2 | 2018-10-12 16:21:18 -0700 | [diff] [blame] | 1255 | /* |
| 1256 | * Always stash CR3 in %r14. This value will be restored, |
Andy Lutomirski | ae85249 | 2018-10-14 11:38:18 -0700 | [diff] [blame] | 1257 | * verbatim, at exit. Needed if paranoid_entry interrupted |
| 1258 | * another entry that already switched to the user CR3 value |
| 1259 | * but has not yet returned to userspace. |
Dave Hansen | 16561f2 | 2018-10-12 16:21:18 -0700 | [diff] [blame] | 1260 | * |
| 1261 | * This is also why CS (stashed in the "iret frame" by the |
| 1262 | * hardware at entry) can not be used: this may be a return |
Andy Lutomirski | ae85249 | 2018-10-14 11:38:18 -0700 | [diff] [blame] | 1263 | * to kernel code, but with a user CR3 value. |
Dave Hansen | 16561f2 | 2018-10-12 16:21:18 -0700 | [diff] [blame] | 1264 | */ |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1265 | SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 |
| 1266 | |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 1267 | /* |
| 1268 | * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an |
| 1269 | * unconditional CR3 write, even in the PTI case. So do an lfence |
| 1270 | * to prevent GS speculation, regardless of whether PTI is enabled. |
| 1271 | */ |
| 1272 | FENCE_SWAPGS_KERNEL_ENTRY |
| 1273 | |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1274 | ret |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1275 | SYM_CODE_END(paranoid_entry) |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 1276 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1277 | /* |
| 1278 | * "Paranoid" exit path from exception stack. This is invoked |
| 1279 | * only on return from non-NMI IST interrupts that came |
| 1280 | * from kernel space. |
| 1281 | * |
| 1282 | * We may be returning to very strange contexts (e.g. very early |
| 1283 | * in syscall entry), so checking for preemption here would |
| 1284 | * be complicated. Fortunately, there's no good reason |
| 1285 | * to try to handle preemption here. |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1286 | * |
| 1287 | * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1288 | */ |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1289 | SYM_CODE_START_LOCAL(paranoid_exit) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1290 | UNWIND_HINT_REGS |
Jan Beulich | 2140a99 | 2017-02-03 02:03:25 -0700 | [diff] [blame] | 1291 | DISABLE_INTERRUPTS(CLBR_ANY) |
Steven Rostedt | 5963e31 | 2012-05-30 11:54:53 -0400 | [diff] [blame] | 1292 | TRACE_IRQS_OFF_DEBUG |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1293 | testl %ebx, %ebx /* swapgs needed? */ |
Andy Lutomirski | e5317832 | 2017-11-02 00:59:02 -0700 | [diff] [blame] | 1294 | jnz .Lparanoid_exit_no_swapgs |
Denys Vlasenko | f2db938 | 2015-02-26 14:40:30 -0800 | [diff] [blame] | 1295 | TRACE_IRQS_IRETQ |
Dave Hansen | 16561f2 | 2018-10-12 16:21:18 -0700 | [diff] [blame] | 1296 | /* Always restore stashed CR3 value (see paranoid_entry) */ |
Peter Zijlstra | 21e9445 | 2017-12-04 15:08:00 +0100 | [diff] [blame] | 1297 | RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1298 | SWAPGS_UNSAFE_STACK |
Thomas Gleixner | 45c0838 | 2019-10-23 14:27:07 +0200 | [diff] [blame] | 1299 | jmp restore_regs_and_return_to_kernel |
Andy Lutomirski | e5317832 | 2017-11-02 00:59:02 -0700 | [diff] [blame] | 1300 | .Lparanoid_exit_no_swapgs: |
Denys Vlasenko | f2db938 | 2015-02-26 14:40:30 -0800 | [diff] [blame] | 1301 | TRACE_IRQS_IRETQ_DEBUG |
Dave Hansen | 16561f2 | 2018-10-12 16:21:18 -0700 | [diff] [blame] | 1302 | /* Always restore stashed CR3 value (see paranoid_entry) */ |
Ingo Molnar | e486575 | 2018-02-14 08:39:11 +0100 | [diff] [blame] | 1303 | RESTORE_CR3 scratch_reg=%rbx save_reg=%r14 |
Andy Lutomirski | e5317832 | 2017-11-02 00:59:02 -0700 | [diff] [blame] | 1304 | jmp restore_regs_and_return_to_kernel |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1305 | SYM_CODE_END(paranoid_exit) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1306 | |
| 1307 | /* |
Dominik Brodowski | 9e809d1 | 2018-02-14 18:59:23 +0100 | [diff] [blame] | 1308 | * Save all registers in pt_regs, and switch GS if needed. |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1309 | */ |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1310 | SYM_CODE_START_LOCAL(error_entry) |
Dominik Brodowski | 9e809d1 | 2018-02-14 18:59:23 +0100 | [diff] [blame] | 1311 | UNWIND_HINT_FUNC |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1312 | cld |
Dominik Brodowski | 9e809d1 | 2018-02-14 18:59:23 +0100 | [diff] [blame] | 1313 | PUSH_AND_CLEAR_REGS save_ret=1 |
| 1314 | ENCODE_FRAME_POINTER 8 |
Denys Vlasenko | 03335e9 | 2015-04-27 15:21:52 +0200 | [diff] [blame] | 1315 | testb $3, CS+8(%rsp) |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1316 | jz .Lerror_kernelspace |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1317 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1318 | /* |
| 1319 | * We entered from user mode or we're pretending to have entered |
| 1320 | * from user mode due to an IRET fault. |
| 1321 | */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1322 | SWAPGS |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 1323 | FENCE_SWAPGS_USER_ENTRY |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1324 | /* We have user CR3. Change to kernel CR3. */ |
| 1325 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rax |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1326 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1327 | .Lerror_entry_from_usermode_after_swapgs: |
Andy Lutomirski | 7f2590a | 2017-12-04 15:07:23 +0100 | [diff] [blame] | 1328 | /* Put us onto the real thread stack. */ |
| 1329 | popq %r12 /* save return addr in %r12 */ |
| 1330 | movq %rsp, %rdi /* arg0 = pt_regs pointer */ |
| 1331 | call sync_regs |
| 1332 | movq %rax, %rsp /* switch stack */ |
| 1333 | ENCODE_FRAME_POINTER |
| 1334 | pushq %r12 |
Andy Lutomirski | f107505 | 2015-11-12 12:59:00 -0800 | [diff] [blame] | 1335 | ret |
Andy Lutomirski | 02bc776 | 2015-07-03 12:44:31 -0700 | [diff] [blame] | 1336 | |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 1337 | .Lerror_entry_done_lfence: |
| 1338 | FENCE_SWAPGS_KERNEL_ENTRY |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1339 | .Lerror_entry_done: |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1340 | ret |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1341 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1342 | /* |
| 1343 | * There are two places in the kernel that can potentially fault with |
| 1344 | * usergs. Handle them here. B stepping K8s sometimes report a |
| 1345 | * truncated RIP for IRET exceptions returning to compat mode. Check |
| 1346 | * for these here too. |
| 1347 | */ |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1348 | .Lerror_kernelspace: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1349 | leaq native_irq_return_iret(%rip), %rcx |
| 1350 | cmpq %rcx, RIP+8(%rsp) |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1351 | je .Lerror_bad_iret |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1352 | movl %ecx, %eax /* zero extend */ |
| 1353 | cmpq %rax, RIP+8(%rsp) |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1354 | je .Lbstep_iret |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 1355 | cmpq $.Lgs_change, RIP+8(%rsp) |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 1356 | jne .Lerror_entry_done_lfence |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1357 | |
| 1358 | /* |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 1359 | * hack: .Lgs_change can fail with user gsbase. If this happens, fix up |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1360 | * gsbase and proceed. We'll fix up the exception and land in |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 1361 | * .Lgs_change's error handler with kernel gsbase. |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1362 | */ |
Wanpeng Li | 2fa5f04 | 2016-09-30 09:01:06 +0800 | [diff] [blame] | 1363 | SWAPGS |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 1364 | FENCE_SWAPGS_USER_ENTRY |
Wanpeng Li | 2fa5f04 | 2016-09-30 09:01:06 +0800 | [diff] [blame] | 1365 | jmp .Lerror_entry_done |
Brian Gerst | ae24ffe | 2009-10-12 10:18:23 -0400 | [diff] [blame] | 1366 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1367 | .Lbstep_iret: |
Brian Gerst | ae24ffe | 2009-10-12 10:18:23 -0400 | [diff] [blame] | 1368 | /* Fix truncated RIP */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1369 | movq %rcx, RIP+8(%rsp) |
Andy Lutomirski | b645af2 | 2014-11-22 18:00:33 -0800 | [diff] [blame] | 1370 | /* fall through */ |
| 1371 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1372 | .Lerror_bad_iret: |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1373 | /* |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1374 | * We came from an IRET to user mode, so we have user |
| 1375 | * gsbase and CR3. Switch to kernel gsbase and CR3: |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1376 | */ |
Andy Lutomirski | b645af2 | 2014-11-22 18:00:33 -0800 | [diff] [blame] | 1377 | SWAPGS |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 1378 | FENCE_SWAPGS_USER_ENTRY |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1379 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rax |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1380 | |
| 1381 | /* |
| 1382 | * Pretend that the exception came from user mode: set up pt_regs |
Andy Lutomirski | b3681dd | 2018-07-22 11:05:09 -0700 | [diff] [blame] | 1383 | * as if we faulted immediately after IRET. |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1384 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1385 | mov %rsp, %rdi |
| 1386 | call fixup_bad_iret |
| 1387 | mov %rax, %rsp |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1388 | jmp .Lerror_entry_from_usermode_after_swapgs |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1389 | SYM_CODE_END(error_entry) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1390 | |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1391 | SYM_CODE_START_LOCAL(error_exit) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1392 | UNWIND_HINT_REGS |
Jan Beulich | 2140a99 | 2017-02-03 02:03:25 -0700 | [diff] [blame] | 1393 | DISABLE_INTERRUPTS(CLBR_ANY) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1394 | TRACE_IRQS_OFF |
Andy Lutomirski | b3681dd | 2018-07-22 11:05:09 -0700 | [diff] [blame] | 1395 | testb $3, CS(%rsp) |
| 1396 | jz retint_kernel |
Jiri Slaby | 30a2441 | 2019-10-11 11:22:13 +0200 | [diff] [blame] | 1397 | jmp .Lretint_user |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1398 | SYM_CODE_END(error_exit) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1399 | |
Thomas Gleixner | 424c7d0 | 2020-03-26 16:56:20 +0100 | [diff] [blame] | 1400 | SYM_CODE_START_LOCAL(error_return) |
| 1401 | UNWIND_HINT_REGS |
| 1402 | DEBUG_ENTRY_ASSERT_IRQS_OFF |
| 1403 | testb $3, CS(%rsp) |
| 1404 | jz restore_regs_and_return_to_kernel |
| 1405 | jmp swapgs_restore_regs_and_return_to_usermode |
| 1406 | SYM_CODE_END(error_return) |
| 1407 | |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1408 | /* |
| 1409 | * Runs on exception stack. Xen PV does not go through this path at all, |
| 1410 | * so we can use real assembly here. |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1411 | * |
| 1412 | * Registers: |
| 1413 | * %r14: Used to save/restore the CR3 of the interrupted context |
| 1414 | * when PAGE_TABLE_ISOLATION is in use. Do not clobber. |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1415 | */ |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame^] | 1416 | SYM_CODE_START(asm_exc_nmi) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1417 | UNWIND_HINT_IRET_REGS |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1418 | |
Andy Lutomirski | fc57a7c | 2015-09-20 16:32:04 -0700 | [diff] [blame] | 1419 | /* |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1420 | * We allow breakpoints in NMIs. If a breakpoint occurs, then |
| 1421 | * the iretq it performs will take us out of NMI context. |
| 1422 | * This means that we can have nested NMIs where the next |
| 1423 | * NMI is using the top of the stack of the previous NMI. We |
| 1424 | * can't let it execute because the nested NMI will corrupt the |
| 1425 | * stack of the previous NMI. NMI handlers are not re-entrant |
| 1426 | * anyway. |
| 1427 | * |
| 1428 | * To handle this case we do the following: |
| 1429 | * Check a special location on the stack that contains |
| 1430 | * a variable that is set when NMIs are executing. |
| 1431 | * The interrupted task's stack is also checked to see if it |
| 1432 | * is an NMI stack. |
| 1433 | * If the variable is not set and the stack is not the NMI |
| 1434 | * stack then: |
| 1435 | * o Set the special variable on the stack |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1436 | * o Copy the interrupt frame into an "outermost" location on the |
| 1437 | * stack |
| 1438 | * o Copy the interrupt frame into an "iret" location on the stack |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1439 | * o Continue processing the NMI |
| 1440 | * If the variable is set or the previous stack is the NMI stack: |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1441 | * o Modify the "iret" location to jump to the repeat_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1442 | * o return back to the first NMI |
| 1443 | * |
| 1444 | * Now on exit of the first NMI, we first clear the stack variable |
| 1445 | * The NMI stack will tell any nested NMIs at that point that it is |
| 1446 | * nested. Then we pop the stack normally with iret, and if there was |
| 1447 | * a nested NMI that updated the copy interrupt stack frame, a |
| 1448 | * jump will be made to the repeat_nmi code that will handle the second |
| 1449 | * NMI. |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1450 | * |
| 1451 | * However, espfix prevents us from directly returning to userspace |
| 1452 | * with a single IRET instruction. Similarly, IRET to user mode |
| 1453 | * can fault. We therefore handle NMIs from user space like |
| 1454 | * other IST entries. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1455 | */ |
| 1456 | |
Andy Lutomirski | e93c173 | 2017-08-07 19:43:13 -0700 | [diff] [blame] | 1457 | ASM_CLAC |
| 1458 | |
Denys Vlasenko | 146b2b0 | 2015-03-25 18:18:13 +0100 | [diff] [blame] | 1459 | /* Use %rdx as our temp variable throughout */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1460 | pushq %rdx |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1461 | |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1462 | testb $3, CS-RIP+8(%rsp) |
| 1463 | jz .Lnmi_from_kernel |
Steven Rostedt | 45d5a16 | 2012-02-19 16:43:37 -0500 | [diff] [blame] | 1464 | |
| 1465 | /* |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1466 | * NMI from user mode. We need to run on the thread stack, but we |
| 1467 | * can't go through the normal entry paths: NMIs are masked, and |
| 1468 | * we don't want to enable interrupts, because then we'll end |
| 1469 | * up in an awkward situation in which IRQs are on but NMIs |
| 1470 | * are off. |
Andy Lutomirski | 83c133c | 2015-09-20 16:32:05 -0700 | [diff] [blame] | 1471 | * |
| 1472 | * We also must not push anything to the stack before switching |
| 1473 | * stacks lest we corrupt the "NMI executing" variable. |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1474 | */ |
| 1475 | |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1476 | swapgs |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1477 | cld |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 1478 | FENCE_SWAPGS_USER_ENTRY |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1479 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1480 | movq %rsp, %rdx |
| 1481 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1482 | UNWIND_HINT_IRET_REGS base=%rdx offset=8 |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1483 | pushq 5*8(%rdx) /* pt_regs->ss */ |
| 1484 | pushq 4*8(%rdx) /* pt_regs->rsp */ |
| 1485 | pushq 3*8(%rdx) /* pt_regs->flags */ |
| 1486 | pushq 2*8(%rdx) /* pt_regs->cs */ |
| 1487 | pushq 1*8(%rdx) /* pt_regs->rip */ |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1488 | UNWIND_HINT_IRET_REGS |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1489 | pushq $-1 /* pt_regs->orig_ax */ |
Dominik Brodowski | 30907fd | 2018-02-11 11:49:46 +0100 | [diff] [blame] | 1490 | PUSH_AND_CLEAR_REGS rdx=(%rdx) |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1491 | ENCODE_FRAME_POINTER |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1492 | |
| 1493 | /* |
| 1494 | * At this point we no longer need to worry about stack damage |
| 1495 | * due to nesting -- we're on the normal thread stack and we're |
| 1496 | * done with the NMI stack. |
| 1497 | */ |
| 1498 | |
| 1499 | movq %rsp, %rdi |
| 1500 | movq $-1, %rsi |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame^] | 1501 | call exc_nmi |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1502 | |
| 1503 | /* |
| 1504 | * Return back to user mode. We must *not* do the normal exit |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1505 | * work, because we don't want to enable interrupts. |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1506 | */ |
Andy Lutomirski | 8a055d7 | 2017-11-02 00:59:00 -0700 | [diff] [blame] | 1507 | jmp swapgs_restore_regs_and_return_to_usermode |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1508 | |
| 1509 | .Lnmi_from_kernel: |
| 1510 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1511 | * Here's what our stack frame will look like: |
| 1512 | * +---------------------------------------------------------+ |
| 1513 | * | original SS | |
| 1514 | * | original Return RSP | |
| 1515 | * | original RFLAGS | |
| 1516 | * | original CS | |
| 1517 | * | original RIP | |
| 1518 | * +---------------------------------------------------------+ |
| 1519 | * | temp storage for rdx | |
| 1520 | * +---------------------------------------------------------+ |
| 1521 | * | "NMI executing" variable | |
| 1522 | * +---------------------------------------------------------+ |
| 1523 | * | iret SS } Copied from "outermost" frame | |
| 1524 | * | iret Return RSP } on each loop iteration; overwritten | |
| 1525 | * | iret RFLAGS } by a nested NMI to force another | |
| 1526 | * | iret CS } iteration if needed. | |
| 1527 | * | iret RIP } | |
| 1528 | * +---------------------------------------------------------+ |
| 1529 | * | outermost SS } initialized in first_nmi; | |
| 1530 | * | outermost Return RSP } will not be changed before | |
| 1531 | * | outermost RFLAGS } NMI processing is done. | |
| 1532 | * | outermost CS } Copied to "iret" frame on each | |
| 1533 | * | outermost RIP } iteration. | |
| 1534 | * +---------------------------------------------------------+ |
| 1535 | * | pt_regs | |
| 1536 | * +---------------------------------------------------------+ |
| 1537 | * |
| 1538 | * The "original" frame is used by hardware. Before re-enabling |
| 1539 | * NMIs, we need to be done with it, and we need to leave enough |
| 1540 | * space for the asm code here. |
| 1541 | * |
| 1542 | * We return by executing IRET while RSP points to the "iret" frame. |
| 1543 | * That will either return for real or it will loop back into NMI |
| 1544 | * processing. |
| 1545 | * |
| 1546 | * The "outermost" frame is copied to the "iret" frame on each |
| 1547 | * iteration of the loop, so each iteration starts with the "iret" |
| 1548 | * frame pointing to the final return target. |
| 1549 | */ |
| 1550 | |
| 1551 | /* |
| 1552 | * Determine whether we're a nested NMI. |
| 1553 | * |
Andy Lutomirski | a27507c | 2015-07-15 10:29:37 -0700 | [diff] [blame] | 1554 | * If we interrupted kernel code between repeat_nmi and |
| 1555 | * end_repeat_nmi, then we are a nested NMI. We must not |
| 1556 | * modify the "iret" frame because it's being written by |
| 1557 | * the outer NMI. That's okay; the outer NMI handler is |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame^] | 1558 | * about to call exc_nmi() anyway, so we can just |
Andy Lutomirski | a27507c | 2015-07-15 10:29:37 -0700 | [diff] [blame] | 1559 | * resume the outer NMI. |
| 1560 | */ |
| 1561 | |
| 1562 | movq $repeat_nmi, %rdx |
| 1563 | cmpq 8(%rsp), %rdx |
| 1564 | ja 1f |
| 1565 | movq $end_repeat_nmi, %rdx |
| 1566 | cmpq 8(%rsp), %rdx |
| 1567 | ja nested_nmi_out |
| 1568 | 1: |
| 1569 | |
| 1570 | /* |
| 1571 | * Now check "NMI executing". If it's set, then we're nested. |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1572 | * This will not detect if we interrupted an outer NMI just |
| 1573 | * before IRET. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1574 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1575 | cmpl $1, -8(%rsp) |
| 1576 | je nested_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1577 | |
| 1578 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1579 | * Now test if the previous stack was an NMI stack. This covers |
| 1580 | * the case where we interrupt an outer NMI after it clears |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1581 | * "NMI executing" but before IRET. We need to be careful, though: |
| 1582 | * there is one case in which RSP could point to the NMI stack |
| 1583 | * despite there being no NMI active: naughty userspace controls |
| 1584 | * RSP at the very beginning of the SYSCALL targets. We can |
| 1585 | * pull a fast one on naughty userspace, though: we program |
| 1586 | * SYSCALL to mask DF, so userspace cannot cause DF to be set |
| 1587 | * if it controls the kernel's RSP. We set DF before we clear |
| 1588 | * "NMI executing". |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1589 | */ |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1590 | lea 6*8(%rsp), %rdx |
| 1591 | /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ |
| 1592 | cmpq %rdx, 4*8(%rsp) |
| 1593 | /* If the stack pointer is above the NMI stack, this is a normal NMI */ |
| 1594 | ja first_nmi |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1595 | |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1596 | subq $EXCEPTION_STKSZ, %rdx |
| 1597 | cmpq %rdx, 4*8(%rsp) |
| 1598 | /* If it is below the NMI stack, it is a normal NMI */ |
| 1599 | jb first_nmi |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1600 | |
| 1601 | /* Ah, it is within the NMI stack. */ |
| 1602 | |
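	/*
	 * Decoding the testb below: the saved RFLAGS lives at 3*8(%rsp),
	 * DF is bit 10 of RFLAGS, i.e. bit 2 of byte 1 of that quadword,
	 * so we test (X86_EFLAGS_DF >> 8) against (3*8 + 1)(%rsp).
	 */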
| 1603 | testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) |
| 1604 | jz first_nmi /* RSP was user controlled. */ |
| 1605 | |
| 1606 | /* This is a nested NMI. */ |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1607 | |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1608 | nested_nmi: |
| 1609 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1610 | * Modify the "iret" frame to point to repeat_nmi, forcing another |
| 1611 | * iteration of NMI handling. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1612 | */ |
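	/*
	 * Mechanics of the rewrite below: the subq drops RSP onto the
	 * "NMI executing" slot, %rdx is loaded with the address of the
	 * "outermost RIP" slot, and the five pushes then overwrite the
	 * "iret" frame with SS=__KERNEL_DS, RSP=%rdx, the current
	 * RFLAGS, CS=__KERNEL_CS and RIP=repeat_nmi, so the outer NMI's
	 * IRET resumes at repeat_nmi with RSP at "outermost RIP".
	 */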
Andy Lutomirski | 23a781e | 2015-07-15 10:29:39 -0700 | [diff] [blame] | 1613 | subq $8, %rsp |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1614 | leaq -10*8(%rsp), %rdx |
| 1615 | pushq $__KERNEL_DS |
| 1616 | pushq %rdx |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 1617 | pushfq |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1618 | pushq $__KERNEL_CS |
| 1619 | pushq $repeat_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1620 | |
| 1621 | /* Put stack back */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1622 | addq $(6*8), %rsp |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1623 | |
| 1624 | nested_nmi_out: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1625 | popq %rdx |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1626 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1627 | /* We are returning to kernel mode, so this cannot result in a fault. */ |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1628 | iretq |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1629 | |
| 1630 | first_nmi: |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1631 | /* Restore rdx. */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1632 | movq (%rsp), %rdx |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1633 | |
Andy Lutomirski | 36f1a77 | 2015-07-15 10:29:40 -0700 | [diff] [blame] | 1634 | /* Make room for "NMI executing". */ |
| 1635 | pushq $0 |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1636 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1637 | /* Leave room for the "iret" frame */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1638 | subq $(5*8), %rsp |
Salman Qazi | 28696f4 | 2012-10-01 17:29:25 -0700 | [diff] [blame] | 1639 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1640 | /* Copy the "original" frame to the "outermost" frame */ |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1641 | .rept 5 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1642 | pushq 11*8(%rsp) |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1643 | .endr |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1644 | UNWIND_HINT_IRET_REGS |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1645 | |
Steven Rostedt | 79fb4ad | 2012-02-24 15:55:13 -0500 | [diff] [blame] | 1646 | /* Everything up to here is safe from nested NMIs */ |
| 1647 | |
Andy Lutomirski | a97439a | 2015-07-15 10:29:41 -0700 | [diff] [blame] | 1648 | #ifdef CONFIG_DEBUG_ENTRY |
| 1649 | /* |
| 1650 | * For ease of testing, unmask NMIs right away. Disabled by |
| 1651 | * default because IRET is very expensive. |
| 1652 | */ |
| 1653 | pushq $0 /* SS */ |
| 1654 | pushq %rsp /* RSP (minus 8 because of the previous push) */ |
| 1655 | addq $8, (%rsp) /* Fix up RSP */ |
| 1656 | pushfq /* RFLAGS */ |
| 1657 | pushq $__KERNEL_CS /* CS */ |
| 1658 | pushq $1f /* RIP */ |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1659 | iretq /* continues at repeat_nmi below */ |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1660 | UNWIND_HINT_IRET_REGS |
Andy Lutomirski | a97439a | 2015-07-15 10:29:41 -0700 | [diff] [blame] | 1661 | 1: |
| 1662 | #endif |
| 1663 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1664 | repeat_nmi: |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1665 | /* |
| 1666 | * If there was a nested NMI, the first NMI's iret will return |
| 1667 | * here. But NMIs are still enabled and we can take another |
| 1668 | * nested NMI. The nested NMI checks the interrupted RIP to see |
| 1669 | * if it is between repeat_nmi and end_repeat_nmi, and if so |
| 1670 | * it will just return, as we are about to repeat an NMI anyway. |
| 1671 | * This makes it safe to copy to the stack frame that a nested |
| 1672 | * NMI will update. |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1673 | * |
| 1674 | * RSP is pointing to "outermost RIP". gsbase is unknown, but, if |
| 1675 | * we're repeating an NMI, gsbase has the same value that it had on |
| 1676 | * the first iteration. paranoid_entry will load the kernel |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame^] | 1677 | * gsbase if needed before we call exc_nmi(). "NMI executing" |
Andy Lutomirski | 36f1a77 | 2015-07-15 10:29:40 -0700 | [diff] [blame] | 1678 | * is zero. |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1679 | */ |
Andy Lutomirski | 36f1a77 | 2015-07-15 10:29:40 -0700 | [diff] [blame] | 1680 | movq $1, 10*8(%rsp) /* Set "NMI executing". */ |
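| | /*
| | * Offset check: RSP points at the "outermost" RIP, so slots 0-4 are
| | * the "outermost" frame, slots 5-9 the "iret" frame, and slot 10 the
| | * "NMI executing" variable; hence 10*8(%rsp).
| | */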
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1681 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1682 | /* |
| 1683 | * Copy the "outermost" frame to the "iret" frame. NMIs that nest |
| 1684 | * here must not modify the "iret" frame while we're writing to |
| 1685 | * it or it will end up containing garbage. |
| 1686 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1687 | addq $(10*8), %rsp |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1688 | .rept 5 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1689 | pushq -6*8(%rsp) |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1690 | .endr |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1691 | subq $(5*8), %rsp |
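| | /*
| | * The addq parked RSP on the "NMI executing" slot; each pushq then
| | * copied the "outermost" word sitting six slots below the current RSP
| | * into the "iret" frame, and the subq puts RSP back on "outermost RIP".
| | */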
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1692 | end_repeat_nmi: |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1693 | |
| 1694 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1695 | * Everything below this point can be preempted by a nested NMI. |
| 1696 | * If this happens, then the inner NMI will change the "iret" |
| 1697 | * frame to point back to repeat_nmi. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1698 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1699 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1700 | |
Steven Rostedt | 1fd466e | 2011-12-08 12:32:27 -0500 | [diff] [blame] | 1701 | /* |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1702 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit |
Steven Rostedt | 1fd466e | 2011-12-08 12:32:27 -0500 | [diff] [blame] | 1703 | * as we should not be calling schedule in NMI context,
| 1704 | * even with normal interrupts enabled. An NMI should not
| 1705 | * set NEED_RESCHED or do anything else that normal interrupts
| 1706 | * and exceptions might do.
| 1707 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1708 | call paranoid_entry |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1709 | UNWIND_HINT_REGS |
Steven Rostedt | 7fbb98c | 2012-06-07 10:21:21 -0400 | [diff] [blame] | 1710 | |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame^] | 1711 | /* Open-coded "paranoidentry exc_nmi(), 0", minus TRACE_IRQS_OFF */
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1712 | movq %rsp, %rdi |
| 1713 | movq $-1, %rsi |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame^] | 1714 | call exc_nmi |
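| | /*
| | * exc_nmi() takes only the pt_regs pointer (RDI); the -1 in RSI fills
| | * the old error-code argument slot and is ignored.
| | */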
Steven Rostedt | 7fbb98c | 2012-06-07 10:21:21 -0400 | [diff] [blame] | 1715 | |
Dave Hansen | 16561f2 | 2018-10-12 16:21:18 -0700 | [diff] [blame] | 1716 | /* Always restore stashed CR3 value (see paranoid_entry) */ |
Peter Zijlstra | 21e9445 | 2017-12-04 15:08:00 +0100 | [diff] [blame] | 1717 | RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1718 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1719 | testl %ebx, %ebx /* swapgs needed? */ |
| 1720 | jnz nmi_restore |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1721 | nmi_swapgs: |
| 1722 | SWAPGS_UNSAFE_STACK |
| 1723 | nmi_restore: |
Dominik Brodowski | 502af0d | 2018-02-11 11:49:43 +0100 | [diff] [blame] | 1724 | POP_REGS |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1725 | |
Andy Lutomirski | 471ee48 | 2017-11-02 00:59:05 -0700 | [diff] [blame] | 1726 | /* |
| 1727 | * Skip orig_ax and the "outermost" frame (1 + 5 words, hence 6*8)
| 1728 | * to point RSP at the "iret" frame.
| 1729 | */ |
| 1730 | addq $6*8, %rsp |
Salman Qazi | 28696f4 | 2012-10-01 17:29:25 -0700 | [diff] [blame] | 1731 | |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1732 | /* |
| 1733 | * Clear "NMI executing". Set DF first so that we can easily |
| 1734 | * distinguish the remaining code between here and IRET from |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1735 | * the SYSCALL entry and exit paths. |
| 1736 | * |
| 1737 | * We arguably should just inspect RIP instead, but I (Andy) wrote |
| 1738 | * this code when I had the misapprehension that Xen PV supported |
| 1739 | * NMIs, and Xen PV would break that approach. |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1740 | */ |
| 1741 | std |
| 1742 | movq $0, 5*8(%rsp) /* clear "NMI executing" */ |
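| | /*
| | * Kernel code otherwise runs with DF clear, so the nested-NMI check
| | * at the top of the handler can use DF to tell this window apart from
| | * a SYSCALL with a user-controlled RSP that happens to point at the
| | * NMI stack.
| | */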
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1743 | |
| 1744 | /* |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1745 | * iretq reads the "iret" frame and exits the NMI stack in a |
| 1746 | * single instruction. We are returning to kernel mode, so this |
| 1747 | * cannot result in a fault. Similarly, we don't need to worry |
| 1748 | * about espfix64 on the way back to kernel mode. |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1749 | */ |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1750 | iretq |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame^] | 1751 | SYM_CODE_END(asm_exc_nmi) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1752 | |
Andy Lutomirski | dffb3f9 | 2019-07-01 20:43:20 -0700 | [diff] [blame] | 1753 | #ifndef CONFIG_IA32_EMULATION |
| 1754 | /* |
| 1755 | * This handles SYSCALL from 32-bit code: on AMD CPUs SYSCALL works in
| 1756 | * 32-bit mode and there is no way to program the MSRs to disable it.
| 1757 | */ |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1758 | SYM_CODE_START(ignore_sysret) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1759 | UNWIND_HINT_EMPTY |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1760 | mov $-ENOSYS, %eax |
Jan Beulich | b2b1d94 | 2019-12-16 11:40:03 +0100 | [diff] [blame] | 1761 | sysretl |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1762 | SYM_CODE_END(ignore_sysret) |
Andy Lutomirski | dffb3f9 | 2019-07-01 20:43:20 -0700 | [diff] [blame] | 1763 | #endif |
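| | /*
| | * Illustration only, not part of this file: a hypothetical 32-bit
| | * userspace probe of this stub. It assumes an AMD CPU (where SYSCALL
| | * is usable from 32-bit mode) and a kernel built without
| | * CONFIG_IA32_EMULATION, so that MSR_CSTAR points at ignore_sysret;
| | * build with "gcc -m32". Any syscall number gets -ENOSYS back.
| | *
| | *	#include <stdio.h>
| | *
| | *	int main(void)
| | *	{
| | *		int ret;
| | *
| | *		// Raw SYSCALL, bypassing libc; SYSCALL clobbers ecx.
| | *		__asm__ volatile("syscall"
| | *				 : "=a" (ret)
| | *				 : "a" (1)
| | *				 : "ecx", "memory");
| | *		printf("syscall returned %d\n", ret);	// expect -38
| | *		return 0;
| | *	}
| | */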
Andy Lutomirski | 2deb4be | 2016-07-14 13:22:55 -0700 | [diff] [blame] | 1764 | |
Thomas Gleixner | b9f6976 | 2020-03-25 19:45:26 +0100 | [diff] [blame] | 1765 | .pushsection .text, "ax" |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1766 | SYM_CODE_START(rewind_stack_do_exit) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1767 | UNWIND_HINT_FUNC |
Andy Lutomirski | 2deb4be | 2016-07-14 13:22:55 -0700 | [diff] [blame] | 1768 | /* Prevent any naive code from trying to unwind to our caller. */ |
| 1769 | xorl %ebp, %ebp |
| 1770 | |
| 1771 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rax |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1772 | leaq -PTREGS_SIZE(%rax), %rsp |
Jann Horn | f977df7 | 2020-04-25 05:03:04 -0500 | [diff] [blame] | 1773 | UNWIND_HINT_REGS |
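| | /*
| | * RSP now points at a pt_regs-sized hole just below the top of the
| | * task stack, giving the unwinder a plausible frame to stop at. The
| | * exit code arrives in %rdi from the C caller and flows through to
| | * do_exit() untouched.
| | */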
Andy Lutomirski | 2deb4be | 2016-07-14 13:22:55 -0700 | [diff] [blame] | 1774 | |
| 1775 | call do_exit |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1776 | SYM_CODE_END(rewind_stack_do_exit) |
Thomas Gleixner | b9f6976 | 2020-03-25 19:45:26 +0100 | [diff] [blame] | 1777 | .popsection |