/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - SYM_FUNC_START/END:	Define functions in the symbol table.
 * - idtentry:			Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <asm/frame.h>
#include <asm/trapnr.h>
#include <asm/nospec-branch.h>
#include <asm/fsgsbase.h>
#include <linux/err.h>

#include "calling.h"

.code64
.section .entry.text, "ax"

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
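
/*
 * Illustrative only (not part of the kernel source): a minimal userspace
 * sequence following the register convention documented above, invoking
 * write(2) (syscall number 1 on x86-64) with three arguments.  The register
 * assignments and the syscall number are the standard Linux x86-64 ABI;
 * the buffer label and length are made-up names for the example.
 *
 *	movq	$1, %rax		# __NR_write
 *	movq	$1, %rdi		# arg0: fd (stdout)
 *	leaq	msg(%rip), %rsi		# arg1: buffer
 *	movq	$len, %rdx		# arg2: count
 *	syscall				# rcx := return RIP, r11 := rflags
 *	# %rax now holds the result (or -errno); rcx/r11 were clobbered
 */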

SYM_CODE_START(entry_SYSCALL_64)
	UNWIND_HINT_EMPTY

	swapgs
	/* tss.sp2 is scratch space. */
	movq	%rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS				/* pt_regs->ss */
	pushq	PER_CPU_VAR(cpu_tss_rw + TSS_sp2)	/* pt_regs->sp */
	pushq	%r11					/* pt_regs->flags */
	pushq	$__USER_CS				/* pt_regs->cs */
	pushq	%rcx					/* pt_regs->ip */
SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
	pushq	%rax					/* pt_regs->orig_ax */

	PUSH_AND_CLEAR_REGS rax=$-ENOSYS

	/* IRQs are off. */
	movq	%rsp, %rdi
	/* Sign extend the lower 32 bits as syscall numbers are treated as int */
	movslq	%eax, %rsi
	call	do_syscall_64		/* returns with IRQs disabled */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.  If we're not,
	 * go to the slow exit path.
	 * In the Xen PV case we must use iret anyway.
	 */

	ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \
		X86_FEATURE_XENPV

	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11

	cmpq	%rcx, %r11	/* SYSRET requires RCX == RIP */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 *
	 * Change top bits to match most significant bit (47th or 56th bit
	 * depending on paging mode) in the address.
	 */
#ifdef CONFIG_X86_5LEVEL
	ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \
		"shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57
#else
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
#endif
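
	/*
	 * Worked example (illustrative values, 4-level paging, shift = 16):
	 * a canonical RCX such as 0x00007fffffffe000 survives the shl/sar
	 * pair unchanged, while a non-canonical value such as
	 * 0x0000800000000000 becomes 0xffff800000000000, so the comparison
	 * below routes it to the IRET path.
	 */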

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	swapgs_restore_regs_and_return_to_usermode

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq	%r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	swapgs_restore_regs_and_return_to_usermode

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	swapgs_restore_regs_and_return_to_usermode

	/*
	 * We win! This label is here just for ease of understanding
	 *   perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	POP_REGS pop_rdi=0 skip_r11rcx=1

	/*
	 * Now all regs are restored except RSP and RDI.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	pushq	RSP-RDI(%rdi)	/* RSP */
	pushq	(%rdi)		/* RDI */

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	popq	%rdi
	popq	%rsp
	swapgs
	sysretq
SYM_CODE_END(entry_SYSCALL_64)

/*
 * %rdi: prev task
 * %rsi: next task
 */
.pushsection .text, "ax"
SYM_FUNC_START(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(fixed_percpu_data) + stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
SYM_FUNC_END(__switch_to_asm)
.popsection
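
/*
 * Illustrative C-side view (a sketch, not part of this file): the prototype
 * lives in asm/switch_to.h and the function is reached through the
 * switch_to() macro, roughly as
 *
 *	((last) = __switch_to_asm((prev), (next)));
 */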

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
.pushsection .text, "ax"
SYM_CODE_START(ret_from_fork)
	UNWIND_HINT_EMPTY
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	UNWIND_HINT_REGS
	movq	%rsp, %rdi
	call	syscall_exit_to_user_mode	/* returns with IRQs disabled */
	jmp	swapgs_restore_regs_and_return_to_usermode

1:
	/* kernel thread */
	UNWIND_HINT_EMPTY
	movq	%r12, %rdi
	CALL_NOSPEC rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling kernel_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
SYM_CODE_END(ret_from_fork)
.popsection

.macro DEBUG_ENTRY_ASSERT_IRQS_OFF
#ifdef CONFIG_DEBUG_ENTRY
	pushq	%rax
	SAVE_FLAGS
	testl	$X86_EFLAGS_IF, %eax
	jz	.Lokay_\@
	ud2
.Lokay_\@:
	popq	%rax
#endif
.endm

/**
 * idtentry_body - Macro to emit code calling the C function
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 */
.macro idtentry_body cfunc has_error_code:req

	call	error_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi			/* pt_regs pointer into 1st argument*/

	.if \has_error_code == 1
		movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument*/
		movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	.endif

	call	\cfunc

	jmp	error_return
.endm

/**
 * idtentry - Macro to generate entry stubs for simple IDT entries
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 * @has_error_code:	Hardware pushed error code on stack
 *
 * The macro emits code to set up the kernel context for straightforward
 * and simple IDT entries. No IST stack, no paranoid entry checks.
 */
.macro idtentry vector asmsym cfunc has_error_code:req
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=\has_error_code*8
	ASM_CLAC

	.if \has_error_code == 0
		pushq	$-1			/* ORIG_RAX: no syscall to restart */
	.endif

	.if \vector == X86_TRAP_BP
		/*
		 * If coming from kernel space, create a 6-word gap to allow the
		 * int3 handler to emulate a call instruction.
		 */
		testb	$3, CS-ORIG_RAX(%rsp)
		jnz	.Lfrom_usermode_no_gap_\@
		.rept	6
		pushq	5*8(%rsp)
		.endr
		UNWIND_HINT_IRET_REGS offset=8
.Lfrom_usermode_no_gap_\@:
	.endif

	idtentry_body \cfunc \has_error_code

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
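
/*
 * Example expansion (illustrative; the real instantiations are generated
 * from DECLARE_IDTENTRY() in asm/idtentry.h):
 *
 *	idtentry X86_TRAP_DE  asm_exc_divide_error  exc_divide_error  has_error_code=0
 */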

/*
 * Interrupt entry/exit.
 *
 * The interrupt stubs push (vector) onto the stack, which is the error_code
 * position of idtentry exceptions, and jump to one of the two idtentry points
 * (common/spurious).
 *
 * common_interrupt is a hotpath, align it to a cache line
 */
.macro idtentry_irq vector cfunc
	.p2align CONFIG_X86_L1_CACHE_SHIFT
	idtentry \vector asm_\cfunc \cfunc has_error_code=1
.endm

/*
 * System vectors which invoke their handlers directly and are not
 * going through the regular common device interrupt handling code.
 */
.macro idtentry_sysvec vector cfunc
	idtentry \vector asm_\cfunc \cfunc has_error_code=0
.endm
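
/*
 * Example expansion (illustrative; the real instantiations are generated
 * from DECLARE_IDTENTRY_SYSVEC() in asm/idtentry.h):
 *
 *	idtentry_sysvec LOCAL_TIMER_VECTOR  sysvec_apic_timer_interrupt
 */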

/**
 * idtentry_mce_db - Macro to generate entry stubs for #MC and #DB
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #MC and #DB
 *
 * If the entry comes from user space it uses the normal entry path
 * including the return to user space work and preemption checks on
 * exit.
 *
 * If it hits in kernel mode then it needs to go through the paranoid
 * entry as the exception can hit any random state. No preemption
 * check on exit to keep the paranoid path simple.
 */
.macro idtentry_mce_db vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC

	pushq	$-1			/* ORIG_RAX: no syscall to restart */

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry

	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer */

	call	\cfunc

	jmp	paranoid_exit

	/* Switch to the regular task stack and use the noist entry point */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body noist_\cfunc, has_error_code=0

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
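
/*
 * Example expansions (illustrative; the real instantiations are generated
 * from asm/idtentry.h):
 *
 *	idtentry_mce_db X86_TRAP_DB  asm_exc_debug          exc_debug
 *	idtentry_mce_db X86_TRAP_MC  asm_exc_machine_check  exc_machine_check
 */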

#ifdef CONFIG_AMD_MEM_ENCRYPT
/**
 * idtentry_vc - Macro to generate entry stub for #VC
 * @vector:		Vector number
 * @asmsym:		ASM symbol for the entry point
 * @cfunc:		C function to be called
 *
 * The macro emits code to set up the kernel context for #VC. The #VC handler
 * runs on an IST stack and needs to be able to cause nested #VC exceptions.
 *
 * To make this work the #VC entry code tries its best to pretend it doesn't use
 * an IST stack by switching to the task stack if coming from user-space (which
 * includes early SYSCALL entry path) or back to the stack in the IRET frame if
 * entered from kernel-mode.
 *
 * If entered from kernel-mode the return stack is validated first, and if it is
 * not safe to use (e.g. because it points to the entry stack) the #VC handler
 * will switch to a fall-back stack (VC2) and call a special handler function.
 *
 * The macro is only used for one vector, but it is planned to be extended in
 * the future for the #HV exception.
 */
.macro idtentry_vc vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS
	ASM_CLAC

	/*
	 * If the entry is from userspace, switch stacks and treat it as
	 * a normal entry.
	 */
	testb	$3, CS-ORIG_RAX(%rsp)
	jnz	.Lfrom_usermode_switch_stack_\@

	/*
	 * paranoid_entry returns SWAPGS flag for paranoid_exit in EBX.
	 * EBX == 0 -> SWAPGS, EBX == 1 -> no SWAPGS
	 */
	call	paranoid_entry

	UNWIND_HINT_REGS

	/*
	 * Switch off the IST stack to make it free for nested exceptions. The
	 * vc_switch_off_ist() function will switch back to the interrupted
	 * stack if it is safe to do so. If not it switches to the VC fall-back
	 * stack.
	 */
	movq	%rsp, %rdi		/* pt_regs pointer */
	call	vc_switch_off_ist
	movq	%rax, %rsp		/* Switch to new stack */

	UNWIND_HINT_REGS

	/* Update pt_regs */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument*/
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */

	movq	%rsp, %rdi		/* pt_regs pointer */

	call	kernel_\cfunc

	/*
	 * No need to switch back to the IST stack. The current stack is either
	 * identical to the stack in the IRET frame or the VC fall-back stack,
	 * so it is definitely mapped even with PTI enabled.
	 */
	jmp	paranoid_exit

	/* Switch to the regular task stack */
.Lfrom_usermode_switch_stack_\@:
	idtentry_body user_\cfunc, has_error_code=1

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
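
/*
 * Example expansion (illustrative; the real instantiation is generated from
 * DECLARE_IDTENTRY_VC() in asm/idtentry.h):
 *
 *	idtentry_vc X86_TRAP_VC  asm_exc_vmm_communication  exc_vmm_communication
 */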
#endif

/*
 * Double fault entry. Straight paranoid. No checks from which context
 * this comes because for the espfix induced #DF this would do the wrong
 * thing.
 */
.macro idtentry_df vector asmsym cfunc
SYM_CODE_START(\asmsym)
	UNWIND_HINT_IRET_REGS offset=8
	ASM_CLAC

	/* paranoid_entry returns GS information for paranoid_exit in EBX. */
	call	paranoid_entry
	UNWIND_HINT_REGS

	movq	%rsp, %rdi		/* pt_regs pointer into first argument */
	movq	ORIG_RAX(%rsp), %rsi	/* get error code into 2nd argument*/
	movq	$-1, ORIG_RAX(%rsp)	/* no syscall to restart */
	call	\cfunc

	jmp	paranoid_exit

_ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
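
/*
 * Example expansion (illustrative; the real instantiation is generated from
 * DECLARE_IDTENTRY_DF() in asm/idtentry.h):
 *
 *	idtentry_df X86_TRAP_DF  asm_exc_double_fault  exc_double_fault
 */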

/*
 * Include the defines which emit the idt entries which are shared
 * between 32 and 64 bit and emit the __irqentry_text_* markers
 * so the stacktrace boundary checks work.
 */
	.align 16
	.globl __irqentry_text_start
__irqentry_text_start:

#include <asm/idtentry.h>

	.align 16
	.globl __irqentry_text_end
__irqentry_text_end:

SYM_CODE_START_LOCAL(common_interrupt_return)
SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates user mode. */
	testb	$3, CS(%rsp)
	jnz	1f
	ud2
1:
#endif
#ifdef CONFIG_XEN_PV
	ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
#endif

	POP_REGS pop_rdi=0

	/*
	 * The stack is now user RDI, orig_ax, RIP, CS, EFLAGS, RSP, SS.
	 * Save old stack pointer and switch to trampoline stack.
	 */
	movq	%rsp, %rdi
	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
	UNWIND_HINT_EMPTY

	/* Copy the IRET frame to the trampoline stack. */
	pushq	6*8(%rdi)	/* SS */
	pushq	5*8(%rdi)	/* RSP */
	pushq	4*8(%rdi)	/* EFLAGS */
	pushq	3*8(%rdi)	/* CS */
	pushq	2*8(%rdi)	/* RIP */

	/* Push user RDI on the trampoline stack. */
	pushq	(%rdi)

	/*
	 * We are on the trampoline stack.  All regs except RDI are live.
	 * We can do future final exit work right here.
	 */
	STACKLEAK_ERASE_NOCLOBBER

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi

	/* Restore RDI. */
	popq	%rdi
	SWAPGS
	INTERRUPT_RETURN


SYM_INNER_LABEL(restore_regs_and_return_to_kernel, SYM_L_GLOBAL)
#ifdef CONFIG_DEBUG_ENTRY
	/* Assert that pt_regs indicates kernel mode. */
	testb	$3, CS(%rsp)
	jz	1f
	ud2
1:
#endif
	POP_REGS
	addq	$8, %rsp	/* skip regs->orig_ax */
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler.
	 */
	INTERRUPT_RETURN

SYM_INNER_LABEL_ALIGN(native_iret, SYM_L_GLOBAL)
	UNWIND_HINT_IRET_REGS
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

SYM_INNER_LABEL(native_irq_return_iret, SYM_L_GLOBAL)
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in exc_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE.  All GPRs contain their user
	 * values.  We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE).  espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code.  We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP  <-- RSP points here when we're done
	 * RAX  <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	swapgs					/* to kernel GS */
	SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi	/* to kernel CR3 */

	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */

	/*
	 * espfix_stack[31:16] == 0.  The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X.  That is, there are 65536 RO aliases of
	 * the same page.  Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax

	SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
	swapgs					/* to user GS */
	popq	%rdi				/* Restore user RDI */

	movq	%rax, %rsp
	UNWIND_HINT_IRET_REGS offset=8

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values.  We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
SYM_CODE_END(common_interrupt_return)
_ASM_NOKPROBE(common_interrupt_return)

/*
 * Reload gs selector with exception handling
 * edi:  new selector
 *
 * Is in entry.text as it shouldn't be instrumented.
 */
SYM_FUNC_START(asm_load_gs_index)
	FRAME_BEGIN
	swapgs
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	swapgs
	FRAME_END
	ret
SYM_FUNC_END(asm_load_gs_index)
EXPORT_SYMBOL(asm_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, .Lbad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
	swapgs					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
SYM_CODE_END(.Lbad_gs)
	.previous
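
/*
 * Illustrative C-side usage sketch (not part of this file): the kernel's
 * native_load_gs_index() wrapper in asm/special_insns.h disables interrupts
 * around the call, roughly as
 *
 *	local_irq_save(flags);
 *	asm_load_gs_index(selector);
 *	local_irq_restore(flags);
 */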

#ifdef CONFIG_XEN_PV
/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 *
 * C calling convention: exc_xen_hypervisor_callback(struct pt_regs *regs)
 */
SYM_CODE_START_LOCAL(exc_xen_hypervisor_callback)

	/*
	 * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
	 * see the correct pointer to the pt_regs
	 */
	UNWIND_HINT_FUNC
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
	UNWIND_HINT_REGS

	call	xen_pv_evtchn_do_upcall

	jmp	error_return
SYM_CODE_END(exc_xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
| 805 | */ |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 806 | SYM_CODE_START(xen_failsafe_callback) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 807 | UNWIND_HINT_EMPTY |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 808 | movl %ds, %ecx |
| 809 | cmpw %cx, 0x10(%rsp) |
| 810 | jne 1f |
| 811 | movl %es, %ecx |
| 812 | cmpw %cx, 0x18(%rsp) |
| 813 | jne 1f |
| 814 | movl %fs, %ecx |
| 815 | cmpw %cx, 0x20(%rsp) |
| 816 | jne 1f |
| 817 | movl %gs, %ecx |
| 818 | cmpw %cx, 0x28(%rsp) |
| 819 | jne 1f |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 820 | /* All segments match their saved values => Category 2 (Bad IRET). */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 821 | movq (%rsp), %rcx |
| 822 | movq 8(%rsp), %r11 |
| 823 | addq $0x30, %rsp |
| 824 | pushq $0 /* RIP */ |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 825 | UNWIND_HINT_IRET_REGS offset=8 |
Thomas Gleixner | be4c11a | 2020-02-25 23:16:25 +0100 | [diff] [blame] | 826 | jmp asm_exc_general_protection |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 827 | 1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 828 | movq (%rsp), %rcx |
| 829 | movq 8(%rsp), %r11 |
| 830 | addq $0x30, %rsp |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 831 | UNWIND_HINT_IRET_REGS |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 832 | pushq $-1 /* orig_ax = -1 => not a system call */ |
Dominik Brodowski | 3f01dae | 2018-02-11 11:49:45 +0100 | [diff] [blame] | 833 | PUSH_AND_CLEAR_REGS |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 834 | ENCODE_FRAME_POINTER |
Thomas Gleixner | e88d974 | 2020-05-21 22:05:30 +0200 | [diff] [blame] | 835 | jmp error_return |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 836 | SYM_CODE_END(xen_failsafe_callback) |
Juergen Gross | 28c11b0 | 2018-08-28 09:40:12 +0200 | [diff] [blame] | 837 | #endif /* CONFIG_XEN_PV */ |
Jeremy Fitzhardinge | 3d75e1b | 2008-07-08 15:06:49 -0700 | [diff] [blame] | 838 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 839 | /* |
Chang S. Bae | c82965f | 2020-05-28 16:13:57 -0400 | [diff] [blame] | 840 | * Save all registers in pt_regs. Return GSBASE related information |
| 841 | * in EBX depending on the availability of the FSGSBASE instructions: |
| 842 | * |
| 843 | * FSGSBASE R/EBX |
| 844 | * N 0 -> SWAPGS on exit |
| 845 | * 1 -> no SWAPGS on exit |
| 846 | * |
| 847 | * Y GSBASE value at entry, must be restored in paranoid_exit |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 848 | */ |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 849 | SYM_CODE_START_LOCAL(paranoid_entry) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 850 | UNWIND_HINT_FUNC |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 851 | cld |
Dominik Brodowski | 9e809d1 | 2018-02-14 18:59:23 +0100 | [diff] [blame] | 852 | PUSH_AND_CLEAR_REGS save_ret=1 |
| 853 | ENCODE_FRAME_POINTER 8 |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 854 | |
Dave Hansen | 16561f2 | 2018-10-12 16:21:18 -0700 | [diff] [blame] | 855 | /* |
| 856 | * Always stash CR3 in %r14. This value will be restored, |
Andy Lutomirski | ae85249 | 2018-10-14 11:38:18 -0700 | [diff] [blame] | 857 | * verbatim, at exit. Needed if paranoid_entry interrupted |
| 858 | * another entry that already switched to the user CR3 value |
| 859 | * but has not yet returned to userspace. |
Dave Hansen | 16561f2 | 2018-10-12 16:21:18 -0700 | [diff] [blame] | 860 | * |
| 861 | * This is also why CS (stashed in the "iret frame" by the |
| 862 | * hardware at entry) can not be used: this may be a return |
Andy Lutomirski | ae85249 | 2018-10-14 11:38:18 -0700 | [diff] [blame] | 863 | * to kernel code, but with a user CR3 value. |
Chang S. Bae | 96b2371 | 2020-05-28 16:13:55 -0400 | [diff] [blame] | 864 | * |
| 865 | * Switching CR3 does not depend on kernel GSBASE so it can |
| 866 | * be done before switching to the kernel GSBASE. This is |
| 867 | * required for FSGSBASE because the kernel GSBASE has to |
| 868 | * be retrieved from a kernel internal table. |
Dave Hansen | 16561f2 | 2018-10-12 16:21:18 -0700 | [diff] [blame] | 869 | */ |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 870 | SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14 |
| 871 | |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 872 | /* |
Chang S. Bae | c82965f | 2020-05-28 16:13:57 -0400 | [diff] [blame] | 873 | * Handling GSBASE depends on the availability of FSGSBASE. |
| 874 | * |
| 875 | * Without FSGSBASE the kernel enforces that negative GSBASE |
| 876 | * values indicate kernel GSBASE. With FSGSBASE no assumptions |
| 877 | * can be made about the GSBASE value when entering from user |
| 878 | * space. |
| 879 | */ |
| 880 | ALTERNATIVE "jmp .Lparanoid_entry_checkgs", "", X86_FEATURE_FSGSBASE |
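	/*
	 * Note on the ALTERNATIVE above: on CPUs with X86_FEATURE_FSGSBASE
	 * the "jmp" is patched out at boot, so execution falls through to
	 * the RDGSBASE/WRGSBASE path below; otherwise the jump takes the
	 * MSR/SWAPGS path at .Lparanoid_entry_checkgs. The same pattern is
	 * used in paranoid_exit and in the NMI exit path.
	 */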
| 881 | |
| 882 | /* |
| 883 | * Read the current GSBASE and store it in %rbx unconditionally, |
| 884 | * retrieve and set the current CPU's kernel GSBASE. The stored value |
| 885 | * has to be restored in paranoid_exit unconditionally. |
| 886 | * |
Borislav Petkov | 0b2c605 | 2020-08-20 11:10:15 +0200 | [diff] [blame] | 887 | * The unconditional write to GS base below ensures that no subsequent |
| 888 | * loads based on a mispredicted GS base can happen, therefore no LFENCE |
| 889 | * is needed here. |
Chang S. Bae | c82965f | 2020-05-28 16:13:57 -0400 | [diff] [blame] | 890 | */ |
| 891 | SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx |
| 892 | ret |
| 893 | |
| 894 | .Lparanoid_entry_checkgs: |
Chang S. Bae | 96b2371 | 2020-05-28 16:13:55 -0400 | [diff] [blame] | 895 | /* EBX = 1 -> kernel GSBASE active, no restore required */ |
| 896 | movl $1, %ebx |
Lai Jiangshan | c07e455 | 2021-11-26 18:11:21 +0800 | [diff] [blame] | 897 | |
Chang S. Bae | 96b2371 | 2020-05-28 16:13:55 -0400 | [diff] [blame] | 898 | /* |
| 899 | * The kernel-enforced convention is a negative GSBASE indicates |
| 900 | * a kernel value. No SWAPGS needed on entry and exit. |
| 901 | */ |
| 902 | movl $MSR_GS_BASE, %ecx |
| 903 | rdmsr |
| 904 | testl %edx, %edx |
Lai Jiangshan | c07e455 | 2021-11-26 18:11:21 +0800 | [diff] [blame] | 905 | js .Lparanoid_kernel_gsbase |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 906 | |
Chang S. Bae | 96b2371 | 2020-05-28 16:13:55 -0400 | [diff] [blame] | 907 | /* EBX = 0 -> SWAPGS required on exit */ |
| 908 | xorl %ebx, %ebx |
Lai Jiangshan | c07e455 | 2021-11-26 18:11:21 +0800 | [diff] [blame] | 909 | swapgs |
| 910 | .Lparanoid_kernel_gsbase: |
| 911 | |
| 912 | FENCE_SWAPGS_KERNEL_ENTRY |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 913 | ret |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 914 | SYM_CODE_END(paranoid_entry) |
Denys Vlasenko | 1eeb207 | 2015-02-26 14:40:33 -0800 | [diff] [blame] | 915 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 916 | /* |
| 917 | * "Paranoid" exit path from exception stack. This is invoked |
| 918 | * only on return from non-NMI IST interrupts that came |
| 919 | * from kernel space. |
| 920 | * |
| 921 | * We may be returning to very strange contexts (e.g. very early |
| 922 | * in syscall entry), so checking for preemption here would |
Chang S. Bae | c82965f | 2020-05-28 16:13:57 -0400 | [diff] [blame] | 923 | * be complicated. Fortunately, there's no good reason to try |
| 924 | * to handle preemption here. |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 925 | * |
Chang S. Bae | c82965f | 2020-05-28 16:13:57 -0400 | [diff] [blame] | 926 | * R/EBX contains the GSBASE related information depending on the |
| 927 | * availability of the FSGSBASE instructions: |
| 928 | * |
| 929 | * FSGSBASE R/EBX |
| 930 | * N 0 -> SWAPGS on exit |
| 931 | * 1 -> no SWAPGS on exit |
| 932 | * |
| 933 | * Y User space GSBASE, must be restored unconditionally |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 934 | */ |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 935 | SYM_CODE_START_LOCAL(paranoid_exit) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 936 | UNWIND_HINT_REGS |
Chang S. Bae | c82965f | 2020-05-28 16:13:57 -0400 | [diff] [blame] | 937 | /* |
| 938 | * The order of operations is important. RESTORE_CR3 requires |
| 939 | * kernel GSBASE. |
| 940 | * |
| 941 | * NB to anyone tempted to optimize this code: this code does |
| 942 | * not execute at all for exceptions from user mode. Those |
| 943 | * exceptions go through error_exit instead. |
| 944 | */ |
| 945 | RESTORE_CR3 scratch_reg=%rax save_reg=%r14 |
| 946 | |
| 947 | /* Handle the three GSBASE cases */ |
| 948 | ALTERNATIVE "jmp .Lparanoid_exit_checkgs", "", X86_FEATURE_FSGSBASE |
| 949 | |
| 950 | /* With FSGSBASE enabled, unconditionally restore GSBASE */ |
| 951 | wrgsbase %rbx |
| 952 | jmp restore_regs_and_return_to_kernel |
| 953 | |
| 954 | .Lparanoid_exit_checkgs: |
| 955 | /* On non-FSGSBASE systems, conditionally do SWAPGS */ |
| 956 | testl %ebx, %ebx |
| 957 | jnz restore_regs_and_return_to_kernel |
| 958 | |
| 959 | /* We are returning to a context with user GSBASE */ |
Juergen Gross | 53c9d92 | 2021-01-20 14:55:44 +0100 | [diff] [blame] | 960 | swapgs |
Chang S. Bae | c82965f | 2020-05-28 16:13:57 -0400 | [diff] [blame] | 961 | jmp restore_regs_and_return_to_kernel |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 962 | SYM_CODE_END(paranoid_exit) |
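/*
 * A rough pseudo-C sketch of the GSBASE handling in paranoid_entry and
 * paranoid_exit above (illustrative only, not kernel code; names such as
 * kernel_gsbase are placeholders, not kernel symbols):
 *
 *	if (cpu_has(X86_FEATURE_FSGSBASE)) {
 *		rbx = rdgsbase();		entry: stash current GSBASE
 *		wrgsbase(kernel_gsbase);	run the handler...
 *		wrgsbase(rbx);			exit: restore verbatim
 *	} else if ((s64)rdmsr(MSR_GS_BASE) < 0) {
 *		ebx = 1;			kernel GSBASE already active,
 *						no SWAPGS on entry or exit
 *	} else {
 *		ebx = 0;			user GSBASE was active,
 *		swapgs();			SWAPGS on entry and on exit
 *	}
 */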
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 963 | |
| 964 | /* |
Dominik Brodowski | 9e809d1 | 2018-02-14 18:59:23 +0100 | [diff] [blame] | 965 | * Save all registers in pt_regs, and switch GS if needed. |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 966 | */ |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 967 | SYM_CODE_START_LOCAL(error_entry) |
Dominik Brodowski | 9e809d1 | 2018-02-14 18:59:23 +0100 | [diff] [blame] | 968 | UNWIND_HINT_FUNC |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 969 | cld |
Dominik Brodowski | 9e809d1 | 2018-02-14 18:59:23 +0100 | [diff] [blame] | 970 | PUSH_AND_CLEAR_REGS save_ret=1 |
| 971 | ENCODE_FRAME_POINTER 8 |
Denys Vlasenko | 03335e9 | 2015-04-27 15:21:52 +0200 | [diff] [blame] | 972 | testb $3, CS+8(%rsp) |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 973 | jz .Lerror_kernelspace |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 974 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 975 | /* |
| 976 | * We entered from user mode or we're pretending to have entered |
| 977 | * from user mode due to an IRET fault. |
| 978 | */ |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 979 | SWAPGS |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 980 | FENCE_SWAPGS_USER_ENTRY |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 981 | /* We have user CR3. Change to kernel CR3. */ |
| 982 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rax |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 983 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 984 | .Lerror_entry_from_usermode_after_swapgs: |
Andy Lutomirski | 7f2590a | 2017-12-04 15:07:23 +0100 | [diff] [blame] | 985 | /* Put us onto the real thread stack. */ |
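	/*
	 * Note: sync_regs() copies the pt_regs we just built onto the
	 * thread stack and returns the new pt_regs pointer. The return
	 * address is parked in callee-saved %r12 across the call and
	 * re-pushed afterwards, so the ret below returns to error_entry's
	 * caller on the new stack.
	 */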
| 986 | popq %r12 /* save return addr in %r12 */ |
| 987 | movq %rsp, %rdi /* arg0 = pt_regs pointer */ |
| 988 | call sync_regs |
| 989 | movq %rax, %rsp /* switch stack */ |
| 990 | ENCODE_FRAME_POINTER |
| 991 | pushq %r12 |
Andy Lutomirski | f107505 | 2015-11-12 12:59:00 -0800 | [diff] [blame] | 992 | ret |
Andy Lutomirski | 02bc776 | 2015-07-03 12:44:31 -0700 | [diff] [blame] | 993 | |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 994 | /* |
| 995 | * There are two places in the kernel that can potentially fault with |
| 996 | * usergs. Handle them here. B stepping K8s sometimes report a |
| 997 | * truncated RIP for IRET exceptions returning to compat mode. Check |
| 998 | * for these here too. |
| 999 | */ |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1000 | .Lerror_kernelspace: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1001 | leaq native_irq_return_iret(%rip), %rcx |
| 1002 | cmpq %rcx, RIP+8(%rsp) |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1003 | je .Lerror_bad_iret |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1004 | movl %ecx, %eax /* zero extend */ |
| 1005 | cmpq %rax, RIP+8(%rsp) |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1006 | je .Lbstep_iret |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 1007 | cmpq $.Lgs_change, RIP+8(%rsp) |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 1008 | jne .Lerror_entry_done_lfence |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1009 | |
| 1010 | /* |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 1011 | * hack: .Lgs_change can fail with user gsbase. If this happens, fix up |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1012 | * gsbase and proceed. We'll fix up the exception and land in |
Borislav Petkov | 42c748bb | 2016-04-07 17:31:50 -0700 | [diff] [blame] | 1013 | * .Lgs_change's error handler with kernel gsbase. |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1014 | */ |
Wanpeng Li | 2fa5f04 | 2016-09-30 09:01:06 +0800 | [diff] [blame] | 1015 | SWAPGS |
Lai Jiangshan | 1367afa | 2021-11-26 18:11:22 +0800 | [diff] [blame] | 1016 | |
| 1017 | /* |
| 1018 | * Issue an LFENCE to prevent GS speculation, regardless of whether it is a |
| 1019 | * kernel or user gsbase. |
| 1020 | */ |
| 1021 | .Lerror_entry_done_lfence: |
| 1022 | FENCE_SWAPGS_KERNEL_ENTRY |
| 1023 | ret |
Brian Gerst | ae24ffe | 2009-10-12 10:18:23 -0400 | [diff] [blame] | 1024 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1025 | .Lbstep_iret: |
Brian Gerst | ae24ffe | 2009-10-12 10:18:23 -0400 | [diff] [blame] | 1026 | /* Fix truncated RIP */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1027 | movq %rcx, RIP+8(%rsp) |
Andy Lutomirski | b645af2 | 2014-11-22 18:00:33 -0800 | [diff] [blame] | 1028 | /* fall through */ |
| 1029 | |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1030 | .Lerror_bad_iret: |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1031 | /* |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1032 | * We came from an IRET to user mode, so we have user |
| 1033 | * gsbase and CR3. Switch to kernel gsbase and CR3: |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1034 | */ |
Andy Lutomirski | b645af2 | 2014-11-22 18:00:33 -0800 | [diff] [blame] | 1035 | SWAPGS |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 1036 | FENCE_SWAPGS_USER_ENTRY |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1037 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rax |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1038 | |
| 1039 | /* |
| 1040 | * Pretend that the exception came from user mode: set up pt_regs |
Andy Lutomirski | b3681dd | 2018-07-22 11:05:09 -0700 | [diff] [blame] | 1041 | * as if we faulted immediately after IRET. |
Andy Lutomirski | 539f511 | 2015-06-09 12:36:01 -0700 | [diff] [blame] | 1042 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1043 | mov %rsp, %rdi |
| 1044 | call fixup_bad_iret |
| 1045 | mov %rax, %rsp |
Andy Lutomirski | cb6f64e | 2015-07-03 12:44:27 -0700 | [diff] [blame] | 1046 | jmp .Lerror_entry_from_usermode_after_swapgs |
Jiri Slaby | ef1e031 | 2019-10-11 13:51:00 +0200 | [diff] [blame] | 1047 | SYM_CODE_END(error_entry) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1048 | |
Thomas Gleixner | 424c7d0 | 2020-03-26 16:56:20 +0100 | [diff] [blame] | 1049 | SYM_CODE_START_LOCAL(error_return) |
| 1050 | UNWIND_HINT_REGS |
| 1051 | DEBUG_ENTRY_ASSERT_IRQS_OFF |
| 1052 | testb $3, CS(%rsp) |
| 1053 | jz restore_regs_and_return_to_kernel |
| 1054 | jmp swapgs_restore_regs_and_return_to_usermode |
| 1055 | SYM_CODE_END(error_return) |
| 1056 | |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1057 | /* |
| 1058 | * Runs on exception stack. Xen PV does not go through this path at all, |
| 1059 | * so we can use real assembly here. |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1060 | * |
| 1061 | * Registers: |
| 1062 | * %r14: Used to save/restore the CR3 of the interrupted context |
| 1063 | * when PAGE_TABLE_ISOLATION is in use. Do not clobber. |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1064 | */ |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame] | 1065 | SYM_CODE_START(asm_exc_nmi) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1066 | UNWIND_HINT_IRET_REGS |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1067 | |
Andy Lutomirski | fc57a7c | 2015-09-20 16:32:04 -0700 | [diff] [blame] | 1068 | /* |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1069 | * We allow breakpoints in NMIs. If a breakpoint occurs, then |
| 1070 | * the iretq it performs will take us out of NMI context. |
| 1071 | * This means that we can have nested NMIs where the next |
| 1072 | * NMI is using the top of the stack of the previous NMI. We |
| 1073 | * can't let it execute because the nested NMI will corrupt the |
| 1074 | * stack of the previous NMI. NMI handlers are not re-entrant |
| 1075 | * anyway. |
| 1076 | * |
| 1077 | * To handle this case we do the following: |
| 1078 | * Check a special location on the stack that contains |
| 1079 | * a variable that is set when NMIs are executing. |
| 1080 | * The interrupted task's stack is also checked to see if it |
| 1081 | * is an NMI stack. |
| 1082 | * If the variable is not set and the stack is not the NMI |
| 1083 | * stack then: |
| 1084 | * o Set the special variable on the stack |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1085 | * o Copy the interrupt frame into an "outermost" location on the |
| 1086 | * stack |
| 1087 | * o Copy the interrupt frame into an "iret" location on the stack |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1088 | * o Continue processing the NMI |
| 1089 | * If the variable is set or the previous stack is the NMI stack: |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1090 | * o Modify the "iret" location to jump to repeat_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1091 | * o return back to the first NMI |
| 1092 | * |
| 1093 | * Now on exit of the first NMI, we first clear the stack variable |
| 1094 | * The NMI stack will tell any nested NMIs at that point that it is |
| 1095 | * nested. Then we pop the stack normally with iret, and if there was |
| 1096 | * a nested NMI that updated the copy interrupt stack frame, a |
| 1097 | * jump will be made to the repeat_nmi code that will handle the second |
| 1098 | * NMI. |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1099 | * |
| 1100 | * However, espfix prevents us from directly returning to userspace |
| 1101 | * with a single IRET instruction. Similarly, IRET to user mode |
| 1102 | * can fault. We therefore handle NMIs from user space like |
| 1103 | * other IST entries. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1104 | */ |
| 1105 | |
Andy Lutomirski | e93c173 | 2017-08-07 19:43:13 -0700 | [diff] [blame] | 1106 | ASM_CLAC |
| 1107 | |
Denys Vlasenko | 146b2b0 | 2015-03-25 18:18:13 +0100 | [diff] [blame] | 1108 | /* Use %rdx as our temp variable throughout */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1109 | pushq %rdx |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1110 | |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1111 | testb $3, CS-RIP+8(%rsp) |
| 1112 | jz .Lnmi_from_kernel |
Steven Rostedt | 45d5a16 | 2012-02-19 16:43:37 -0500 | [diff] [blame] | 1113 | |
| 1114 | /* |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1115 | * NMI from user mode. We need to run on the thread stack, but we |
| 1116 | * can't go through the normal entry paths: NMIs are masked, and |
| 1117 | * we don't want to enable interrupts, because then we'll end |
| 1118 | * up in an awkward situation in which IRQs are on but NMIs |
| 1119 | * are off. |
Andy Lutomirski | 83c133c | 2015-09-20 16:32:05 -0700 | [diff] [blame] | 1120 | * |
| 1121 | * We also must not push anything to the stack before switching |
| 1122 | * stacks lest we corrupt the "NMI executing" variable. |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1123 | */ |
| 1124 | |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1125 | swapgs |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1126 | cld |
Josh Poimboeuf | 18ec54f | 2019-07-08 11:52:25 -0500 | [diff] [blame] | 1127 | FENCE_SWAPGS_USER_ENTRY |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1128 | SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1129 | movq %rsp, %rdx |
| 1130 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1131 | UNWIND_HINT_IRET_REGS base=%rdx offset=8 |
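	/*
	 * %rdx still points at the temp-%rdx slot on the NMI stack, so
	 * 1*8(%rdx)..5*8(%rdx) address the hardware iret frame (RIP, CS,
	 * RFLAGS, RSP, SS, lowest to highest). The pushes below replay
	 * that frame onto the thread stack we just switched to.
	 */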
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1132 | pushq 5*8(%rdx) /* pt_regs->ss */ |
| 1133 | pushq 4*8(%rdx) /* pt_regs->rsp */ |
| 1134 | pushq 3*8(%rdx) /* pt_regs->flags */ |
| 1135 | pushq 2*8(%rdx) /* pt_regs->cs */ |
| 1136 | pushq 1*8(%rdx) /* pt_regs->rip */ |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1137 | UNWIND_HINT_IRET_REGS |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1138 | pushq $-1 /* pt_regs->orig_ax */ |
Dominik Brodowski | 30907fd | 2018-02-11 11:49:46 +0100 | [diff] [blame] | 1139 | PUSH_AND_CLEAR_REGS rdx=(%rdx) |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1140 | ENCODE_FRAME_POINTER |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1141 | |
| 1142 | /* |
| 1143 | * At this point we no longer need to worry about stack damage |
| 1144 | * due to nesting -- we're on the normal thread stack and we're |
| 1145 | * done with the NMI stack. |
| 1146 | */ |
| 1147 | |
| 1148 | movq %rsp, %rdi |
| 1149 | movq $-1, %rsi |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame] | 1150 | call exc_nmi |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1151 | |
| 1152 | /* |
| 1153 | * Return back to user mode. We must *not* do the normal exit |
Josh Poimboeuf | 946c191 | 2016-10-20 11:34:40 -0500 | [diff] [blame] | 1154 | * work, because we don't want to enable interrupts. |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1155 | */ |
Andy Lutomirski | 8a055d7 | 2017-11-02 00:59:00 -0700 | [diff] [blame] | 1156 | jmp swapgs_restore_regs_and_return_to_usermode |
Andy Lutomirski | 9b6e6a8 | 2015-07-15 10:29:35 -0700 | [diff] [blame] | 1157 | |
| 1158 | .Lnmi_from_kernel: |
| 1159 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1160 | * Here's what our stack frame will look like: |
| 1161 | * +---------------------------------------------------------+ |
| 1162 | * | original SS | |
| 1163 | * | original Return RSP | |
| 1164 | * | original RFLAGS | |
| 1165 | * | original CS | |
| 1166 | * | original RIP | |
| 1167 | * +---------------------------------------------------------+ |
| 1168 | * | temp storage for rdx | |
| 1169 | * +---------------------------------------------------------+ |
| 1170 | * | "NMI executing" variable | |
| 1171 | * +---------------------------------------------------------+ |
| 1172 | * | iret SS } Copied from "outermost" frame | |
| 1173 | * | iret Return RSP } on each loop iteration; overwritten | |
| 1174 | * | iret RFLAGS } by a nested NMI to force another | |
| 1175 | * | iret CS } iteration if needed. | |
| 1176 | * | iret RIP } | |
| 1177 | * +---------------------------------------------------------+ |
| 1178 | * | outermost SS } initialized in first_nmi; | |
| 1179 | * | outermost Return RSP } will not be changed before | |
| 1180 | * | outermost RFLAGS } NMI processing is done. | |
| 1181 | * | outermost CS } Copied to "iret" frame on each | |
| 1182 | * | outermost RIP } iteration. | |
| 1183 | * +---------------------------------------------------------+ |
| 1184 | * | pt_regs | |
| 1185 | * +---------------------------------------------------------+ |
| 1186 | * |
| 1187 | * The "original" frame is used by hardware. Before re-enabling |
| 1188 | * NMIs, we need to be done with it, and we need to leave enough |
| 1189 | * space for the asm code here. |
| 1190 | * |
| 1191 | * We return by executing IRET while RSP points to the "iret" frame. |
| 1192 | * That will either return for real or it will loop back into NMI |
| 1193 | * processing. |
| 1194 | * |
| 1195 | * The "outermost" frame is copied to the "iret" frame on each |
| 1196 | * iteration of the loop, so each iteration starts with the "iret" |
| 1197 | * frame pointing to the final return target. |
| 1198 | */ |
| 1199 | |
| 1200 | /* |
| 1201 | * Determine whether we're a nested NMI. |
| 1202 | * |
Andy Lutomirski | a27507c | 2015-07-15 10:29:37 -0700 | [diff] [blame] | 1203 | * If we interrupted kernel code between repeat_nmi and |
| 1204 | * end_repeat_nmi, then we are a nested NMI. We must not |
| 1205 | * modify the "iret" frame because it's being written by |
| 1206 | * the outer NMI. That's okay; the outer NMI handler is |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame] | 1207 | * about to call exc_nmi() anyway, so we can just |
Andy Lutomirski | a27507c | 2015-07-15 10:29:37 -0700 | [diff] [blame] | 1208 | * resume the outer NMI. |
| 1209 | */ |
| 1210 | |
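	/*
	 * Note: the temp %rdx push is at 0(%rsp), so 8(%rsp) holds the
	 * interrupted RIP from the hardware iret frame. The two compares
	 * below branch to nested_nmi_out only if that RIP lies within
	 * [repeat_nmi, end_repeat_nmi).
	 */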
| 1211 | movq $repeat_nmi, %rdx |
| 1212 | cmpq 8(%rsp), %rdx |
| 1213 | ja 1f |
| 1214 | movq $end_repeat_nmi, %rdx |
| 1215 | cmpq 8(%rsp), %rdx |
| 1216 | ja nested_nmi_out |
| 1217 | 1: |
| 1218 | |
| 1219 | /* |
| 1220 | * Now check "NMI executing". If it's set, then we're nested. |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1221 | * This will not detect if we interrupted an outer NMI just |
| 1222 | * before IRET. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1223 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1224 | cmpl $1, -8(%rsp) |
| 1225 | je nested_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1226 | |
| 1227 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1228 | * Now test if the previous stack was an NMI stack. This covers |
| 1229 | * the case where we interrupt an outer NMI after it clears |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1230 | * "NMI executing" but before IRET. We need to be careful, though: |
| 1231 | * there is one case in which RSP could point to the NMI stack |
| 1232 | * despite there being no NMI active: naughty userspace controls |
| 1233 | * RSP at the very beginning of the SYSCALL targets. We can |
| 1234 | * pull a fast one on naughty userspace, though: we program |
| 1235 | * SYSCALL to mask DF, so userspace cannot cause DF to be set |
| 1236 | * if it controls the kernel's RSP. We set DF before we clear |
| 1237 | * "NMI executing". |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1238 | */ |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1239 | lea 6*8(%rsp), %rdx |
| 1240 | /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */ |
| 1241 | cmpq %rdx, 4*8(%rsp) |
| 1242 | /* If the stack pointer is above the NMI stack, this is a normal NMI */ |
| 1243 | ja first_nmi |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1244 | |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1245 | subq $EXCEPTION_STKSZ, %rdx |
| 1246 | cmpq %rdx, 4*8(%rsp) |
| 1247 | /* If it is below the NMI stack, it is a normal NMI */ |
| 1248 | jb first_nmi |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1249 | |
| 1250 | /* Ah, it is within the NMI stack. */ |
| 1251 | |
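	/*
	 * Note on the constants below: X86_EFLAGS_DF is bit 10 of RFLAGS,
	 * so (X86_EFLAGS_DF >> 8) == 0x04 tests it as bit 2 of the second
	 * byte of the saved RFLAGS. With the temp %rdx push still on the
	 * stack, the iret frame occupies 1*8(%rsp)..5*8(%rsp), so RFLAGS
	 * is at 3*8(%rsp) and its second byte at 3*8 + 1.
	 */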
| 1252 | testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp) |
| 1253 | jz first_nmi /* RSP was user controlled. */ |
| 1254 | |
| 1255 | /* This is a nested NMI. */ |
Denys Vlasenko | 0784b36 | 2015-04-01 16:50:57 +0200 | [diff] [blame] | 1256 | |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1257 | nested_nmi: |
| 1258 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1259 | * Modify the "iret" frame to point to repeat_nmi, forcing another |
| 1260 | * iteration of NMI handling. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1261 | */ |
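	/*
	 * Mechanics of the rewrite (a sketch): the subq below steps RSP
	 * past the "NMI executing" slot so the following five pushes land
	 * in the "iret" frame slots (SS, RSP, RFLAGS, CS, RIP), with RIP
	 * set to repeat_nmi and RSP set to the "outermost RIP" address
	 * computed by the leaq. The addq $(6*8) afterwards undoes the net
	 * 8 + 5*8 byte adjustment, leaving RSP back at the temp %rdx slot.
	 */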
Andy Lutomirski | 23a781e | 2015-07-15 10:29:39 -0700 | [diff] [blame] | 1262 | subq $8, %rsp |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1263 | leaq -10*8(%rsp), %rdx |
| 1264 | pushq $__KERNEL_DS |
| 1265 | pushq %rdx |
Ingo Molnar | 131484c | 2015-05-28 12:21:47 +0200 | [diff] [blame] | 1266 | pushfq |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1267 | pushq $__KERNEL_CS |
| 1268 | pushq $repeat_nmi |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1269 | |
| 1270 | /* Put stack back */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1271 | addq $(6*8), %rsp |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1272 | |
| 1273 | nested_nmi_out: |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1274 | popq %rdx |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1275 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1276 | /* We are returning to kernel mode, so this cannot result in a fault. */ |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1277 | iretq |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1278 | |
| 1279 | first_nmi: |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1280 | /* Restore rdx. */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1281 | movq (%rsp), %rdx |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1282 | |
Andy Lutomirski | 36f1a77 | 2015-07-15 10:29:40 -0700 | [diff] [blame] | 1283 | /* Make room for "NMI executing". */ |
| 1284 | pushq $0 |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1285 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1286 | /* Leave room for the "iret" frame */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1287 | subq $(5*8), %rsp |
Salman Qazi | 28696f4 | 2012-10-01 17:29:25 -0700 | [diff] [blame] | 1288 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1289 | /* Copy the "original" frame to the "outermost" frame */ |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1290 | .rept 5 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1291 | pushq 11*8(%rsp) |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1292 | .endr |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1293 | UNWIND_HINT_IRET_REGS |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1294 | |
Steven Rostedt | 79fb4ad | 2012-02-24 15:55:13 -0500 | [diff] [blame] | 1295 | /* Everything up to here is safe from nested NMIs */ |
| 1296 | |
Andy Lutomirski | a97439a | 2015-07-15 10:29:41 -0700 | [diff] [blame] | 1297 | #ifdef CONFIG_DEBUG_ENTRY |
| 1298 | /* |
| 1299 | * For ease of testing, unmask NMIs right away. Disabled by |
| 1300 | * default because IRET is very expensive. |
| 1301 | */ |
| 1302 | pushq $0 /* SS */ |
| 1303 | pushq %rsp /* RSP (minus 8 because of the previous push) */ |
| 1304 | addq $8, (%rsp) /* Fix up RSP */ |
| 1305 | pushfq /* RFLAGS */ |
| 1306 | pushq $__KERNEL_CS /* CS */ |
| 1307 | pushq $1f /* RIP */ |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1308 | iretq /* continues at repeat_nmi below */ |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1309 | UNWIND_HINT_IRET_REGS |
Andy Lutomirski | a97439a | 2015-07-15 10:29:41 -0700 | [diff] [blame] | 1310 | 1: |
| 1311 | #endif |
| 1312 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1313 | repeat_nmi: |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1314 | /* |
| 1315 | * If there was a nested NMI, the first NMI's iret will return |
| 1316 | * here. But NMIs are still enabled and we can take another |
| 1317 | * nested NMI. The nested NMI checks the interrupted RIP to see |
| 1318 | * if it is between repeat_nmi and end_repeat_nmi, and if so |
| 1319 | * it will just return, as we are about to repeat an NMI anyway. |
| 1320 | * This makes it safe to copy to the stack frame that a nested |
| 1321 | * NMI will update. |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1322 | * |
| 1323 | * RSP is pointing to "outermost RIP". gsbase is unknown, but, if |
| 1324 | * we're repeating an NMI, gsbase has the same value that it had on |
| 1325 | * the first iteration. paranoid_entry will load the kernel |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame] | 1326 | * gsbase if needed before we call exc_nmi(). "NMI executing" |
Andy Lutomirski | 36f1a77 | 2015-07-15 10:29:40 -0700 | [diff] [blame] | 1327 | * is zero. |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1328 | */ |
Andy Lutomirski | 36f1a77 | 2015-07-15 10:29:40 -0700 | [diff] [blame] | 1329 | movq $1, 10*8(%rsp) /* Set "NMI executing". */ |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1330 | |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1331 | /* |
| 1332 | * Copy the "outermost" frame to the "iret" frame. NMIs that nest |
| 1333 | * here must not modify the "iret" frame while we're writing to |
| 1334 | * it or it will end up containing garbage. |
| 1335 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1336 | addq $(10*8), %rsp |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1337 | .rept 5 |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1338 | pushq -6*8(%rsp) |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1339 | .endr |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1340 | subq $(5*8), %rsp |
Jan Beulich | 6261091 | 2012-02-24 14:54:37 +0000 | [diff] [blame] | 1341 | end_repeat_nmi: |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1342 | |
| 1343 | /* |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1344 | * Everything below this point can be preempted by a nested NMI. |
| 1345 | * If this happens, then the inner NMI will change the "iret" |
| 1346 | * frame to point back to repeat_nmi. |
Steven Rostedt | 3f3c8b8 | 2011-12-08 12:36:23 -0500 | [diff] [blame] | 1347 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1348 | pushq $-1 /* ORIG_RAX: no syscall to restart */ |
Denys Vlasenko | 76f5df4 | 2015-02-26 14:40:27 -0800 | [diff] [blame] | 1349 | |
Steven Rostedt | 1fd466e | 2011-12-08 12:32:27 -0500 | [diff] [blame] | 1350 | /* |
Denys Vlasenko | ebfc453 | 2015-02-26 14:40:34 -0800 | [diff] [blame] | 1351 | * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit |
Steven Rostedt | 1fd466e | 2011-12-08 12:32:27 -0500 | [diff] [blame] | 1352 | * as we should not be calling schedule in NMI context, even |
| 1353 | * with normal interrupts enabled. An NMI should not be |
| 1354 | * setting NEED_RESCHED or anything that normal interrupts and |
| 1355 | * exceptions might do. |
| 1356 | */ |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1357 | call paranoid_entry |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1358 | UNWIND_HINT_REGS |
Steven Rostedt | 7fbb98c | 2012-06-07 10:21:21 -0400 | [diff] [blame] | 1359 | |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1360 | movq %rsp, %rdi |
| 1361 | movq $-1, %rsi |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame] | 1362 | call exc_nmi |
Steven Rostedt | 7fbb98c | 2012-06-07 10:21:21 -0400 | [diff] [blame] | 1363 | |
Dave Hansen | 16561f2 | 2018-10-12 16:21:18 -0700 | [diff] [blame] | 1364 | /* Always restore stashed CR3 value (see paranoid_entry) */ |
Peter Zijlstra | 21e9445 | 2017-12-04 15:08:00 +0100 | [diff] [blame] | 1365 | RESTORE_CR3 scratch_reg=%r15 save_reg=%r14 |
Dave Hansen | 8a09317 | 2017-12-04 15:07:35 +0100 | [diff] [blame] | 1366 | |
Chang S. Bae | c82965f | 2020-05-28 16:13:57 -0400 | [diff] [blame] | 1367 | /* |
| 1368 | * The above invocation of paranoid_entry stored the GSBASE |
| 1369 | * related information in R/EBX depending on the availability |
| 1370 | * of FSGSBASE. |
| 1371 | * |
| 1372 | * If FSGSBASE is enabled, restore the saved GSBASE value |
| 1373 | * unconditionally, otherwise take the conditional SWAPGS path. |
| 1374 | */ |
| 1375 | ALTERNATIVE "jmp nmi_no_fsgsbase", "", X86_FEATURE_FSGSBASE |
| 1376 | |
| 1377 | wrgsbase %rbx |
| 1378 | jmp nmi_restore |
| 1379 | |
| 1380 | nmi_no_fsgsbase: |
| 1381 | /* EBX == 0 -> invoke SWAPGS */ |
| 1382 | testl %ebx, %ebx |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1383 | jnz nmi_restore |
Chang S. Bae | c82965f | 2020-05-28 16:13:57 -0400 | [diff] [blame] | 1384 | |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1385 | nmi_swapgs: |
Juergen Gross | 53c9d92 | 2021-01-20 14:55:44 +0100 | [diff] [blame] | 1386 | swapgs |
Chang S. Bae | c82965f | 2020-05-28 16:13:57 -0400 | [diff] [blame] | 1387 | |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1388 | nmi_restore: |
Dominik Brodowski | 502af0d | 2018-02-11 11:49:43 +0100 | [diff] [blame] | 1389 | POP_REGS |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1390 | |
Andy Lutomirski | 471ee48 | 2017-11-02 00:59:05 -0700 | [diff] [blame] | 1391 | /* |
| 1392 | * Skip orig_ax and the "outermost" frame to point RSP at the "iret" |
| 1393 | * frame. |
| 1394 | */ |
| 1395 | addq $6*8, %rsp |
Salman Qazi | 28696f4 | 2012-10-01 17:29:25 -0700 | [diff] [blame] | 1396 | |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1397 | /* |
| 1398 | * Clear "NMI executing". Set DF first so that we can easily |
| 1399 | * distinguish the remaining code between here and IRET from |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1400 | * the SYSCALL entry and exit paths. |
| 1401 | * |
| 1402 | * We arguably should just inspect RIP instead, but I (Andy) wrote |
| 1403 | * this code when I had the misapprehension that Xen PV supported |
| 1404 | * NMIs, and Xen PV would break that approach. |
Andy Lutomirski | 810bc07 | 2015-07-15 10:29:38 -0700 | [diff] [blame] | 1405 | */ |
| 1406 | std |
| 1407 | movq $0, 5*8(%rsp) /* clear "NMI executing" */ |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1408 | |
| 1409 | /* |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1410 | * iretq reads the "iret" frame and exits the NMI stack in a |
| 1411 | * single instruction. We are returning to kernel mode, so this |
| 1412 | * cannot result in a fault. Similarly, we don't need to worry |
| 1413 | * about espfix64 on the way back to kernel mode. |
Andy Lutomirski | 0b22930 | 2015-07-15 10:29:36 -0700 | [diff] [blame] | 1414 | */ |
Andy Lutomirski | 929bace | 2017-11-02 00:59:08 -0700 | [diff] [blame] | 1415 | iretq |
Thomas Gleixner | 6271fef | 2020-02-25 23:33:25 +0100 | [diff] [blame] | 1416 | SYM_CODE_END(asm_exc_nmi) |
Alexander van Heukelum | ddeb8f2 | 2008-11-24 13:24:28 +0100 | [diff] [blame] | 1417 | |
Andy Lutomirski | dffb3f9 | 2019-07-01 20:43:20 -0700 | [diff] [blame] | 1418 | #ifndef CONFIG_IA32_EMULATION |
| 1419 | /* |
| 1420 | * This handles SYSCALL from 32-bit code. There is no way to program |
| 1421 | * MSRs to fully disable 32-bit SYSCALL. |
| 1422 | */ |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1423 | SYM_CODE_START(ignore_sysret) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1424 | UNWIND_HINT_EMPTY |
Ingo Molnar | 4d73213 | 2015-06-08 20:43:07 +0200 | [diff] [blame] | 1425 | mov $-ENOSYS, %eax |
Jan Beulich | b2b1d94 | 2019-12-16 11:40:03 +0100 | [diff] [blame] | 1426 | sysretl |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1427 | SYM_CODE_END(ignore_sysret) |
Andy Lutomirski | dffb3f9 | 2019-07-01 20:43:20 -0700 | [diff] [blame] | 1428 | #endif |
Andy Lutomirski | 2deb4be | 2016-07-14 13:22:55 -0700 | [diff] [blame] | 1429 | |
Thomas Gleixner | b9f6976 | 2020-03-25 19:45:26 +0100 | [diff] [blame] | 1430 | .pushsection .text, "ax" |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1431 | SYM_CODE_START(rewind_stack_do_exit) |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1432 | UNWIND_HINT_FUNC |
Andy Lutomirski | 2deb4be | 2016-07-14 13:22:55 -0700 | [diff] [blame] | 1433 | /* Prevent any naive code from trying to unwind to our caller. */ |
| 1434 | xorl %ebp, %ebp |
| 1435 | |
| 1436 | movq PER_CPU_VAR(cpu_current_top_of_stack), %rax |
Josh Poimboeuf | 8c1f755 | 2017-07-11 10:33:44 -0500 | [diff] [blame] | 1437 | leaq -PTREGS_SIZE(%rax), %rsp |
Jann Horn | f977df7 | 2020-04-25 05:03:04 -0500 | [diff] [blame] | 1438 | UNWIND_HINT_REGS |
Andy Lutomirski | 2deb4be | 2016-07-14 13:22:55 -0700 | [diff] [blame] | 1439 | |
| 1440 | call do_exit |
Jiri Slaby | bc7b11c | 2019-10-11 13:51:03 +0200 | [diff] [blame] | 1441 | SYM_CODE_END(rewind_stack_do_exit) |
Thomas Gleixner | b9f6976 | 2020-03-25 19:45:26 +0100 | [diff] [blame] | 1442 | .popsection |