/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64			(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT			0x80000000
#define __AUDIT_ARCH_LE				0x40000000

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. In this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG			TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG			TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG			TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls.  The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries.  There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When user can change pt_regs->foo always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
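
/*
 * Illustration only, not part of the kernel: a minimal user-space sketch
 * that reaches this entry point using the register convention documented
 * above.  The syscall number assumes the x86-64 ABI value of __NR_write;
 * "buf" and "len" are placeholders.
 *
 *	movq	$1, %rax		# __NR_write
 *	movq	$1, %rdi		# arg0: fd = stdout
 *	leaq	buf(%rip), %rsi		# arg1: buffer
 *	movq	$len, %rdx		# arg2: byte count
 *	syscall				# rcx := rip, r11 := rflags
 *					# on return: rax = result or -errno
 */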

ENTRY(entry_SYSCALL_64)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(entry_SYSCALL_64_after_swapgs)

	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	TRACE_IRQS_OFF

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */

	/*
	 * If we need to do entry work or if we guess we'll need to do
	 * exit work, go straight to the slow path.
	 */
	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	entry_SYSCALL64_slow_path

entry_SYSCALL_64_fastpath:
	/*
	 * Easy case: enable interrupts and issue the syscall.  If the syscall
	 * needs pt_regs, we'll call a stub that disables interrupts again
	 * and jumps to the slow path.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx

	/*
	 * This call instruction is handled specially in stub_ptregs_64.
	 * It might end up jumping to the slow path.  If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
	call	*sys_call_table(, %rax, 8)
.Lentry_SYSCALL_64_after_fastpath_call:

	movq	%rax, RAX(%rsp)
1:

	/*
	 * If we get here, then we know that pt_regs is clean for SYSRET64.
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	1f

	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON				/* user mode is traced as IRQs on */
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

1:
	/*
	 * The fast path looked good when we started, but something changed
	 * along the way and we need to switch to the slow path.  Calling
	 * raise(3) will trigger this, for example.  IRQs are off.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	jmp	return_from_SYSCALL_64

entry_SYSCALL64_slow_path:
	/* IRQs are off. */
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	do_syscall_64			/* returns with IRQs disabled */

return_from_SYSCALL_64:
	RESTORE_EXTRA_REGS
	TRACE_IRQS_IRETQ			/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.
	 */
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11
	cmpq	%rcx, %r11			/* RCX == RIP */
	jne	opportunistic_sysret_failed

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If width of "canonical tail" ever becomes variable, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 */
	.ifne __VIRTUAL_MASK_SHIFT - 47
	.error "virtual address width changed -- SYSRET checks need update"
	.endif

	/* Change top 16 bits to be the sign-extension of 47th bit */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	opportunistic_sysret_failed
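
/*
 * Worked example of the canonical check above (illustration only), with
 * __VIRTUAL_MASK_SHIFT == 47, i.e. a shift count of 64 - 48 = 16:
 *
 *   canonical:     0x00007fffffffffff -> shl/sar -> 0x00007fffffffffff (unchanged)
 *   canonical:     0xffff880000000000 -> shl/sar -> 0xffff880000000000 (unchanged)
 *   non-canonical: 0x0000800000000000 -> shl/sar -> 0xffff800000000000 (changed,
 *                  so the cmpq/jne above falls back to IRET)
 */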

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	opportunistic_sysret_failed

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	opportunistic_sysret_failed

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET.  This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions.  For example, single-stepping
	 * this user code:
	 *
	 *           movq	$stuck_here, %rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	opportunistic_sysret_failed

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	opportunistic_sysret_failed

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

opportunistic_sysret_failed:
	SWAPGS
	jmp	restore_c_regs_and_iret
END(entry_SYSCALL_64)

ENTRY(stub_ptregs_64)
	/*
	 * Syscalls marked as needing ptregs land here.
	 * If we are on the fast path, we need to save the extra regs,
	 * which we achieve by trying again on the slow path.  If we are on
	 * the slow path, the extra regs are already saved.
	 *
	 * RAX stores a pointer to the C function implementing the syscall.
	 * IRQs are on.
	 */
	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
	jne	1f

	/*
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	popq	%rax
	jmp	entry_SYSCALL64_slow_path

1:
	jmp	*%rax				/* Called from C */
END(stub_ptregs_64)

.macro ptregs_stub func
ENTRY(ptregs_\func)
	leaq	\func(%rip), %rax
	jmp	stub_ptregs_64
END(ptregs_\func)
.endm

/* Instantiate ptregs_stub for each ptregs-using syscall */
#define __SYSCALL_64_QUAL_(sym)
#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
#include <asm/syscalls_64.h>

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_CC_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)
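
/*
 * For reference (a sketch, not the authoritative definition): the push
 * order above implies a frame that, from the new %rsp upward, matches
 * the struct inactive_task_frame declared in <asm/switch_to.h>:
 *
 *	struct inactive_task_frame {
 *		unsigned long r15;
 *		unsigned long r14;
 *		unsigned long r13;
 *		unsigned long r12;
 *		unsigned long bx;
 *		unsigned long bp;
 *		unsigned long ret_addr;	# pushed by the call to __switch_to_asm
 *	};
 *
 * If the pushes above ever change, that structure (and the fork code that
 * builds such a frame by hand for a new task) must change with them.
 */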

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 */
ENTRY(ret_from_fork)
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testb	$3, CS(%rsp)			/* from kernel_thread? */
	jnz	1f

	/*
	 * We came from kernel_thread.  This code path is quite twisted, and
	 * someone should clean it up.
	 *
	 * copy_thread_tls stashes the function pointer in RBX and the
	 * parameter to be passed in RBP.  The called function is permitted
	 * to call do_execve and thereby jump to user mode.
	 */
	movq	RBP(%rsp), %rdi
	call	*RBX(%rsp)
	movl	$0, RAX(%rsp)

	/*
	 * Fall through as though we're exiting a syscall.  This makes a
	 * twisted sort of sense if we just called do_execve.
	 */

1:
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	TRACE_IRQS_ON				/* user mode is traced as IRQS on */
	SWAPGS
	jmp	restore_regs_and_iret
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)
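
/*
 * A note on the packing above (derived from the code, as a sketch): each
 * .rept iteration emits a short push of a byte-sized immediate (which is
 * why the vector is encoded as ~vector+0x80, keeping it in signed byte
 * range) followed by a jmp to common_interrupt, and the .align pads the
 * pair out to exactly 8 bytes.  The IDT setup code can therefore locate
 * the stub for external vector N at
 * irq_entries_start + 8 * (N - FIRST_EXTERNAL_VECTOR).
 */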

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode.  Switch to kernel gsbase and inform context
	 * tracking that we're in kernel mode.
	 */
	SWAPGS

	/*
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).  Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode.  There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	/*
	 * Save previous stack pointer, optionally switch to interrupt stack.
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
	movq	%rsp, %rdi
	incl	PER_CPU_VAR(irq_count)
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func				/* rdi points to pt_regs */
	.endm
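
/*
 * A note on the irq_count/cmovzq idiom in the macro above (and again in
 * do_softirq_own_stack below), assuming irq_count is initialized to -1
 * by the per-CPU setup code: the incl sets ZF only on the outermost
 * entry (the counter goes from -1 to 0), so the conditional move switches
 * %rsp to the per-CPU interrupt stack exactly once, and nested interrupts
 * keep running on that stack.  The old %rsp saved in %rdi and pushed
 * afterwards is what ret_from_intr pops back into %rsp.
 */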

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq	%rsp

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp	restore_regs_and_iret

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

/*
 * At this label, code paths which return to kernel and to user,
 * which come from interrupts/exception and from syscalls, merge.
 */
GLOBAL(restore_regs_and_iret)
	RESTORE_EXTRA_REGS
restore_c_regs_and_iret:
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	pushq	%rax
	pushq	%rdi
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* RAX */
	movq	(2*8)(%rsp), %rax		/* RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* CS */
	movq	%rax, (2*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(6*8)(%rsp), %rax		/* SS */
	movq	%rax, (5*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* RSP */
	movq	%rax, (4*8)(%rdi)
	andl	$0xffff0000, %eax
	popq	%rdi
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWAPGS
	movq	%rax, %rsp
	popq	%rax
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

#ifdef CONFIG_TRACING
#define trace(sym) trace_##sym
#define smp_trace(sym) smp_trace_##sym

.macro trace_apicinterrupt num sym
apicinterrupt3 \num trace(\sym) smp_trace(\sym)
.endm
#else
.macro trace_apicinterrupt num sym do_sym
.endm
#endif

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
# define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
# define POP_SECTION_IRQENTRY	.popsection
#else
# define PUSH_SECTION_IRQENTRY
# define POP_SECTION_IRQENTRY
#endif

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
trace_apicinterrupt \num \sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME

	.ifeq \has_error_code
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	1f
	.endif
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid == 1
	/*
	 * Paranoid entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call	error_entry


	movq	%rsp, %rdi			/* pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm

#ifdef CONFIG_TRACING
.macro trace_idtentry sym do_sym has_error_code:req
idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#else
.macro trace_idtentry sym do_sym has_error_code:req
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#endif

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


	/*
	 * Reload gs selector with exception handling
	 * edi:  new selector
	 */
ENTRY(native_load_gs_index)
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	popfq
	ret
END(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	incl	PER_CPU_VAR(irq_count)
	cmove	PER_CPU_VAR(irq_stack_ptr), %rsp
	push	%rbp				/* frame pointer backlink */
	call	__do_softirq
	leaveq
	decl	PER_CPU_VAR(irq_count)
	ret
END(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct pt_regs *) */

/*
 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
 * see the correct pointer to the pt_regs
 */
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
11:	incl	PER_CPU_VAR(irq_count)
	movq	%rsp, %rbp
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rbp				/* frame pointer backlink */
	call	xen_evtchn_do_upcall
	popq	%rsp
	decl	PER_CPU_VAR(irq_count)
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	pushq	%r11
	pushq	%rcx
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment).  Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$-1				/* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xen_debug		do_debug		has_error_code=0
idtentry xen_int3		do_int3			has_error_code=0
idtentry xen_stack_segment	do_stack_segment	has_error_code=1
#endif

idtentry general_protection	do_general_protection	has_error_code=1
trace_idtentry page_fault	do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check					has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use slow, but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx
1:	ret
END(paranoid_entry)
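
/*
 * A note on the MSR_GS_BASE test above: rdmsr returns the upper half of
 * the base in %edx.  The kernel's gsbase points at the per-CPU area,
 * which lives in the kernel (upper) half of the address space, so bit 63
 * is set and %edx tests negative; a user gsbase is a lower-half address,
 * so %edx is non-negative and we still need to SWAPGS.  (This relies on
 * kernel gsbase always being a kernel-half address, which holds for the
 * per-CPU mappings used here.)
 */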

/*
 * "Paranoid" exit path from exception stack.  This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated.  Fortunately, there's no good reason
 * to try to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
ENTRY(paranoid_exit)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF_DEBUG
	testl	%ebx, %ebx			/* swapgs needed? */
	jnz	paranoid_exit_no_swapgs
	TRACE_IRQS_IRETQ
	SWAPGS_UNSAFE_STACK
	jmp	paranoid_exit_restore
paranoid_exit_no_swapgs:
	TRACE_IRQS_IRETQ_DEBUG
paranoid_exit_restore:
	RESTORE_EXTRA_REGS
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN
END(paranoid_exit)

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Return: EBX=0: came from user mode; EBX=1: otherwise
 */
ENTRY(error_entry)
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	xorl	%ebx, %ebx
	testb	$3, CS+8(%rsp)
	jz	.Lerror_kernelspace

.Lerror_entry_from_usermode_swapgs:
	/*
	 * We entered from user mode or we're pretending to have entered
	 * from user mode due to an IRET fault.
	 */
	SWAPGS

.Lerror_entry_from_usermode_after_swapgs:
	/*
	 * We need to tell lockdep that IRQs are off.  We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks).
	 */
	TRACE_IRQS_OFF
	CALL_enter_from_user_mode
	ret

.Lerror_entry_done:
	TRACE_IRQS_OFF
	ret

	/*
	 * There are two places in the kernel that can potentially fault with
	 * usergs.  Handle them here.  B stepping K8s sometimes report a
	 * truncated RIP for IRET exceptions returning to compat mode.  Check
	 * for these here too.
	 */
.Lerror_kernelspace:
	incl	%ebx
	leaq	native_irq_return_iret(%rip), %rcx
	cmpq	%rcx, RIP+8(%rsp)
	je	.Lerror_bad_iret
	movl	%ecx, %eax			/* zero extend */
	cmpq	%rax, RIP+8(%rsp)
	je	.Lbstep_iret
	cmpq	$.Lgs_change, RIP+8(%rsp)
	jne	.Lerror_entry_done

	/*
	 * hack: .Lgs_change can fail with user gsbase.  If this happens, fix up
	 * gsbase and proceed.  We'll fix up the exception and land in
	 * .Lgs_change's error handler with kernel gsbase.
	 */
	jmp	.Lerror_entry_from_usermode_swapgs

.Lbstep_iret:
	/* Fix truncated RIP */
	movq	%rcx, RIP+8(%rsp)
	/* fall through */

.Lerror_bad_iret:
	/*
	 * We came from an IRET to user mode, so we have user gsbase.
	 * Switch to kernel gsbase:
	 */
	SWAPGS

	/*
	 * Pretend that the exception came from user mode: set up pt_regs
	 * as if we faulted immediately after IRET and clear EBX so that
	 * error_exit knows that we will be returning to user mode.
	 */
	mov	%rsp, %rdi
	call	fixup_bad_iret
	mov	%rax, %rsp
	decl	%ebx
	jmp	.Lerror_entry_from_usermode_after_swapgs
END(error_entry)


/*
1110 * On entry, EBX is a "return to kernel mode" flag:
1111 * 1: already in kernel mode, don't need SWAPGS
1112 * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
1113 */
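/*
 * Editorial sketch (not in the original file): the EBX handshake between
 * error_entry and error_exit, as rough pseudocode.  Names are illustrative;
 * the authoritative flow is the assembly above and below:
 *
 *	ebx = error_entry();	// 0: from (or faking) user mode, GS already
 *				//    swapped to the kernel gsbase
 *				// 1: from kernel mode, no SWAPGS done
 *	... C exception handler runs on pt_regs ...
 *	error_exit:
 *		if (ebx)
 *			goto retint_kernel;	// stay on kernel gsbase
 *		else
 *			goto retint_user;	// exit-to-usermode work, then SWAPGS
 */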
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001114ENTRY(error_exit)
Ingo Molnar4d732132015-06-08 20:43:07 +02001115 movl %ebx, %eax
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001116 DISABLE_INTERRUPTS(CLBR_NONE)
1117 TRACE_IRQS_OFF
Ingo Molnar4d732132015-06-08 20:43:07 +02001118 testl %eax, %eax
1119 jnz retint_kernel
1120 jmp retint_user
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001121END(error_exit)
1122
Denys Vlasenko0784b362015-04-01 16:50:57 +02001123/* Runs on exception stack */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001124ENTRY(nmi)
Andy Lutomirskifc57a7c2015-09-20 16:32:04 -07001125 /*
1126 * Fix up the exception frame if we're on Xen.
1127 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
1128 * one value to the stack on native, so it may clobber the rdx
1129 * scratch slot, but it won't clobber any of the important
1130 * slots past it.
1131 *
1132 * Xen is a different story, because the Xen frame itself overlaps
1133 * the "NMI executing" variable.
1134 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001135 PARAVIRT_ADJUST_EXCEPTION_FRAME
Andy Lutomirskifc57a7c2015-09-20 16:32:04 -07001136
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001137 /*
1138 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1139 * the iretq it performs will take us out of NMI context.
1140 * This means that we can have nested NMIs where the next
1141 * NMI is using the top of the stack of the previous NMI. We
1142 * can't let it execute because the nested NMI will corrupt the
1143 * stack of the previous NMI. NMI handlers are not re-entrant
1144 * anyway.
1145 *
1146 * To handle this case we do the following:
1147 * Check a special location on the stack that contains
1148 * a variable that is set when NMIs are executing.
1149 * The interrupted task's stack is also checked to see if it
1150 * is an NMI stack.
1151 * If the variable is not set and the stack is not the NMI
1152 * stack then:
1153 * o Set the special variable on the stack
Andy Lutomirski0b229302015-07-15 10:29:36 -07001154 * o Copy the interrupt frame into an "outermost" location on the
1155 * stack
1156 * o Copy the interrupt frame into an "iret" location on the stack
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001157 * o Continue processing the NMI
1158 * If the variable is set or the previous stack is the NMI stack:
Andy Lutomirski0b229302015-07-15 10:29:36 -07001159 * o Modify the "iret" location to jump to repeat_nmi
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001160 * o Return to the first NMI
1161 *
1162 * Now on exit of the first NMI, we first clear the stack variable.
1163 * The NMI stack will tell any nested NMIs at that point that it is
1164 * nested. Then we pop the stack normally with iret, and if there was
1165 * a nested NMI that updated the copy interrupt stack frame, a
1166 * jump will be made to the repeat_nmi code that will handle the second
1167 * NMI.
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001168 *
1169 * However, espfix prevents us from directly returning to userspace
1170 * with a single IRET instruction. Similarly, IRET to user mode
1171 * can fault. We therefore handle NMIs from user space like
1172 * other IST entries.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001173 */
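/*
 * Editorial pseudocode sketch of the scheme described above (not part of
 * the original file); names are illustrative only:
 *
 *	nmi():
 *		if (nmi_executing || interrupted_rsp_on_nmi_stack()) {
 *			point the "iret" frame at repeat_nmi;
 *			return;			// IRET back into the outer NMI
 *		}
 *		copy hardware frame -> "outermost" frame;
 *	repeat:
 *		nmi_executing = 1;
 *		copy "outermost" frame -> "iret" frame;
 *		do_nmi(pt_regs);
 *		nmi_executing = 0;
 *		IRET via the "iret" frame;	// lands at repeat: if a nested
 *						// NMI redirected the frame
 */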
1174
Denys Vlasenko146b2b02015-03-25 18:18:13 +01001175 /* Use %rdx as our temp variable throughout */
Ingo Molnar4d732132015-06-08 20:43:07 +02001176 pushq %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001177
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001178 testb $3, CS-RIP+8(%rsp)
1179 jz .Lnmi_from_kernel
Steven Rostedt45d5a162012-02-19 16:43:37 -05001180
1181 /*
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001182 * NMI from user mode. We need to run on the thread stack, but we
1183 * can't go through the normal entry paths: NMIs are masked, and
1184 * we don't want to enable interrupts, because then we'll end
1185 * up in an awkward situation in which IRQs are on but NMIs
1186 * are off.
Andy Lutomirski83c133c2015-09-20 16:32:05 -07001187 *
1188 * We also must not push anything to the stack before switching
1189 * stacks lest we corrupt the "NMI executing" variable.
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001190 */
1191
Andy Lutomirski83c133c2015-09-20 16:32:05 -07001192 SWAPGS_UNSAFE_STACK
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001193 cld
1194 movq %rsp, %rdx
1195 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
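/*
 * Orientation note (editorial, not from the original file): %rdx still
 * points into the NMI IST stack, which at this point holds:
 *
 *	0*8(%rdx)  saved %rdx	(the pushq %rdx above)
 *	1*8(%rdx)  RIP
 *	2*8(%rdx)  CS
 *	3*8(%rdx)  RFLAGS
 *	4*8(%rdx)  RSP
 *	5*8(%rdx)  SS		(hardware frame pushed by the NMI)
 *
 * The pushes below rebuild that frame, and then the GP registers, on the
 * thread stack to form pt_regs for do_nmi.
 */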
1196 pushq 5*8(%rdx) /* pt_regs->ss */
1197 pushq 4*8(%rdx) /* pt_regs->rsp */
1198 pushq 3*8(%rdx) /* pt_regs->flags */
1199 pushq 2*8(%rdx) /* pt_regs->cs */
1200 pushq 1*8(%rdx) /* pt_regs->rip */
1201 pushq $-1 /* pt_regs->orig_ax */
1202 pushq %rdi /* pt_regs->di */
1203 pushq %rsi /* pt_regs->si */
1204 pushq (%rdx) /* pt_regs->dx */
1205 pushq %rcx /* pt_regs->cx */
1206 pushq %rax /* pt_regs->ax */
1207 pushq %r8 /* pt_regs->r8 */
1208 pushq %r9 /* pt_regs->r9 */
1209 pushq %r10 /* pt_regs->r10 */
1210 pushq %r11 /* pt_regs->r11 */
1211 pushq %rbx /* pt_regs->rbx */
1212 pushq %rbp /* pt_regs->rbp */
1213 pushq %r12 /* pt_regs->r12 */
1214 pushq %r13 /* pt_regs->r13 */
1215 pushq %r14 /* pt_regs->r14 */
1216 pushq %r15 /* pt_regs->r15 */
1217
1218 /*
1219 * At this point we no longer need to worry about stack damage
1220 * due to nesting -- we're on the normal thread stack and we're
1221 * done with the NMI stack.
1222 */
1223
1224 movq %rsp, %rdi
1225 movq $-1, %rsi
1226 call do_nmi
1227
1228 /*
1229 * Return back to user mode. We must *not* do the normal exit
1230 * work, because we don't want to enable interrupts. Fortunately,
1231 * do_nmi doesn't modify pt_regs.
1232 */
1233 SWAPGS
1234 jmp restore_c_regs_and_iret
1235
1236.Lnmi_from_kernel:
1237 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001238 * Here's what our stack frame will look like:
1239 * +---------------------------------------------------------+
1240 * | original SS |
1241 * | original Return RSP |
1242 * | original RFLAGS |
1243 * | original CS |
1244 * | original RIP |
1245 * +---------------------------------------------------------+
1246 * | temp storage for rdx |
1247 * +---------------------------------------------------------+
1248 * | "NMI executing" variable |
1249 * +---------------------------------------------------------+
1250 * | iret SS } Copied from "outermost" frame |
1251 * | iret Return RSP } on each loop iteration; overwritten |
1252 * | iret RFLAGS } by a nested NMI to force another |
1253 * | iret CS } iteration if needed. |
1254 * | iret RIP } |
1255 * +---------------------------------------------------------+
1256 * | outermost SS } initialized in first_nmi; |
1257 * | outermost Return RSP } will not be changed before |
1258 * | outermost RFLAGS } NMI processing is done. |
1259 * | outermost CS } Copied to "iret" frame on each |
1260 * | outermost RIP } iteration. |
1261 * +---------------------------------------------------------+
1262 * | pt_regs |
1263 * +---------------------------------------------------------+
1264 *
1265 * The "original" frame is used by hardware. Before re-enabling
1266 * NMIs, we need to be done with it, and we need to leave enough
1267 * space for the asm code here.
1268 *
1269 * We return by executing IRET while RSP points to the "iret" frame.
1270 * That will either return for real or it will loop back into NMI
1271 * processing.
1272 *
1273 * The "outermost" frame is copied to the "iret" frame on each
1274 * iteration of the loop, so each iteration starts with the "iret"
1275 * frame pointing to the final return target.
1276 */
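/*
 * Editorial cross-reference (not in the original file): with RSP at
 * "outermost RIP", as it is at repeat_nmi below, the magic offsets used
 * later map onto the picture above as:
 *
 *	10*8(%rsp)  "NMI executing" variable	(movq $1, 10*8(%rsp))
 *	 5*8(%rsp)  "iret" RIP			(start of the "iret" frame)
 *
 * and after nmi_restore's REMOVE_PT_GPREGS_FROM_STACK 6*8, RSP points at
 * "iret" RIP, so "NMI executing" sits at 5*8(%rsp) there.
 */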
1277
1278 /*
1279 * Determine whether we're a nested NMI.
1280 *
Andy Lutomirskia27507c2015-07-15 10:29:37 -07001281 * If we interrupted kernel code between repeat_nmi and
1282 * end_repeat_nmi, then we are a nested NMI. We must not
1283 * modify the "iret" frame because it's being written by
1284 * the outer NMI. That's okay; the outer NMI handler is
1285 * about to call do_nmi anyway, so we can just
1286 * resume the outer NMI.
1287 */
1288
1289 movq $repeat_nmi, %rdx
1290 cmpq 8(%rsp), %rdx
1291 ja 1f
1292 movq $end_repeat_nmi, %rdx
1293 cmpq 8(%rsp), %rdx
1294 ja nested_nmi_out
12951:
1296
1297 /*
1298 * Now check "NMI executing". If it's set, then we're nested.
Andy Lutomirski0b229302015-07-15 10:29:36 -07001299 * This will not detect if we interrupted an outer NMI just
1300 * before IRET.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001301 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001302 cmpl $1, -8(%rsp)
1303 je nested_nmi
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001304
1305 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001306 * Now test if the previous stack was an NMI stack. This covers
1307 * the case where we interrupt an outer NMI after it clears
Andy Lutomirski810bc072015-07-15 10:29:38 -07001308 * "NMI executing" but before IRET. We need to be careful, though:
1309 * there is one case in which RSP could point to the NMI stack
1310 * despite there being no NMI active: naughty userspace controls
1311 * RSP at the very beginning of the SYSCALL targets. We can
1312 * pull a fast one on naughty userspace, though: we program
1313 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1314 * if it controls the kernel's RSP. We set DF before we clear
1315 * "NMI executing".
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001316 */
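/*
 * Editorial sketch (not part of the original file): the range check
 * below, as hypothetical C.  "prev_rsp" is the interrupted RSP from the
 * hardware frame (4*8(%rsp) here); "nmi_stack_top" is RSP+6*8, the stack
 * value before the hardware frame was pushed:
 *
 *	if (prev_rsp <= nmi_stack_top &&
 *	    prev_rsp >= nmi_stack_top - EXCEPTION_STKSZ &&
 *	    (rflags & X86_EFLAGS_DF))	// DF rules out SYSCALL-controlled RSP
 *		this is a nested NMI;
 *	else
 *		this is a first (outermost) NMI;
 */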
Denys Vlasenko0784b362015-04-01 16:50:57 +02001317 lea 6*8(%rsp), %rdx
1318 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1319 cmpq %rdx, 4*8(%rsp)
1320 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1321 ja first_nmi
Ingo Molnar4d732132015-06-08 20:43:07 +02001322
Denys Vlasenko0784b362015-04-01 16:50:57 +02001323 subq $EXCEPTION_STKSZ, %rdx
1324 cmpq %rdx, 4*8(%rsp)
1325 /* If it is below the NMI stack, it is a normal NMI */
1326 jb first_nmi
Andy Lutomirski810bc072015-07-15 10:29:38 -07001327
1328 /* Ah, it is within the NMI stack. */
1329
1330 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1331 jz first_nmi /* RSP was user controlled. */
1332
1333 /* This is a nested NMI. */
Denys Vlasenko0784b362015-04-01 16:50:57 +02001334
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001335nested_nmi:
1336 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001337 * Modify the "iret" frame to point to repeat_nmi, forcing another
1338 * iteration of NMI handling.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001339 */
Andy Lutomirski23a781e2015-07-15 10:29:39 -07001340 subq $8, %rsp
Ingo Molnar4d732132015-06-08 20:43:07 +02001341 leaq -10*8(%rsp), %rdx
1342 pushq $__KERNEL_DS
1343 pushq %rdx
Ingo Molnar131484c2015-05-28 12:21:47 +02001344 pushfq
Ingo Molnar4d732132015-06-08 20:43:07 +02001345 pushq $__KERNEL_CS
1346 pushq $repeat_nmi
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001347
1348 /* Put stack back */
Ingo Molnar4d732132015-06-08 20:43:07 +02001349 addq $(6*8), %rsp
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001350
1351nested_nmi_out:
Ingo Molnar4d732132015-06-08 20:43:07 +02001352 popq %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001353
Andy Lutomirski0b229302015-07-15 10:29:36 -07001354 /* We are returning to kernel mode, so this cannot result in a fault. */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001355 INTERRUPT_RETURN
1356
1357first_nmi:
Andy Lutomirski0b229302015-07-15 10:29:36 -07001358 /* Restore rdx. */
Ingo Molnar4d732132015-06-08 20:43:07 +02001359 movq (%rsp), %rdx
Jan Beulich62610912012-02-24 14:54:37 +00001360
Andy Lutomirski36f1a772015-07-15 10:29:40 -07001361 /* Make room for "NMI executing". */
1362 pushq $0
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001363
Andy Lutomirski0b229302015-07-15 10:29:36 -07001364 /* Leave room for the "iret" frame */
Ingo Molnar4d732132015-06-08 20:43:07 +02001365 subq $(5*8), %rsp
Salman Qazi28696f42012-10-01 17:29:25 -07001366
Andy Lutomirski0b229302015-07-15 10:29:36 -07001367 /* Copy the "original" frame to the "outermost" frame */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001368 .rept 5
Ingo Molnar4d732132015-06-08 20:43:07 +02001369 pushq 11*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001370 .endr
Jan Beulich62610912012-02-24 14:54:37 +00001371
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001372 /* Everything up to here is safe from nested NMIs */
1373
Andy Lutomirskia97439a2015-07-15 10:29:41 -07001374#ifdef CONFIG_DEBUG_ENTRY
1375 /*
1376 * For ease of testing, unmask NMIs right away. Disabled by
1377 * default because IRET is very expensive.
1378 */
1379 pushq $0 /* SS */
1380 pushq %rsp /* RSP (minus 8 because of the previous push) */
1381 addq $8, (%rsp) /* Fix up RSP */
1382 pushfq /* RFLAGS */
1383 pushq $__KERNEL_CS /* CS */
1384 pushq $1f /* RIP */
1385 INTERRUPT_RETURN /* continues at repeat_nmi below */
13861:
1387#endif
1388
Andy Lutomirski0b229302015-07-15 10:29:36 -07001389repeat_nmi:
Jan Beulich62610912012-02-24 14:54:37 +00001390 /*
1391 * If there was a nested NMI, the first NMI's iret will return
1392 * here. But NMIs are still enabled and we can take another
1393 * nested NMI. The nested NMI checks the interrupted RIP to see
1394 * if it is between repeat_nmi and end_repeat_nmi, and if so
1395 * it will just return, as we are about to repeat an NMI anyway.
1396 * This makes it safe to copy to the stack frame that a nested
1397 * NMI will update.
Andy Lutomirski0b229302015-07-15 10:29:36 -07001398 *
1399 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1400 * we're repeating an NMI, gsbase has the same value that it had on
1401 * the first iteration. paranoid_entry will load the kernel
Andy Lutomirski36f1a772015-07-15 10:29:40 -07001402 * gsbase if needed before we call do_nmi. "NMI executing"
1403 * is zero.
Jan Beulich62610912012-02-24 14:54:37 +00001404 */
Andy Lutomirski36f1a772015-07-15 10:29:40 -07001405 movq $1, 10*8(%rsp) /* Set "NMI executing". */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001406
Andy Lutomirski0b229302015-07-15 10:29:36 -07001407 /*
1408 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1409 * here must not modify the "iret" frame while we're writing to
1410 * it or it will end up containing garbage.
1411 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001412 addq $(10*8), %rsp
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001413 .rept 5
Ingo Molnar4d732132015-06-08 20:43:07 +02001414 pushq -6*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001415 .endr
Ingo Molnar4d732132015-06-08 20:43:07 +02001416 subq $(5*8), %rsp
Jan Beulich62610912012-02-24 14:54:37 +00001417end_repeat_nmi:
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001418
1419 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001420 * Everything below this point can be preempted by a nested NMI.
1421 * If this happens, then the inner NMI will change the "iret"
1422 * frame to point back to repeat_nmi.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001423 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001424 pushq $-1 /* ORIG_RAX: no syscall to restart */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001425 ALLOC_PT_GPREGS_ON_STACK
1426
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001427 /*
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001428 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001429 * as we should not be calling schedule in NMI context,
1430 * even with normal interrupts enabled. An NMI should not be
1431 * setting NEED_RESCHED or anything that normal interrupts and
1432 * exceptions might do.
1433 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001434 call paranoid_entry
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001435
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001436 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
Ingo Molnar4d732132015-06-08 20:43:07 +02001437 movq %rsp, %rdi
1438 movq $-1, %rsi
1439 call do_nmi
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001440
Ingo Molnar4d732132015-06-08 20:43:07 +02001441 testl %ebx, %ebx /* swapgs needed? */
1442 jnz nmi_restore
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001443nmi_swapgs:
1444 SWAPGS_UNSAFE_STACK
1445nmi_restore:
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001446 RESTORE_EXTRA_REGS
1447 RESTORE_C_REGS
Andy Lutomirski0b229302015-07-15 10:29:36 -07001448
1449 /* Point RSP at the "iret" frame. */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001450 REMOVE_PT_GPREGS_FROM_STACK 6*8
Salman Qazi28696f42012-10-01 17:29:25 -07001451
Andy Lutomirski810bc072015-07-15 10:29:38 -07001452 /*
1453 * Clear "NMI executing". Set DF first so that we can easily
1454 * distinguish the remaining code between here and IRET from
1455 * the SYSCALL entry and exit paths. On a native kernel, we
1456 * could just inspect RIP, but, on paravirt kernels,
1457 * INTERRUPT_RETURN can translate into a jump into a
1458 * hypercall page.
1459 */
1460 std
1461 movq $0, 5*8(%rsp) /* clear "NMI executing" */
Andy Lutomirski0b229302015-07-15 10:29:36 -07001462
1463 /*
1464 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
1465 * stack in a single instruction. We are returning to kernel
1466 * mode, so this cannot result in a fault.
1467 */
Andy Lutomirski5ca6f702015-06-04 13:24:29 -07001468 INTERRUPT_RETURN
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001469END(nmi)
1470
1471ENTRY(ignore_sysret)
Ingo Molnar4d732132015-06-08 20:43:07 +02001472 mov $-ENOSYS, %eax
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001473 sysret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001474END(ignore_sysret)
Andy Lutomirski2deb4be2016-07-14 13:22:55 -07001475
1476ENTRY(rewind_stack_do_exit)
1477 /* Prevent any naive code from trying to unwind to our caller. */
1478 xorl %ebp, %ebp
1479
1480 movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
1481 leaq -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
1482
1483 call do_exit
14841: jmp 1b
1485END(rewind_stack_do_exit)