/*
 * linux/arch/x86_64/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
 * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * A note on terminology:
 * - iret frame:	Architecture defined interrupt frame from SS to RIP
 *			at the top of the kernel process stack.
 *
 * Some macro usage:
 * - ENTRY/END:		Define functions in the symbol table.
 * - TRACE_IRQ_*:	Trace hardirq state for lock debugging.
 * - idtentry:		Define exception entry points.
 */
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000

.code64
.section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. In this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9, EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * This is the only entry point used for 64-bit system calls. The
 * hardware interface is reasonably well designed and the register to
 * argument mapping Linux uses fits well with the registers that are
 * available when SYSCALL is used.
 *
 * SYSCALL instructions can be found inlined in libc implementations as
 * well as some other programs and libraries. There are also a handful
 * of SYSCALL instructions in the vDSO used, for example, as a
 * clock_gettimeofday fallback.
 *
 * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax	system call number
 * rcx	return address
 * r11	saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi	arg0
 * rsi	arg1
 * rdx	arg2
 * r10	arg3 (needs to be moved to rcx to conform to C ABI)
 * r8	arg4
 * r9	arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is because
 * IRET deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
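
/*
 * For illustration only (editor's sketch, not part of the kernel): a
 * minimal userspace sequence that reaches this entry point, following
 * the register convention above. It performs write(1, msg, 14), using
 * the standard x86-64 syscall number __NR_write == 1; "msg" is a
 * hypothetical data symbol used only for the example.
 *
 *	movq	$1, %rax		# system call number (__NR_write)
 *	movq	$1, %rdi		# arg0: fd = stdout
 *	leaq	msg(%rip), %rsi		# arg1: buffer
 *	movq	$14, %rdx		# arg2: byte count
 *	syscall				# rcx := return RIP, r11 := RFLAGS
 *					# on return, rax = result or -errno
 */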

ENTRY(entry_SYSCALL_64)
	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(entry_SYSCALL_64_after_swapgs)

	movq	%rsp, PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	TRACE_IRQS_OFF

	/* Construct struct pt_regs on stack */
	pushq	$__USER_DS			/* pt_regs->ss */
	pushq	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	pushq	%r11				/* pt_regs->flags */
	pushq	$__USER_CS			/* pt_regs->cs */
	pushq	%rcx				/* pt_regs->ip */
	pushq	%rax				/* pt_regs->orig_ax */
	pushq	%rdi				/* pt_regs->di */
	pushq	%rsi				/* pt_regs->si */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	$-ENOSYS			/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	sub	$(6*8), %rsp			/* pt_regs->bp, bx, r12-15 not saved */
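	/*
	 * Editor's note: the 6*8-byte hole above leaves pt_regs->bp, ->bx and
	 * ->r12-r15 unwritten on the fast path; the slow paths below run
	 * SAVE_EXTRA_REGS to fill them in before C code sees the full pt_regs.
	 */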

	/*
	 * If we need to do entry work or if we guess we'll need to do
	 * exit work, go straight to the slow path.
	 */
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	entry_SYSCALL64_slow_path

entry_SYSCALL_64_fastpath:
	/*
	 * Easy case: enable interrupts and issue the syscall. If the syscall
	 * needs pt_regs, we'll call a stub that disables interrupts again
	 * and jumps to the slow path.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max, %rax
#else
	andl	$__SYSCALL_MASK, %eax
	cmpl	$__NR_syscall_max, %eax
#endif
	ja	1f				/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10, %rcx

	/*
	 * This call instruction is handled specially in stub_ptregs_64.
	 * It might end up jumping to the slow path. If it jumps, RAX
	 * and all argument registers are clobbered.
	 */
	call	*sys_call_table(, %rax, 8)
.Lentry_SYSCALL_64_after_fastpath_call:

	movq	%rax, RAX(%rsp)
1:

	/*
	 * If we get here, then we know that pt_regs is clean for SYSRET64.
	 * If we see that no exit work is required (which we are required
	 * to check with IRQs off), then we can go straight to SYSRET64.
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	movq	PER_CPU_VAR(current_task), %r11
	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
	jnz	1f

	LOCKDEP_SYS_EXIT
	TRACE_IRQS_ON				/* user mode is traced as IRQs on */
	movq	RIP(%rsp), %rcx
	movq	EFLAGS(%rsp), %r11
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

1:
	/*
	 * The fast path looked good when we started, but something changed
	 * along the way and we need to switch to the slow path. Calling
	 * raise(3) will trigger this, for example. IRQs are off.
	 */
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	jmp	return_from_SYSCALL_64

entry_SYSCALL64_slow_path:
	/* IRQs are off. */
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	call	do_syscall_64			/* returns with IRQs disabled */

return_from_SYSCALL_64:
	RESTORE_EXTRA_REGS
	TRACE_IRQS_IRETQ			/* we're about to change IF */

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.
	 */
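	/*
	 * Summary of the checks below (all must pass for SYSRET to be used):
	 * saved RCX == saved RIP, RIP is canonical, CS == __USER_CS,
	 * saved R11 == saved RFLAGS, RF and TF clear in RFLAGS, and
	 * SS == __USER_DS. Anything else falls back to IRET via
	 * opportunistic_sysret_failed.
	 */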
	movq	RCX(%rsp), %rcx
	movq	RIP(%rsp), %r11
	cmpq	%rcx, %r11			/* RCX == RIP */
	jne	opportunistic_sysret_failed

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space. This essentially lets the user take over
	 * the kernel, since userspace controls RSP.
	 *
	 * If the width of the "canonical tail" ever becomes variable, this
	 * will need to be updated to remain correct on both old and new CPUs.
	 */
	.ifne __VIRTUAL_MASK_SHIFT - 47
	.error "virtual address width changed -- SYSRET checks need update"
	.endif

	/* Change top 16 bits to be the sign-extension of 47th bit */
	shl	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
	sar	$(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx

	/* If this changed %rcx, it was not canonical */
	cmpq	%rcx, %r11
	jne	opportunistic_sysret_failed
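	/*
	 * Worked example (with __VIRTUAL_MASK_SHIFT == 47, i.e. 48-bit
	 * virtual addresses): shl $16 / sar $16 turns the non-canonical
	 * 0x0000800000000000 into 0xffff800000000000, so the compare above
	 * sees a change and we take the IRET path; a canonical address such
	 * as 0x00007fffffffe000 is left unchanged and passes.
	 */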

	cmpq	$__USER_CS, CS(%rsp)		/* CS must match SYSRET */
	jne	opportunistic_sysret_failed

	movq	R11(%rsp), %r11
	cmpq	%r11, EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne	opportunistic_sysret_failed

	/*
	 * SYSCALL clears RF when it saves RFLAGS in R11 and SYSRET cannot
	 * restore RF properly. If the slowpath sets it for whatever reason, we
	 * need to restore it correctly.
	 *
	 * SYSRET can restore TF, but unlike IRET, restoring TF results in a
	 * trap from userspace immediately after SYSRET. This would cause an
	 * infinite loop whenever #DB happens with register state that satisfies
	 * the opportunistic SYSRET conditions. For example, single-stepping
	 * this user code:
	 *
	 *	movq	$stuck_here, %rcx
	 *	pushfq
	 *	popq	%r11
	 * stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	opportunistic_sysret_failed

	/* nothing to check for RSP */

	cmpq	$__USER_DS, SS(%rsp)		/* SS must match SYSRET */
	jne	opportunistic_sysret_failed

	/*
	 * We win! This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
syscall_return_via_sysret:
	/* rcx and r11 are already restored (see code above) */
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RSP(%rsp), %rsp
	USERGS_SYSRET64

opportunistic_sysret_failed:
	SWAPGS
	jmp	restore_c_regs_and_iret
END(entry_SYSCALL_64)

ENTRY(stub_ptregs_64)
	/*
	 * Syscalls marked as needing ptregs land here.
	 * If we are on the fast path, we need to save the extra regs,
	 * which we achieve by trying again on the slow path. If we are on
	 * the slow path, the extra regs are already saved.
	 *
	 * RAX stores a pointer to the C function implementing the syscall.
	 * IRQs are on.
	 */
	cmpq	$.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
	jne	1f

	/*
	 * Called from fast path -- disable IRQs again, pop return address
	 * and jump to slow path
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	popq	%rax
	jmp	entry_SYSCALL64_slow_path

1:
	jmp	*%rax				/* Called from C */
END(stub_ptregs_64)

.macro ptregs_stub func
ENTRY(ptregs_\func)
	leaq	\func(%rip), %rax
	jmp	stub_ptregs_64
END(ptregs_\func)
.endm

/* Instantiate ptregs_stub for each ptregs-using syscall */
#define __SYSCALL_64_QUAL_(sym)
#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
#include <asm/syscalls_64.h>
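
/*
 * How the expansion above works: <asm/syscalls_64.h> invokes
 * __SYSCALL_64(nr, sym, qual) once per system call. For qual == ptregs
 * (syscalls that need the full pt_regs, e.g. clone, fork, execve) it
 * expands to "ptregs_stub sym", emitting a ptregs_<sym> entry that
 * routes through stub_ptregs_64 above; for the default qualifier it
 * expands to nothing.
 */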

/*
 * %rdi: prev task
 * %rsi: next task
 */
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in inactive_task_frame
	 */
	pushq	%rbp
	pushq	%rbx
	pushq	%r12
	pushq	%r13
	pushq	%r14
	pushq	%r15

	/* switch stack */
	movq	%rsp, TASK_threadsp(%rdi)
	movq	TASK_threadsp(%rsi), %rsp

#ifdef CONFIG_CC_STACKPROTECTOR
	movq	TASK_stack_canary(%rsi), %rbx
	movq	%rbx, PER_CPU_VAR(irq_stack_union)+stack_canary_offset
#endif

	/* restore callee-saved registers */
	popq	%r15
	popq	%r14
	popq	%r13
	popq	%r12
	popq	%rbx
	popq	%rbp

	jmp	__switch_to
END(__switch_to_asm)

/*
 * A newly forked process directly context switches into this address.
 *
 * rax: prev task we switched from
 * rbx: kernel thread func (NULL for user thread)
 * r12: kernel thread arg
 */
ENTRY(ret_from_fork)
	movq	%rax, %rdi
	call	schedule_tail			/* rdi: 'prev' task parameter */

	testq	%rbx, %rbx			/* from kernel_thread? */
	jnz	1f				/* kernel threads are uncommon */

2:
	movq	%rsp, %rdi
	call	syscall_return_slowpath		/* returns with IRQs disabled */
	TRACE_IRQS_ON				/* user mode is traced as IRQS on */
	SWAPGS
	jmp	restore_regs_and_iret

1:
	/* kernel thread */
	movq	%r12, %rdi
	call	*%rbx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve(). Exit to userspace to complete the execve()
	 * syscall.
	 */
	movq	$0, RAX(%rsp)
	jmp	2b
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushq	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER

	testb	$3, CS(%rsp)
	jz	1f

	/*
	 * IRQ from user mode. Switch to kernel gsbase and inform context
	 * tracking that we're in kernel mode.
	 */
	SWAPGS

	/*
	 * We need to tell lockdep that IRQs are off. We can't do this until
	 * we fix gsbase, and we should do it before enter_from_user_mode
	 * (which can take locks). Since TRACE_IRQS_OFF is idempotent,
	 * the simplest way to handle it is to just call it twice if
	 * we enter from user mode. There's no reason to optimize this since
	 * TRACE_IRQS_OFF is a no-op if lockdep is off.
	 */
	TRACE_IRQS_OFF

	CALL_enter_from_user_mode

1:
	/*
	 * Save previous stack pointer, optionally switch to interrupt stack.
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
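	/*
	 * irq_count is -1 when no interrupt stack is in use. The incl below
	 * takes it to 0 on the outermost interrupt, which sets ZF, so cmovzq
	 * switches %rsp to the per-cpu interrupt stack only in that case;
	 * nested interrupts keep running on the stack they arrived on.
	 */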
	movq	%rsp, %rdi
	incl	PER_CPU_VAR(irq_count)
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rdi
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func				/* rdi points to pt_regs */
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
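	/*
	 * Worked example: for vector 0x20 (FIRST_EXTERNAL_VECTOR) the stub
	 * pushes ~0x20 + 0x80 = 0x5f, which fits in a signed byte and keeps
	 * the pushq encoding short. The addq $-0x80 below removes the bias
	 * again, leaving ~0x20 = -0x21 in orig_ax, from which the C dispatch
	 * code can recover the vector number.
	 */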
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addq	$-0x80, (%rsp)			/* Adjust vector to [-256, -1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq	%rsp

	testb	$3, CS(%rsp)
	jz	retint_kernel

	/* Interrupt came from user space */
GLOBAL(retint_user)
	mov	%rsp,%rdi
	call	prepare_exit_to_usermode
	TRACE_IRQS_IRETQ
	SWAPGS
	jmp	restore_regs_and_iret

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9, EFLAGS(%rsp)		/* were interrupts off? */
	jnc	1f
0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

/*
 * At this label, code paths which return to kernel and to user,
 * which come from interrupts/exception and from syscalls, merge.
 */
GLOBAL(restore_regs_and_iret)
	RESTORE_EXTRA_REGS
restore_c_regs_and_iret:
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT? Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb	$4, (SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault. Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	/*
	 * We are running with user GSBASE. All GPRs contain their user
	 * values. We have a percpu ESPFIX stack that is eight slots
	 * long (see ESPFIX_STACK_SIZE). espfix_waddr points to the bottom
	 * of the ESPFIX stack.
	 *
	 * We clobber RAX and RDI in this code. We stash RDI on the
	 * normal stack and RAX on the ESPFIX stack.
	 *
	 * The ESPFIX stack layout we set up looks like this:
	 *
	 * --- top of ESPFIX stack ---
	 * SS
	 * RSP
	 * RFLAGS
	 * CS
	 * RIP <-- RSP points here when we're done
	 * RAX <-- espfix_waddr points here
	 * --- bottom of ESPFIX stack ---
	 */

	pushq	%rdi				/* Stash user RDI */
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr), %rdi
	movq	%rax, (0*8)(%rdi)		/* user RAX */
	movq	(1*8)(%rsp), %rax		/* user RIP */
	movq	%rax, (1*8)(%rdi)
	movq	(2*8)(%rsp), %rax		/* user CS */
	movq	%rax, (2*8)(%rdi)
	movq	(3*8)(%rsp), %rax		/* user RFLAGS */
	movq	%rax, (3*8)(%rdi)
	movq	(5*8)(%rsp), %rax		/* user SS */
	movq	%rax, (5*8)(%rdi)
	movq	(4*8)(%rsp), %rax		/* user RSP */
	movq	%rax, (4*8)(%rdi)
	/* Now RAX == RSP. */

	andl	$0xffff0000, %eax		/* RAX = (RSP & 0xffff0000) */
	popq	%rdi				/* Restore user RDI */

	/*
	 * espfix_stack[31:16] == 0. The page tables are set up such that
	 * (espfix_stack | (X & 0xffff0000)) points to a read-only alias of
	 * espfix_waddr for any X. That is, there are 65536 RO aliases of
	 * the same page. Set up RSP so that RSP[31:16] contains the
	 * respective 16 bits of the /userspace/ RSP and RSP nonetheless
	 * still points to an RO alias of the ESPFIX stack.
	 */
	orq	PER_CPU_VAR(espfix_stack), %rax
	SWAPGS
	movq	%rax, %rsp

	/*
	 * At this point, we cannot write to the stack any more, but we can
	 * still read.
	 */
	popq	%rax				/* Restore user RAX */

	/*
	 * RSP now points to an ordinary IRET frame, except that the page
	 * is read-only and RSP[31:16] are preloaded with the userspace
	 * values. We can now IRET back to userspace.
	 */
	jmp	native_irq_return_iret
#endif
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	ASM_CLAC
	pushq	$~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
END(\sym)
.endm

#ifdef CONFIG_TRACING
#define trace(sym) trace_##sym
#define smp_trace(sym) smp_trace_##sym

.macro trace_apicinterrupt num sym
apicinterrupt3 \num trace(\sym) smp_trace(\sym)
.endm
#else
.macro trace_apicinterrupt num sym do_sym
.endm
#endif

/* Make sure APIC interrupt handlers end up in the irqentry section: */
#if defined(CONFIG_FUNCTION_GRAPH_TRACER) || defined(CONFIG_KASAN)
# define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
# define POP_SECTION_IRQENTRY	.popsection
#else
# define PUSH_SECTION_IRQENTRY
# define POP_SECTION_IRQENTRY
#endif

.macro apicinterrupt num sym do_sym
PUSH_SECTION_IRQENTRY
apicinterrupt3 \num \sym \do_sym
trace_apicinterrupt \num \sym
POP_SECTION_IRQENTRY
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR		irq_move_cleanup_interrupt	smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR			reboot_interrupt		smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE			uv_bau_message_intr1		uv_bau_message_interrupt
#endif

apicinterrupt LOCAL_TIMER_VECTOR		apic_timer_interrupt		smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR		x86_platform_ipi		smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR		kvm_posted_intr_ipi		smp_kvm_posted_intr_ipi
apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR	kvm_posted_intr_wakeup_ipi	smp_kvm_posted_intr_wakeup_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR		threshold_interrupt		smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_MCE_AMD
apicinterrupt DEFERRED_ERROR_VECTOR		deferred_error_interrupt	smp_deferred_error_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR		thermal_interrupt		smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR	call_function_single_interrupt	smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR		call_function_interrupt		smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR			reschedule_interrupt		smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR			error_interrupt			smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR		spurious_interrupt		smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR			irq_work_interrupt		smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
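/*
 * CPU_TSS_IST(x) is the per-cpu address of slot x (1-based) in the TSS
 * IST array; each slot is an 8-byte stack pointer, hence the scaling by 8.
 * The idtentry macro below uses it to shift the IST stack for handlers
 * instantiated with shift_ist (the DEBUG_STACK entries for #DB and #BP).
 */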

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME

	.ifeq \has_error_code
	pushq	$-1				/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	testb	$3, CS(%rsp)			/* If coming from userspace, switch stacks */
	jnz	1f
	.endif
	call	paranoid_entry
	.else
	call	error_entry
	.endif
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG			/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call	\do_sym

	.if \shift_ist != -1
	addq	$EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp	paranoid_exit
	.else
	jmp	error_exit
	.endif

	.if \paranoid == 1
	/*
	 * Paranoid entry from userspace. Switch stacks and treat it
	 * as a normal entry. This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call	error_entry


	movq	%rsp, %rdi			/* pt_regs pointer */
	call	sync_regs
	movq	%rax, %rsp			/* switch stack */

	movq	%rsp, %rdi			/* pt_regs pointer */

	.if \has_error_code
	movq	ORIG_RAX(%rsp), %rsi		/* get error code */
	movq	$-1, ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl	%esi, %esi			/* no error code */
	.endif

	call	\do_sym

	jmp	error_exit			/* %ebx: no swapgs flag */
	.endif
END(\sym)
.endm
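
/*
 * Usage example (editor's illustration, derived from the macro above and
 * the instantiations below): "idtentry overflow do_overflow
 * has_error_code=0" builds the #OF entry point: it pushes a fake ORIG_RAX
 * of -1, goes through error_entry/error_exit, and calls
 * do_overflow(pt_regs, 0). paranoid=1 entries additionally switch off the
 * IST stack via sync_regs when they arrive from userspace.
 */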

#ifdef CONFIG_TRACING
.macro trace_idtentry sym do_sym has_error_code:req
idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#else
.macro trace_idtentry sym do_sym has_error_code:req
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#endif

idtentry divide_error			do_divide_error			has_error_code=0
idtentry overflow			do_overflow			has_error_code=0
idtentry bounds				do_bounds			has_error_code=0
idtentry invalid_op			do_invalid_op			has_error_code=0
idtentry device_not_available		do_device_not_available		has_error_code=0
idtentry double_fault			do_double_fault			has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun	do_coprocessor_segment_overrun	has_error_code=0
idtentry invalid_TSS			do_invalid_TSS			has_error_code=1
idtentry segment_not_present		do_segment_not_present		has_error_code=1
idtentry spurious_interrupt_bug		do_spurious_interrupt_bug	has_error_code=0
idtentry coprocessor_error		do_coprocessor_error		has_error_code=0
idtentry alignment_check		do_alignment_check		has_error_code=1
idtentry simd_coprocessor_error		do_simd_coprocessor_error	has_error_code=0


	/*
	 * Reload gs selector with exception handling
	 * edi: new selector
	 */
ENTRY(native_load_gs_index)
	pushfq
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
.Lgs_change:
	movl	%edi, %gs
2:	ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
	SWAPGS
	popfq
	ret
END(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

	_ASM_EXTABLE(.Lgs_change, bad_gs)
	.section .fixup, "ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS					/* switch back to user gs */
.macro ZAP_GS
	/* This can't be a string because the preprocessor needs to see it. */
	movl $__USER_DS, %eax
	movl %eax, %gs
.endm
	ALTERNATIVE "", "ZAP_GS", X86_BUG_NULL_SEG
	xorl	%eax, %eax
	movl	%eax, %gs
	jmp	2b
	.previous

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	pushq	%rbp
	mov	%rsp, %rbp
	incl	PER_CPU_VAR(irq_count)
	cmove	PER_CPU_VAR(irq_stack_ptr), %rsp
	push	%rbp				/* frame pointer backlink */
	call	__do_softirq
	leaveq
	decl	PER_CPU_VAR(irq_count)
	ret
END(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
ENTRY(xen_do_hypervisor_callback)		/* do_hypervisor_callback(struct pt_regs *) */

/*
 * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
 * see the correct pointer to the pt_regs.
 */
	movq	%rdi, %rsp			/* we don't return, adjust the stack frame */
11:	incl	PER_CPU_VAR(irq_count)
	movq	%rsp, %rbp
	cmovzq	PER_CPU_VAR(irq_stack_ptr), %rsp
	pushq	%rbp				/* frame pointer backlink */
	call	xen_evtchn_do_upcall
	popq	%rsp
	decl	PER_CPU_VAR(irq_count)
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	error_exit
END(xen_do_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we do not need to fix up as Xen has already reloaded all segment
 * registers that could be reloaded and zeroed the others.
 * Category 2 we fix up by killing the current process. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by comparing each saved segment register
 * with its current contents: any discrepancy means we are in category 1.
 */
ENTRY(xen_failsafe_callback)
	movl	%ds, %ecx
	cmpw	%cx, 0x10(%rsp)
	jne	1f
	movl	%es, %ecx
	cmpw	%cx, 0x18(%rsp)
	jne	1f
	movl	%fs, %ecx
	cmpw	%cx, 0x20(%rsp)
	jne	1f
	movl	%gs, %ecx
	cmpw	%cx, 0x28(%rsp)
	jne	1f
	/* All segments match their saved values => Category 2 (Bad IRET). */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$0				/* RIP */
	pushq	%r11
	pushq	%rcx
	jmp	general_protection
1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
	movq	(%rsp), %rcx
	movq	8(%rsp), %r11
	addq	$0x30, %rsp
	pushq	$-1				/* orig_ax = -1 => not a system call */
	ALLOC_PT_GPREGS_ON_STACK
	SAVE_C_REGS
	SAVE_EXTRA_REGS
	ENCODE_FRAME_POINTER
	jmp	error_exit
END(xen_failsafe_callback)

apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	xen_hvm_callback_vector xen_evtchn_do_upcall

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)
apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
	hyperv_callback_vector hyperv_vector_handler
#endif /* CONFIG_HYPERV */

idtentry debug			do_debug		has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry int3			do_int3			has_error_code=0	paranoid=1 shift_ist=DEBUG_STACK
idtentry stack_segment		do_stack_segment	has_error_code=1

#ifdef CONFIG_XEN
idtentry xen_debug		do_debug		has_error_code=0
idtentry xen_int3		do_int3			has_error_code=0
idtentry xen_stack_segment	do_stack_segment	has_error_code=1
#endif

idtentry general_protection	do_general_protection	has_error_code=1
trace_idtentry page_fault	do_page_fault		has_error_code=1

#ifdef CONFIG_KVM_GUEST
idtentry async_page_fault	do_async_page_fault	has_error_code=1
#endif

#ifdef CONFIG_X86_MCE
idtentry machine_check		has_error_code=0	paranoid=1 do_sym=*machine_check_vector(%rip)
#endif

/*
 * Save all registers in pt_regs, and switch gs if needed.
 * Use a slow but surefire "are we in kernel?" check.
 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
 */
ENTRY(paranoid_entry)
	cld
	SAVE_C_REGS 8
	SAVE_EXTRA_REGS 8
	ENCODE_FRAME_POINTER 8
	movl	$1, %ebx
	movl	$MSR_GS_BASE, %ecx
	rdmsr
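	/*
	 * rdmsr leaves the high half of MSR_GS_BASE in %edx. A kernel gsbase
	 * is a kernel-space address, so its top bit -- and therefore the sign
	 * bit of %edx -- is set; a user gsbase yields a non-negative %edx and
	 * we still need the SWAPGS below.
	 */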
	testl	%edx, %edx
	js	1f				/* negative -> in kernel */
	SWAPGS
	xorl	%ebx, %ebx
1:	ret
END(paranoid_entry)

/*
 * "Paranoid" exit path from exception stack. This is invoked
 * only on return from non-NMI IST interrupts that came
 * from kernel space.
 *
 * We may be returning to very strange contexts (e.g. very early
 * in syscall entry), so checking for preemption here would
 * be complicated. Fortunately, there's no good reason to try
 * to handle preemption here.
 *
 * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001056ENTRY(paranoid_exit)
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001057 DISABLE_INTERRUPTS(CLBR_NONE)
Steven Rostedt5963e312012-05-30 11:54:53 -04001058 TRACE_IRQS_OFF_DEBUG
Ingo Molnar4d732132015-06-08 20:43:07 +02001059 testl %ebx, %ebx /* swapgs needed? */
1060 jnz paranoid_exit_no_swapgs
Denys Vlasenkof2db9382015-02-26 14:40:30 -08001061 TRACE_IRQS_IRETQ
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001062 SWAPGS_UNSAFE_STACK
Ingo Molnar4d732132015-06-08 20:43:07 +02001063 jmp paranoid_exit_restore
Denys Vlasenko0d550832015-02-26 14:40:29 -08001064paranoid_exit_no_swapgs:
Denys Vlasenkof2db9382015-02-26 14:40:30 -08001065 TRACE_IRQS_IRETQ_DEBUG
Denys Vlasenko0d550832015-02-26 14:40:29 -08001066paranoid_exit_restore:
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001067 RESTORE_EXTRA_REGS
1068 RESTORE_C_REGS
1069 REMOVE_PT_GPREGS_FROM_STACK 8
Andy Lutomirski48e08d02014-11-11 12:49:41 -08001070 INTERRUPT_RETURN
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001071END(paranoid_exit)
1072
1073/*
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001074 * Save all registers in pt_regs, and switch gs if needed.
Andy Lutomirski539f5112015-06-09 12:36:01 -07001075 * Return: EBX=0: came from user mode; EBX=1: otherwise
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001076 */
1077ENTRY(error_entry)
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001078 cld
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001079 SAVE_C_REGS 8
1080 SAVE_EXTRA_REGS 8
Josh Poimboeuf946c1912016-10-20 11:34:40 -05001081 ENCODE_FRAME_POINTER 8
Ingo Molnar4d732132015-06-08 20:43:07 +02001082 xorl %ebx, %ebx
Denys Vlasenko03335e92015-04-27 15:21:52 +02001083 testb $3, CS+8(%rsp)
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001084 jz .Lerror_kernelspace
Andy Lutomirski539f5112015-06-09 12:36:01 -07001085
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001086 /*
1087 * We entered from user mode or we're pretending to have entered
1088 * from user mode due to an IRET fault.
1089 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001090 SWAPGS
Andy Lutomirski539f5112015-06-09 12:36:01 -07001091
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001092.Lerror_entry_from_usermode_after_swapgs:
Andy Lutomirskif1075052015-11-12 12:59:00 -08001093 /*
1094 * We need to tell lockdep that IRQs are off. We can't do this until
1095 * we fix gsbase, and we should do it before enter_from_user_mode
1096 * (which can take locks).
1097 */
1098 TRACE_IRQS_OFF
Andy Lutomirski478dc892015-11-12 12:59:04 -08001099 CALL_enter_from_user_mode
Andy Lutomirskif1075052015-11-12 12:59:00 -08001100 ret
Andy Lutomirski02bc7762015-07-03 12:44:31 -07001101
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001102.Lerror_entry_done:
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001103 TRACE_IRQS_OFF
1104 ret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001105
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001106 /*
1107 * There are two places in the kernel that can potentially fault with
1108 * usergs. Handle them here. B stepping K8s sometimes report a
1109 * truncated RIP for IRET exceptions returning to compat mode. Check
1110 * for these here too.
1111 */
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001112.Lerror_kernelspace:
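	/*
	 * EBX=1: we did not enter from user mode.  The checks below catch
	 * the few kernel-mode faults that can still happen with the user
	 * gsbase loaded.
	 */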
Ingo Molnar4d732132015-06-08 20:43:07 +02001113 incl %ebx
1114 leaq native_irq_return_iret(%rip), %rcx
1115 cmpq %rcx, RIP+8(%rsp)
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001116 je .Lerror_bad_iret
Ingo Molnar4d732132015-06-08 20:43:07 +02001117 movl %ecx, %eax /* zero extend */
1118 cmpq %rax, RIP+8(%rsp)
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001119 je .Lbstep_iret
Borislav Petkov42c748bb2016-04-07 17:31:50 -07001120 cmpq $.Lgs_change, RIP+8(%rsp)
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001121 jne .Lerror_entry_done
Andy Lutomirski539f5112015-06-09 12:36:01 -07001122
1123 /*
Borislav Petkov42c748bb2016-04-07 17:31:50 -07001124 * hack: .Lgs_change can fail with user gsbase. If this happens, fix up
Andy Lutomirski539f5112015-06-09 12:36:01 -07001125 * gsbase and proceed. We'll fix up the exception and land in
Borislav Petkov42c748bb2016-04-07 17:31:50 -07001126 * .Lgs_change's error handler with kernel gsbase.
Andy Lutomirski539f5112015-06-09 12:36:01 -07001127 */
Wanpeng Li2fa5f042016-09-30 09:01:06 +08001128 SWAPGS
1129 jmp .Lerror_entry_done
Brian Gerstae24ffe2009-10-12 10:18:23 -04001130
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001131.Lbstep_iret:
Brian Gerstae24ffe2009-10-12 10:18:23 -04001132 /* Fix truncated RIP */
Ingo Molnar4d732132015-06-08 20:43:07 +02001133 movq %rcx, RIP+8(%rsp)
Andy Lutomirskib645af22014-11-22 18:00:33 -08001134 /* fall through */
1135
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001136.Lerror_bad_iret:
Andy Lutomirski539f5112015-06-09 12:36:01 -07001137 /*
1138 * We came from an IRET to user mode, so we have user gsbase.
1139 * Switch to kernel gsbase:
1140 */
Andy Lutomirskib645af22014-11-22 18:00:33 -08001141 SWAPGS
Andy Lutomirski539f5112015-06-09 12:36:01 -07001142
1143 /*
1144 * Pretend that the exception came from user mode: set up pt_regs
1145 * as if we faulted immediately after IRET and clear EBX so that
1146 * error_exit knows that we will be returning to user mode.
1147 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001148 mov %rsp, %rdi
1149 call fixup_bad_iret
1150 mov %rax, %rsp
Andy Lutomirski539f5112015-06-09 12:36:01 -07001151 decl %ebx
Andy Lutomirskicb6f64e2015-07-03 12:44:27 -07001152 jmp .Lerror_entry_from_usermode_after_swapgs
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001153END(error_entry)
1154
1155
Andy Lutomirski539f5112015-06-09 12:36:01 -07001156/*
Nicolas Iooss75ca5b22016-07-29 13:39:51 +02001157 * On entry, EBX is a "return to kernel mode" flag:
Andy Lutomirski539f5112015-06-09 12:36:01 -07001158 * 1: already in kernel mode, don't need SWAPGS
1159 * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
1160 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001161ENTRY(error_exit)
Ingo Molnar4d732132015-06-08 20:43:07 +02001162 movl %ebx, %eax
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001163 DISABLE_INTERRUPTS(CLBR_NONE)
1164 TRACE_IRQS_OFF
Ingo Molnar4d732132015-06-08 20:43:07 +02001165 testl %eax, %eax
1166 jnz retint_kernel
1167 jmp retint_user
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001168END(error_exit)
1169
Denys Vlasenko0784b362015-04-01 16:50:57 +02001170/* Runs on exception stack */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001171ENTRY(nmi)
Andy Lutomirskifc57a7c2015-09-20 16:32:04 -07001172 /*
1173 * Fix up the exception frame if we're on Xen.
1174 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
1175 * one value to the stack on native, so it may clobber the rdx
1176 * scratch slot, but it won't clobber any of the important
1177 * slots past it.
1178 *
1179 * Xen is a different story, because the Xen frame itself overlaps
1180 * the "NMI executing" variable.
1181 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001182 PARAVIRT_ADJUST_EXCEPTION_FRAME
Andy Lutomirskifc57a7c2015-09-20 16:32:04 -07001183
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001184 /*
1185 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1186 * the iretq it performs will take us out of NMI context.
1187 * This means that we can have nested NMIs where the next
1188 * NMI is using the top of the stack of the previous NMI. We
1189 * can't let it execute because the nested NMI will corrupt the
1190 * stack of the previous NMI. NMI handlers are not re-entrant
1191 * anyway.
1192 *
1193 * To handle this case we do the following:
1194 * Check the a special location on the stack that contains
1195 * a variable that is set when NMIs are executing.
1196 * The interrupted task's stack is also checked to see if it
1197 * is an NMI stack.
1198 * If the variable is not set and the stack is not the NMI
1199 * stack then:
1200 * o Set the special variable on the stack
Andy Lutomirski0b229302015-07-15 10:29:36 -07001201 * o Copy the interrupt frame into an "outermost" location on the
1202 * stack
1203 * o Copy the interrupt frame into an "iret" location on the stack
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001204 * o Continue processing the NMI
1205 * If the variable is set or the previous stack is the NMI stack:
Andy Lutomirski0b229302015-07-15 10:29:36 -07001206 * o Modify the "iret" location to jump to repeat_nmi
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001207 * o return back to the first NMI
1208 *
1209 * Now on exit of the first NMI, we first clear the stack variable.
1210 * The NMI-stack check will still tell any nested NMI at that point
1211 * that it is nested. Then we pop the stack normally with iret, and if
1212 * there was a nested NMI that updated the copied "iret" frame, a
1213 * jump will be made to the repeat_nmi code that will handle the second
1214 * NMI.
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001215 *
1216 * However, espfix prevents us from directly returning to userspace
1217 * with a single IRET instruction. Similarly, IRET to user mode
1218 * can fault. We therefore handle NMIs from user space like
1219 * other IST entries.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001220 */
1221
Denys Vlasenko146b2b02015-03-25 18:18:13 +01001222 /* Use %rdx as our temp variable throughout */
Ingo Molnar4d732132015-06-08 20:43:07 +02001223 pushq %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001224
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001225 testb $3, CS-RIP+8(%rsp)
1226 jz .Lnmi_from_kernel
Steven Rostedt45d5a162012-02-19 16:43:37 -05001227
1228 /*
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001229 * NMI from user mode. We need to run on the thread stack, but we
1230 * can't go through the normal entry paths: NMIs are masked, and
1231 * we don't want to enable interrupts, because then we'll end
1232 * up in an awkward situation in which IRQs are on but NMIs
1233 * are off.
Andy Lutomirski83c133c2015-09-20 16:32:05 -07001234 *
1235 * We also must not push anything to the stack before switching
1236 * stacks lest we corrupt the "NMI executing" variable.
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001237 */
1238
Andy Lutomirski83c133c2015-09-20 16:32:05 -07001239 SWAPGS_UNSAFE_STACK
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001240 cld
1241 movq %rsp, %rdx
1242 movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
1243 pushq 5*8(%rdx) /* pt_regs->ss */
1244 pushq 4*8(%rdx) /* pt_regs->rsp */
1245 pushq 3*8(%rdx) /* pt_regs->flags */
1246 pushq 2*8(%rdx) /* pt_regs->cs */
1247 pushq 1*8(%rdx) /* pt_regs->rip */
1248 pushq $-1 /* pt_regs->orig_ax */
1249 pushq %rdi /* pt_regs->di */
1250 pushq %rsi /* pt_regs->si */
1251 pushq (%rdx) /* pt_regs->dx */
1252 pushq %rcx /* pt_regs->cx */
1253 pushq %rax /* pt_regs->ax */
1254 pushq %r8 /* pt_regs->r8 */
1255 pushq %r9 /* pt_regs->r9 */
1256 pushq %r10 /* pt_regs->r10 */
1257 pushq %r11 /* pt_regs->r11 */
1258 pushq %rbx /* pt_regs->rbx */
1259 pushq %rbp /* pt_regs->rbp */
1260 pushq %r12 /* pt_regs->r12 */
1261 pushq %r13 /* pt_regs->r13 */
1262 pushq %r14 /* pt_regs->r14 */
1263 pushq %r15 /* pt_regs->r15 */
Josh Poimboeuf946c1912016-10-20 11:34:40 -05001264 ENCODE_FRAME_POINTER
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001265
1266 /*
1267 * At this point we no longer need to worry about stack damage
1268 * due to nesting -- we're on the normal thread stack and we're
1269 * done with the NMI stack.
1270 */
1271
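	/* do_nmi(regs, error_code): pt_regs pointer in %rdi, -1 in %rsi */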
1272 movq %rsp, %rdi
1273 movq $-1, %rsi
1274 call do_nmi
1275
1276 /*
1277 * Return back to user mode. We must *not* do the normal exit
Josh Poimboeuf946c1912016-10-20 11:34:40 -05001278 * work, because we don't want to enable interrupts.
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001279 */
1280 SWAPGS
Josh Poimboeuf946c1912016-10-20 11:34:40 -05001281 jmp restore_regs_and_iret
Andy Lutomirski9b6e6a82015-07-15 10:29:35 -07001282
1283.Lnmi_from_kernel:
1284 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001285 * Here's what our stack frame will look like:
1286 * +---------------------------------------------------------+
1287 * | original SS |
1288 * | original Return RSP |
1289 * | original RFLAGS |
1290 * | original CS |
1291 * | original RIP |
1292 * +---------------------------------------------------------+
1293 * | temp storage for rdx |
1294 * +---------------------------------------------------------+
1295 * | "NMI executing" variable |
1296 * +---------------------------------------------------------+
1297 * | iret SS } Copied from "outermost" frame |
1298 * | iret Return RSP } on each loop iteration; overwritten |
1299 * | iret RFLAGS } by a nested NMI to force another |
1300 * | iret CS } iteration if needed. |
1301 * | iret RIP } |
1302 * +---------------------------------------------------------+
1303 * | outermost SS } initialized in first_nmi; |
1304 * | outermost Return RSP } will not be changed before |
1305 * | outermost RFLAGS } NMI processing is done. |
1306 * | outermost CS } Copied to "iret" frame on each |
1307 * | outermost RIP } iteration. |
1308 * +---------------------------------------------------------+
1309 * | pt_regs |
1310 * +---------------------------------------------------------+
1311 *
1312 * The "original" frame is used by hardware. Before re-enabling
1313 * NMIs, we need to be done with it, and we need to leave enough
1314 * space for the asm code here.
1315 *
1316 * We return by executing IRET while RSP points to the "iret" frame.
1317 * That will either return for real or it will loop back into NMI
1318 * processing.
1319 *
1320 * The "outermost" frame is copied to the "iret" frame on each
1321 * iteration of the loop, so each iteration starts with the "iret"
1322 * frame pointing to the final return target.
1323 */
1324
1325 /*
1326 * Determine whether we're a nested NMI.
1327 *
Andy Lutomirskia27507c2015-07-15 10:29:37 -07001328 * If we interrupted kernel code between repeat_nmi and
1329 * end_repeat_nmi, then we are a nested NMI. We must not
1330 * modify the "iret" frame because it's being written by
1331 * the outer NMI. That's okay; the outer NMI handler is
1332 * about to call do_nmi anyway, so we can just
1333 * resume the outer NMI.
1334 */
1335
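	/*
	 * 8(%rsp) is the interrupted RIP: the hardware iret frame sits
	 * just above the %rdx we saved on entry.
	 */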
1336 movq $repeat_nmi, %rdx
1337 cmpq 8(%rsp), %rdx
1338 ja 1f
1339 movq $end_repeat_nmi, %rdx
1340 cmpq 8(%rsp), %rdx
1341 ja nested_nmi_out
13421:
1343
1344 /*
1345 * Now check "NMI executing". If it's set, then we're nested.
Andy Lutomirski0b229302015-07-15 10:29:36 -07001346 * This will not detect if we interrupted an outer NMI just
1347 * before IRET.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001348 */
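	/* -8(%rsp) is the "NMI executing" slot, one word below our saved %rdx. */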
Ingo Molnar4d732132015-06-08 20:43:07 +02001349 cmpl $1, -8(%rsp)
1350 je nested_nmi
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001351
1352 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001353 * Now test if the previous stack was an NMI stack. This covers
1354 * the case where we interrupt an outer NMI after it clears
Andy Lutomirski810bc072015-07-15 10:29:38 -07001355 * "NMI executing" but before IRET. We need to be careful, though:
1356 * there is one case in which RSP could point to the NMI stack
1357 * despite there being no NMI active: naughty userspace controls
1358 * RSP at the very beginning of the SYSCALL targets. We can
1359 * pull a fast one on naughty userspace, though: we program
1360 * SYSCALL to mask DF, so userspace cannot cause DF to be set
1361 * if it controls the kernel's RSP. We set DF before we clear
1362 * "NMI executing".
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001363 */
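	/*
	 * %rdx = top of the NMI IST stack: we are 6 words below it
	 * (the 5-word hardware iret frame plus the %rdx we pushed).
	 */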
Denys Vlasenko0784b362015-04-01 16:50:57 +02001364 lea 6*8(%rsp), %rdx
1365 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1366 cmpq %rdx, 4*8(%rsp)
1367 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1368 ja first_nmi
Ingo Molnar4d732132015-06-08 20:43:07 +02001369
Denys Vlasenko0784b362015-04-01 16:50:57 +02001370 subq $EXCEPTION_STKSZ, %rdx
1371 cmpq %rdx, 4*8(%rsp)
1372 /* If it is below the NMI stack, it is a normal NMI */
1373 jb first_nmi
Andy Lutomirski810bc072015-07-15 10:29:38 -07001374
1375 /* Ah, it is within the NMI stack. */
1376
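	/* 3*8(%rsp) is the saved RFLAGS; DF is bit 10, i.e. bit 2 of byte 1. */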
1377 testb $(X86_EFLAGS_DF >> 8), (3*8 + 1)(%rsp)
1378 jz first_nmi /* RSP was user controlled. */
1379
1380 /* This is a nested NMI. */
Denys Vlasenko0784b362015-04-01 16:50:57 +02001381
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001382nested_nmi:
1383 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001384 * Modify the "iret" frame to point to repeat_nmi, forcing another
1385 * iteration of NMI handling.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001386 */
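	/*
	 * The pushes below build a 5-word iret frame (SS, RSP, RFLAGS, CS,
	 * RIP=repeat_nmi) in the "iret" slots.  The RSP value pushed points
	 * at the "outermost RIP" slot, which is where repeat_nmi expects
	 * the stack pointer to be.
	 */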
Andy Lutomirski23a781e2015-07-15 10:29:39 -07001387 subq $8, %rsp
Ingo Molnar4d732132015-06-08 20:43:07 +02001388 leaq -10*8(%rsp), %rdx
1389 pushq $__KERNEL_DS
1390 pushq %rdx
Ingo Molnar131484c2015-05-28 12:21:47 +02001391 pushfq
Ingo Molnar4d732132015-06-08 20:43:07 +02001392 pushq $__KERNEL_CS
1393 pushq $repeat_nmi
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001394
1395 /* Put stack back */
Ingo Molnar4d732132015-06-08 20:43:07 +02001396 addq $(6*8), %rsp
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001397
1398nested_nmi_out:
Ingo Molnar4d732132015-06-08 20:43:07 +02001399 popq %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001400
Andy Lutomirski0b229302015-07-15 10:29:36 -07001401 /* We are returning to kernel mode, so this cannot result in a fault. */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001402 INTERRUPT_RETURN
1403
1404first_nmi:
Andy Lutomirski0b229302015-07-15 10:29:36 -07001405 /* Restore rdx. */
Ingo Molnar4d732132015-06-08 20:43:07 +02001406 movq (%rsp), %rdx
Jan Beulich62610912012-02-24 14:54:37 +00001407
Andy Lutomirski36f1a772015-07-15 10:29:40 -07001408 /* Make room for "NMI executing". */
1409 pushq $0
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001410
Andy Lutomirski0b229302015-07-15 10:29:36 -07001411 /* Leave room for the "iret" frame */
Ingo Molnar4d732132015-06-08 20:43:07 +02001412 subq $(5*8), %rsp
Salman Qazi28696f42012-10-01 17:29:25 -07001413
Andy Lutomirski0b229302015-07-15 10:29:36 -07001414 /* Copy the "original" frame to the "outermost" frame */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001415 .rept 5
Ingo Molnar4d732132015-06-08 20:43:07 +02001416 pushq 11*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001417 .endr
Jan Beulich62610912012-02-24 14:54:37 +00001418
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001419 /* Everything up to here is safe from nested NMIs */
1420
Andy Lutomirskia97439a2015-07-15 10:29:41 -07001421#ifdef CONFIG_DEBUG_ENTRY
1422 /*
1423 * For ease of testing, unmask NMIs right away. Disabled by
1424 * default because IRET is very expensive.
1425 */
1426 pushq $0 /* SS */
1427 pushq %rsp /* RSP (minus 8 because of the previous push) */
1428 addq $8, (%rsp) /* Fix up RSP */
1429 pushfq /* RFLAGS */
1430 pushq $__KERNEL_CS /* CS */
1431 pushq $1f /* RIP */
1432 INTERRUPT_RETURN /* continues at repeat_nmi below */
14331:
1434#endif
1435
Andy Lutomirski0b229302015-07-15 10:29:36 -07001436repeat_nmi:
Jan Beulich62610912012-02-24 14:54:37 +00001437 /*
1438 * If there was a nested NMI, the first NMI's iret will return
1439 * here. But NMIs are still enabled and we can take another
1440 * nested NMI. The nested NMI checks the interrupted RIP to see
1441 * if it is between repeat_nmi and end_repeat_nmi, and if so
1442 * it will just return, as we are about to repeat an NMI anyway.
1443 * This makes it safe to copy to the stack frame that a nested
1444 * NMI will update.
Andy Lutomirski0b229302015-07-15 10:29:36 -07001445 *
1446 * RSP is pointing to "outermost RIP". gsbase is unknown, but, if
1447 * we're repeating an NMI, gsbase has the same value that it had on
1448 * the first iteration. paranoid_entry will load the kernel
Andy Lutomirski36f1a772015-07-15 10:29:40 -07001449 * gsbase if needed before we call do_nmi. "NMI executing"
1450 * is zero.
Jan Beulich62610912012-02-24 14:54:37 +00001451 */
Andy Lutomirski36f1a772015-07-15 10:29:40 -07001452 movq $1, 10*8(%rsp) /* Set "NMI executing". */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001453
Andy Lutomirski0b229302015-07-15 10:29:36 -07001454 /*
1455 * Copy the "outermost" frame to the "iret" frame. NMIs that nest
1456 * here must not modify the "iret" frame while we're writing to
1457 * it or it will end up containing garbage.
1458 */
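	/*
	 * RSP points at "outermost RIP"; step up 10 words to just above
	 * "iret SS", copy the 5 outermost words down into the "iret" frame,
	 * then put RSP back at "outermost RIP".
	 */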
Ingo Molnar4d732132015-06-08 20:43:07 +02001459 addq $(10*8), %rsp
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001460 .rept 5
Ingo Molnar4d732132015-06-08 20:43:07 +02001461 pushq -6*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001462 .endr
Ingo Molnar4d732132015-06-08 20:43:07 +02001463 subq $(5*8), %rsp
Jan Beulich62610912012-02-24 14:54:37 +00001464end_repeat_nmi:
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001465
1466 /*
Andy Lutomirski0b229302015-07-15 10:29:36 -07001467 * Everything below this point can be preempted by a nested NMI.
1468 * If this happens, then the inner NMI will change the "iret"
1469 * frame to point back to repeat_nmi.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001470 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001471 pushq $-1 /* ORIG_RAX: no syscall to restart */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001472 ALLOC_PT_GPREGS_ON_STACK
1473
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001474 /*
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001475 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001476 * as we should not be calling schedule in NMI context,
1477 * even with normal interrupts enabled. An NMI should not be
1478 * setting NEED_RESCHED or anything that normal interrupts and
1479 * exceptions might do.
1480 */
Ingo Molnar4d732132015-06-08 20:43:07 +02001481 call paranoid_entry
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001482
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001483 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
Ingo Molnar4d732132015-06-08 20:43:07 +02001484 movq %rsp, %rdi
1485 movq $-1, %rsi
1486 call do_nmi
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001487
Ingo Molnar4d732132015-06-08 20:43:07 +02001488 testl %ebx, %ebx /* swapgs needed? */
1489 jnz nmi_restore
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001490nmi_swapgs:
1491 SWAPGS_UNSAFE_STACK
1492nmi_restore:
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001493 RESTORE_EXTRA_REGS
1494 RESTORE_C_REGS
Andy Lutomirski0b229302015-07-15 10:29:36 -07001495
1496 /* Point RSP at the "iret" frame. */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001497 REMOVE_PT_GPREGS_FROM_STACK 6*8
Salman Qazi28696f42012-10-01 17:29:25 -07001498
Andy Lutomirski810bc072015-07-15 10:29:38 -07001499 /*
1500 * Clear "NMI executing". Set DF first so that we can easily
1501 * distinguish the remaining code between here and IRET from
1502 * the SYSCALL entry and exit paths. On a native kernel, we
1503 * could just inspect RIP, but, on paravirt kernels,
1504 * INTERRUPT_RETURN can translate into a jump into a
1505 * hypercall page.
1506 */
1507 std
1508 movq $0, 5*8(%rsp) /* clear "NMI executing" */
Andy Lutomirski0b229302015-07-15 10:29:36 -07001509
1510 /*
1511 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
1512 * stack in a single instruction. We are returning to kernel
1513 * mode, so this cannot result in a fault.
1514 */
Andy Lutomirski5ca6f702015-06-04 13:24:29 -07001515 INTERRUPT_RETURN
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001516END(nmi)
1517
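/*
 * SYSCALL from 32-bit userspace lands here when CONFIG_IA32_EMULATION is
 * off: MSR_CSTAR is pointed at this stub, which simply fails the call
 * with -ENOSYS.
 */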
1518ENTRY(ignore_sysret)
Ingo Molnar4d732132015-06-08 20:43:07 +02001519 mov $-ENOSYS, %eax
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001520 sysret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001521END(ignore_sysret)
Andy Lutomirski2deb4be2016-07-14 13:22:55 -07001522
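/*
 * Called from the oops path to kill the current task: reset RSP to the
 * top of the task stack and call do_exit() with the signal number still
 * in %rdi.  do_exit() does not return.
 */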
1523ENTRY(rewind_stack_do_exit)
1524 /* Prevent any naive code from trying to unwind to our caller. */
1525 xorl %ebp, %ebp
1526
1527 movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
1528 leaq -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
1529
1530 call do_exit
15311: jmp 1b
1532END(rewind_stack_do_exit)