/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * A note on terminology:
 * - iret frame: Architecture defined interrupt frame from SS to RIP
 *   at the top of the kernel process stack.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - ENTRY/END Define functions in the symbol table.
 * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
 * - idtentry - Define exception entry points.
 */
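
/*
 * For reference (an editorial note, not from the original comments): the
 * hardware iret frame mentioned above is five 8-byte slots pushed by the
 * CPU as SS, RSP, RFLAGS, CS, RIP, so RIP sits at 0(%rsp) and SS at
 * 4*8(%rsp) for an exception with no error code; the INTR_FRAME/XCPT_FRAME
 * CFI annotations below mirror that layout.
 */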

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/context_tracking.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000

	.code64
	.section .entry.text, "ax"


#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9,EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. In this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9,EFLAGS(%rsp)		/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * empty frame
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, 5*8+\offset
	/*CFI_REL_OFFSET ss, 4*8+\offset*/
	CFI_REL_OFFSET rsp, 3*8+\offset
	/*CFI_REL_OFFSET rflags, 2*8+\offset*/
	/*CFI_REL_OFFSET cs, 1*8+\offset*/
	CFI_REL_OFFSET rip, 0*8+\offset
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, 1*8+\offset
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset
	CFI_REL_OFFSET rdi, RDI+\offset
	CFI_REL_OFFSET rsi, RSI+\offset
	CFI_REL_OFFSET rdx, RDX+\offset
	CFI_REL_OFFSET rcx, RCX+\offset
	CFI_REL_OFFSET rax, RAX+\offset
	CFI_REL_OFFSET r8, R8+\offset
	CFI_REL_OFFSET r9, R9+\offset
	CFI_REL_OFFSET r10, R10+\offset
	CFI_REL_OFFSET r11, R11+\offset
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm

/*
 * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15,rbp,rbx are callee-preserved in C ABI)
 *
 * Only called from user space.
 *
 * When user can change pt_regs->foo always force IRET. That is because
 * it deals with uncanonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
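
/*
 * Purely illustrative (not part of the entry code): a minimal sketch of
 * how a 64-bit user-space caller reaches this entry point, e.g. for
 * write(2), whose syscall number on x86_64 is 1:
 *
 *	movl	$1, %eax		# rax = __NR_write
 *	movl	$1, %edi		# rdi = fd (arg0)
 *	leaq	buf(%rip), %rsi		# rsi = buffer (arg1)
 *	movl	$14, %edx		# rdx = count (arg2)
 *	syscall				# rcx := rip, r11 := rflags
 *	# on return: rax = result (or -errno), rcx/r11 clobbered
 *
 * "buf" and the count are placeholders for this example only.
 */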

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,0
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/

	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(system_call_after_swapgs)

	movq	%rsp,PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(kernel_stack),%rsp

	/* Construct struct pt_regs on stack */
	pushq_cfi	$__USER_DS			/* pt_regs->ss */
	pushq_cfi	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	/*
	 * Re-enable interrupts.
	 * We use 'rsp_scratch' as a scratch space, hence irq-off block above
	 * must execute atomically in the face of possible interrupt-driven
	 * task preemption. We must enable interrupts only after we're done
	 * with using rsp_scratch:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi	%r11			/* pt_regs->flags */
	pushq_cfi	$__USER_CS		/* pt_regs->cs */
	pushq_cfi	%rcx			/* pt_regs->ip */
	CFI_REL_OFFSET rip,0
	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
	pushq_cfi_reg	rdi			/* pt_regs->di */
	pushq_cfi_reg	rsi			/* pt_regs->si */
	pushq_cfi_reg	rdx			/* pt_regs->dx */
	pushq_cfi_reg	rcx			/* pt_regs->cx */
	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
	pushq_cfi_reg	r8			/* pt_regs->r8 */
	pushq_cfi_reg	r9			/* pt_regs->r9 */
	pushq_cfi_reg	r10			/* pt_regs->r10 */
	pushq_cfi_reg	r11			/* pt_regs->r11 */
	sub	$(6*8),%rsp	/* pt_regs->bp,bx,r12-15 not saved */
	CFI_ADJUST_CFA_OFFSET 6*8

	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max,%rax
#else
	andl	$__SYSCALL_MASK,%eax
	cmpl	$__NR_syscall_max,%eax
#endif
	ja	1f	/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10,%rcx
	call	*sys_call_table(,%rax,8)
	movq	%rax,RAX(%rsp)
1:
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has incompletely filled pt_regs.
 */
	LOCKDEP_SYS_EXIT
	/*
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	DISABLE_INTERRUPTS(CLBR_NONE)

	/*
	 * We must check ti flags with interrupts (or at least preemption)
	 * off because we must *never* return to userspace without
	 * processing exit work that is enqueued if we're preempted here.
	 * In particular, returning to userspace with any of the one-shot
	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
	 * very bad.
	 */
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	int_ret_from_sys_call_irqs_off	/* Go to the slow path */

	CFI_REMEMBER_STATE

	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RIP(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	movq	EFLAGS(%rsp),%r11
	/*CFI_REGISTER	rflags,r11*/
	movq	RSP(%rsp),%rsp
	/*
	 * 64bit SYSRET restores rip from rcx,
	 * rflags from r11 (but RF and VM bits are forced to 0),
	 * cs and ss are loaded from MSRs.
	 * Restoration of rflags re-enables interrupts.
	 */
	USERGS_SYSRET64

	CFI_RESTORE_STATE

	/* Do syscall entry tracing */
tracesys:
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	call	syscall_trace_enter_phase1
	test	%rax, %rax
	jnz	tracesys_phase2		/* if needed, run the slow path */
	RESTORE_C_REGS_EXCEPT_RAX	/* else restore clobbered regs */
	movq	ORIG_RAX(%rsp), %rax
	jmp	system_call_fastpath	/* and return to the fast path */

tracesys_phase2:
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	movq	%rax,%rdx
	call	syscall_trace_enter_phase2

	/*
	 * Reload registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter_phase2() returned
	 * the value it wants us to use in the table lookup.
	 */
	RESTORE_C_REGS_EXCEPT_RAX
	RESTORE_EXTRA_REGS
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max,%rax
#else
	andl	$__SYSCALL_MASK,%eax
	cmpl	$__NR_syscall_max,%eax
#endif
	ja	1f	/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10,%rcx	/* fixup for C */
	call	*sys_call_table(,%rax,8)
	movq	%rax,RAX(%rsp)
1:
	/* Use IRET because user could have changed pt_regs->foo */

/*
 * Syscall return path ending with IRET.
 * Has correct iret frame.
 */
GLOBAL(int_ret_from_sys_call)
	DISABLE_INTERRUPTS(CLBR_NONE)
int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */
	TRACE_IRQS_OFF
	movl	$_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
GLOBAL(int_with_check)
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl	TI_flags(%rcx),%edx
	andl	%edi,%edx
	jnz	int_careful
	andl	$~TS_COMPAT,TI_status(%rcx)
	jmp	syscall_return

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt	$TIF_NEED_RESCHED,%edx
	jnc	int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	SCHEDULE_USER
	popq_cfi %rdi
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check

	/* handle signals and tracing -- both require a full pt_regs */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_EXTRA_REGS
	/* Check for syscall exit trace */
	testl	$_TIF_WORK_SYSCALL_EXIT,%edx
	jz	int_signal
	pushq_cfi %rdi
	leaq	8(%rsp),%rdi		# &ptregs -> arg1
	call	syscall_trace_leave
	popq_cfi %rdi
	andl	$~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp	int_restore_rest

int_signal:
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	1f
	movq	%rsp,%rdi		# &ptregs -> arg1
	xorl	%esi,%esi		# oldset -> arg2
	call	do_notify_resume
1:	movl	$_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_EXTRA_REGS
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check

syscall_return:
	/* The IRETQ could re-enable interrupts: */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.
	 */
	movq	RCX(%rsp),%rcx
	cmpq	%rcx,RIP(%rsp)		/* RCX == RIP */
	jne	opportunistic_sysret_failed

	/*
	 * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
	 * in kernel space.  This essentially lets the user take over
	 * the kernel, since userspace controls RSP.  It's not worth
	 * testing for canonicalness exactly -- this check detects any
	 * of the 17 high bits set, which is true for non-canonical
	 * or kernel addresses.  (This will pessimize vsyscall=native.
	 * Big deal.)
	 *
	 * If virtual addresses ever become wider, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 */
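	/*
	 * Worked example of the check below, assuming __VIRTUAL_MASK_SHIFT
	 * is 47: a user address such as 0x00007fffffffe000 shifted right by
	 * 47 leaves 0, so SYSRET remains a candidate; a kernel or
	 * non-canonical address such as 0xffff880000000000 leaves non-zero
	 * high bits and we fall back to IRET via opportunistic_sysret_failed.
	 */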
	.ifne __VIRTUAL_MASK_SHIFT - 47
	.error "virtual address width changed -- SYSRET checks need update"
	.endif
	shr	$__VIRTUAL_MASK_SHIFT, %rcx
	jnz	opportunistic_sysret_failed

	cmpq	$__USER_CS,CS(%rsp)	/* CS must match SYSRET */
	jne	opportunistic_sysret_failed

	movq	R11(%rsp),%r11
	cmpq	%r11,EFLAGS(%rsp)	/* R11 == RFLAGS */
	jne	opportunistic_sysret_failed

	/*
	 * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
	 * restoring TF results in a trap from userspace immediately after
	 * SYSRET.  This would cause an infinite loop whenever #DB happens
	 * with register state that satisfies the opportunistic SYSRET
	 * conditions.  For example, single-stepping this user code:
	 *
	 *           movq $stuck_here,%rcx
	 *           pushfq
	 *           popq %r11
	 *   stuck_here:
	 *
	 * would never get past 'stuck_here'.
	 */
	testq	$(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
	jnz	opportunistic_sysret_failed

	/* nothing to check for RSP */

	cmpq	$__USER_DS,SS(%rsp)	/* SS must match SYSRET */
	jne	opportunistic_sysret_failed

	/*
	 * We win!  This label is here just for ease of understanding
	 * perf profiles.  Nothing jumps here.
	 */
syscall_return_via_sysret:
	CFI_REMEMBER_STATE
	/* r11 is already restored (see code above) */
	RESTORE_C_REGS_EXCEPT_R11
	movq	RSP(%rsp),%rsp
	USERGS_SYSRET64
	CFI_RESTORE_STATE

opportunistic_sysret_failed:
	SWAPGS
	jmp	restore_c_regs_and_iret
	CFI_ENDPROC
END(system_call)


	.macro FORK_LIKE func
ENTRY(stub_\func)
	CFI_STARTPROC
	DEFAULT_FRAME 0, 8		/* offset 8: return address */
	SAVE_EXTRA_REGS 8
	call	sys_\func
	ret
	CFI_ENDPROC
END(stub_\func)
	.endm

	FORK_LIKE  clone
	FORK_LIKE  fork
	FORK_LIKE  vfork

ENTRY(stub_execve)
	CFI_STARTPROC
	DEFAULT_FRAME 0, 8
	call	sys_execve
return_from_execve:
	testl	%eax, %eax
	jz	1f
	/* exec failed, can use fast SYSRET code path in this case */
	ret
1:
	/* must use IRET code path (pt_regs->cs may have changed) */
	addq	$8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	ZERO_EXTRA_REGS
	movq	%rax,RAX(%rsp)
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

ENTRY(stub_execveat)
	CFI_STARTPROC
	DEFAULT_FRAME 0, 8
	call	sys_execveat
	jmp	return_from_execve
	CFI_ENDPROC
END(stub_execveat)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	DEFAULT_FRAME 0, 8
	/*
	 * SAVE_EXTRA_REGS result is not normally needed:
	 * sigreturn overwrites all pt_regs->GPREGS.
	 * But sigreturn can fail (!), and there is no easy way to detect that.
	 * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error,
	 * we SAVE_EXTRA_REGS here.
	 */
	SAVE_EXTRA_REGS 8
	call	sys_rt_sigreturn
return_from_stub:
	addq	$8, %rsp
	CFI_ADJUST_CFA_OFFSET -8
	RESTORE_EXTRA_REGS
	movq	%rax,RAX(%rsp)
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
	CFI_STARTPROC
	DEFAULT_FRAME 0, 8
	SAVE_EXTRA_REGS 8
	call	sys32_x32_rt_sigreturn
	jmp	return_from_stub
	CFI_ENDPROC
END(stub_x32_rt_sigreturn)

ENTRY(stub_x32_execve)
	CFI_STARTPROC
	DEFAULT_FRAME 0, 8
	call	compat_sys_execve
	jmp	return_from_execve
	CFI_ENDPROC
END(stub_x32_execve)

ENTRY(stub_x32_execveat)
	CFI_STARTPROC
	DEFAULT_FRAME 0, 8
	call	compat_sys_execveat
	jmp	return_from_execve
	CFI_ENDPROC
END(stub_x32_execveat)

#endif

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
	DEFAULT_FRAME

	LOCK ; btr $TIF_FORK,TI_flags(%r8)

	pushq_cfi $0x0002
	popfq_cfi			# reset kernel eflags

	call	schedule_tail		# rdi: 'prev' task parameter

	GET_THREAD_INFO(%rcx)

	RESTORE_EXTRA_REGS

	testl	$3,CS(%rsp)		# from kernel_thread?
	jz	1f

	/*
	 * By the time we get here, we have no idea whether our pt_regs,
	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
	 * the slow path, or one of the ia32entry paths.
	 * Use int_ret_from_sys_call to return, since it can safely handle
	 * all of the above.
	 */
	jmp	int_ret_from_sys_call

1:
	movq	%rbp, %rdi
	call	*%rbx
	movl	$0, RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
	INTR_FRAME
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushq_cfi $(~vector+0x80)	/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	CFI_ADJUST_CFA_OFFSET -8
	.align	8
    .endr
	CFI_ENDPROC
END(irq_entries_start)

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee clobbered registers in fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	/*
	 * Since nothing in interrupt handling code touches r12...r15 members
	 * of "struct pt_regs", and since interrupts can nest, we can save
	 * four stack slots and simultaneously provide
	 * an unwind-friendly stack layout by saving "truncated" pt_regs
	 * exactly up to rbp slot, without these members.
	 */
	ALLOC_PT_GPREGS_ON_STACK -RBP
	SAVE_C_REGS -RBP
	/* this goes to 0(%rsp) for unwinder, not for saving the value: */
	SAVE_EXTRA_REGS_RBP -RBP

	leaq	-RBP(%rsp),%rdi		/* arg1 for \func (pointer to pt_regs) */

	testl	$3, CS-RBP(%rsp)
	je	1f
	SWAPGS
1:
	/*
	 * Save previous stack pointer, optionally switch to interrupt stack.
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
	movq	%rsp, %rsi
	incl	PER_CPU_VAR(irq_count)
	cmovzq	PER_CPU_VAR(irq_stack_ptr),%rsp
	CFI_DEF_CFA_REGISTER	rsi
	pushq	%rsi
	/*
	 * For debugger:
	 * "CFA (Current Frame Address) is the value on stack + offset"
	 */
	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
			0x77 /* DW_OP_breg7 (rsp) */, 0, \
			0x06 /* DW_OP_deref */, \
			0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
			0x22 /* DW_OP_plus */
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
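	/*
	 * Illustrative arithmetic: for vector 0x20 (32), the stub pushes
	 * ~32 + 0x80 = 95, which fits in a signed byte and keeps the push
	 * encoding short.  common_interrupt then adds -0x80, leaving
	 * ~vector (-33) in pt_regs->orig_ax, and do_IRQ recovers the vector
	 * by complementing orig_ax again.
	 */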
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	ASM_CLAC
	addq	$-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq	%rsi
	CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
	/* return code expects complete pt_regs - adjust rsp accordingly: */
	leaq	-RBP(%rsi),%rsp
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	RBP

	testl	$3,CS(%rsp)
	je	retint_kernel
	/* Interrupt came from user space */

	GET_THREAD_INFO(%rcx)
	/*
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl	$_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl	TI_flags(%rcx),%edx
	andl	%edi,%edx
	CFI_REMEMBER_STATE
	jnz	retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ

	SWAPGS
	jmp	restore_c_regs_and_iret

/* Returning to kernel space */
retint_kernel:
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
	bt	$9,EFLAGS(%rsp)	/* interrupts were off? */
	jnc	1f
0:	cmpl	$0,PER_CPU_VAR(__preempt_count)
	jnz	1f
	call	preempt_schedule_irq
	jmp	0b
1:
#endif
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ

/*
 * At this label, code paths which return to kernel and to user,
 * which come from interrupts/exception and from syscalls, merge.
 */
restore_c_regs_and_iret:
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8

irq_return:
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
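	/*
	 * Example (illustrative values): bit 2 of the saved SS selector is
	 * the TI bit.  A GDT selector such as __USER_DS (0x2b) has TI == 0
	 * and takes the plain iretq path, while an LDT stack segment such
	 * as selector 0x0f (index 1, TI = 1, RPL = 3) branches to
	 * native_irq_return_ldt below.
	 */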
#ifdef CONFIG_X86_ESPFIX64
	testb	$4,(SS-RIP)(%rsp)
	jnz	native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	pushq_cfi %rax
	pushq_cfi %rdi
	SWAPGS
	movq	PER_CPU_VAR(espfix_waddr),%rdi
	movq	%rax,(0*8)(%rdi)	/* RAX */
	movq	(2*8)(%rsp),%rax	/* RIP */
	movq	%rax,(1*8)(%rdi)
	movq	(3*8)(%rsp),%rax	/* CS */
	movq	%rax,(2*8)(%rdi)
	movq	(4*8)(%rsp),%rax	/* RFLAGS */
	movq	%rax,(3*8)(%rdi)
	movq	(6*8)(%rsp),%rax	/* SS */
	movq	%rax,(5*8)(%rdi)
	movq	(5*8)(%rsp),%rax	/* RSP */
	movq	%rax,(4*8)(%rdi)
	andl	$0xffff0000,%eax
	popq_cfi %rdi
	orq	PER_CPU_VAR(espfix_stack),%rax
	SWAPGS
	movq	%rax,%rsp
	popq_cfi %rax
	jmp	native_irq_return_iret
#endif

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt	$TIF_NEED_RESCHED,%edx
	jnc	retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	SCHEDULE_USER
	popq_cfi %rdi
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	retint_check

retint_signal:
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_EXTRA_REGS
	movq	$-1,ORIG_RAX(%rsp)
	xorl	%esi,%esi		# oldset
	movq	%rsp,%rdi		# &pt_regs
	call	do_notify_resume
	RESTORE_EXTRA_REGS
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp	retint_with_reschedule

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	INTR_FRAME
	ASM_CLAC
	pushq_cfi $~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp	ret_from_intr
	CFI_ENDPROC
END(\sym)
.endm

#ifdef CONFIG_TRACING
#define trace(sym) trace_##sym
#define smp_trace(sym) smp_trace_##sym

.macro trace_apicinterrupt num sym
apicinterrupt3 \num trace(\sym) smp_trace(\sym)
.endm
#else
.macro trace_apicinterrupt num sym do_sym
.endm
#endif

.macro apicinterrupt num sym do_sym
apicinterrupt3 \num \sym \do_sym
trace_apicinterrupt \num \sym
.endm

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR \
	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR \
	reboot_interrupt smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE \
	uv_bau_message_intr1 uv_bau_message_interrupt
#endif
apicinterrupt LOCAL_TIMER_VECTOR \
	apic_timer_interrupt smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR \
	x86_platform_ipi smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR \
	kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR \
	threshold_interrupt smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR \
	thermal_interrupt smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
	call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
	call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
	reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
	error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
	spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR \
	irq_work_interrupt smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	.if \has_error_code
	XCPT_FRAME
	.else
	INTR_FRAME
	.endif

	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME

	.ifeq \has_error_code
	pushq_cfi $-1			/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	CFI_REMEMBER_STATE
	testl $3, CS(%rsp)		/* If coming from userspace, switch */
	jnz 1f				/* stacks. */
	.endif
	call paranoid_entry
	.else
	call error_entry
	.endif
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	DEFAULT_FRAME 0

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG		/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq %rsp,%rdi			/* pt_regs pointer */

	.if \has_error_code
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl %esi,%esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call \do_sym

	.if \shift_ist != -1
	addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp paranoid_exit
	.else
	jmp error_exit
	.endif

	.if \paranoid == 1
	CFI_RESTORE_STATE
	/*
	 * Paranoid entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call error_entry

	DEFAULT_FRAME 0

	movq %rsp,%rdi			/* pt_regs pointer */
	call sync_regs
	movq %rax,%rsp			/* switch stack */

	movq %rsp,%rdi			/* pt_regs pointer */

	.if \has_error_code
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl %esi,%esi			/* no error code */
	.endif

	call \do_sym

	jmp error_exit			/* %ebx: no swapgs flag */
	.endif

	CFI_ENDPROC
END(\sym)
.endm

#ifdef CONFIG_TRACING
.macro trace_idtentry sym do_sym has_error_code:req
idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#else
.macro trace_idtentry sym do_sym has_error_code:req
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#endif

idtentry divide_error do_divide_error has_error_code=0
idtentry overflow do_overflow has_error_code=0
idtentry bounds do_bounds has_error_code=0
idtentry invalid_op do_invalid_op has_error_code=0
idtentry device_not_available do_device_not_available has_error_code=0
idtentry double_fault do_double_fault has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
idtentry invalid_TSS do_invalid_TSS has_error_code=1
idtentry segment_not_present do_segment_not_present has_error_code=1
idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0
idtentry coprocessor_error do_coprocessor_error has_error_code=0
idtentry alignment_check do_alignment_check has_error_code=1
idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
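/*
 * Rough sketch (not the literal macro expansion): an entry such as
 * "idtentry overflow do_overflow has_error_code=0" above emits
 * ENTRY(overflow), which pushes -1 as ORIG_RAX, allocates pt_regs on
 * the stack, calls error_entry, then do_overflow(pt_regs, 0), and
 * finally returns through error_exit.
 */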
Andy Lutomirski5cec93c2011-06-05 13:50:24 -04001066
Ingo Molnar2601e642006-07-03 00:24:45 -07001067
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001068 /* Reload gs selector with exception handling */
1069 /* edi: new selector */
Jeremy Fitzhardinge9f9d4892008-06-25 00:19:32 -04001070ENTRY(native_load_gs_index)
Jan Beulich7effaa82005-09-12 18:49:24 +02001071 CFI_STARTPROC
Jan Beulichdf5d1872010-09-02 14:07:16 +01001072 pushfq_cfi
Jeremy Fitzhardingeb8aa2872009-01-28 14:35:03 -08001073 DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001074 SWAPGS
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001075gs_change:
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001076 movl %edi,%gs
Linus Torvalds1da177e2005-04-16 15:20:36 -070010772: mfence /* workaround */
Glauber de Oliveira Costa72fe4852008-01-30 13:32:08 +01001078 SWAPGS
Jan Beulichdf5d1872010-09-02 14:07:16 +01001079 popfq_cfi
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001080 ret
Jan Beulich7effaa82005-09-12 18:49:24 +02001081 CFI_ENDPROC
Alexander van Heukelum6efdcfa2008-11-23 10:15:32 +01001082END(native_load_gs_index)
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001083
H. Peter Anvind7abc0f2012-04-20 12:19:50 -07001084 _ASM_EXTABLE(gs_change,bad_gs)
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001085 .section .fixup,"ax"
Linus Torvalds1da177e2005-04-16 15:20:36 -07001086 /* running with kernelgs */
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001087bad_gs:
Glauber de Oliveira Costa72fe4852008-01-30 13:32:08 +01001088 SWAPGS /* switch back to user gs */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 xorl %eax,%eax
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001090 movl %eax,%gs
1091 jmp 2b
1092 .previous
Alexander van Heukelum0bd7b792008-11-16 15:29:00 +01001093
Andi Kleen26995002006-08-02 22:37:28 +02001094/* Call softirq on interrupt stack. Interrupts are off. */
Frederic Weisbecker7d65f4a2013-09-05 15:49:45 +02001095ENTRY(do_softirq_own_stack)
Jan Beulich7effaa82005-09-12 18:49:24 +02001096 CFI_STARTPROC
Jan Beulichdf5d1872010-09-02 14:07:16 +01001097 pushq_cfi %rbp
Andi Kleen26995002006-08-02 22:37:28 +02001098 CFI_REL_OFFSET rbp,0
1099 mov %rsp,%rbp
1100 CFI_DEF_CFA_REGISTER rbp
Brian Gerst56895532009-01-19 00:38:58 +09001101 incl PER_CPU_VAR(irq_count)
Brian Gerst26f80bd2009-01-19 00:38:58 +09001102 cmove PER_CPU_VAR(irq_stack_ptr),%rsp
Andi Kleen26995002006-08-02 22:37:28 +02001103 push %rbp # backlink for old unwinder
Andi Kleened6b6762005-07-28 21:15:49 -07001104 call __do_softirq
Andi Kleen26995002006-08-02 22:37:28 +02001105 leaveq
Jan Beulichdf5d1872010-09-02 14:07:16 +01001106 CFI_RESTORE rbp
Jan Beulich7effaa82005-09-12 18:49:24 +02001107 CFI_DEF_CFA_REGISTER rsp
Andi Kleen26995002006-08-02 22:37:28 +02001108 CFI_ADJUST_CFA_OFFSET -8
Brian Gerst56895532009-01-19 00:38:58 +09001109 decl PER_CPU_VAR(irq_count)
Andi Kleened6b6762005-07-28 21:15:49 -07001110 ret
Jan Beulich7effaa82005-09-12 18:49:24 +02001111 CFI_ENDPROC
Frederic Weisbecker7d65f4a2013-09-05 15:49:45 +02001112END(do_softirq_own_stack)
Andi Kleen75154f42007-06-23 02:29:25 +02001113
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001114#ifdef CONFIG_XEN
Andy Lutomirskicb5dd2c2014-05-21 15:07:08 -07001115idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001116
1117/*
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001118 * A note on the "critical region" in our callback handler.
1119 * We want to avoid stacking callback handlers due to events occurring
1120 * during handling of the last event. To do this, we keep events disabled
1121 * until we've done all processing. HOWEVER, we must enable events before
1122 * popping the stack frame (can't be done atomically) and so it would still
1123 * be possible to get enough handler activations to overflow the stack.
1124 * Although unlikely, bugs of that kind are hard to track down, so we'd
1125 * like to avoid the possibility.
1126 * So, on entry to the handler we detect whether we interrupted an
1127 * existing activation in its critical region -- if so, we pop the current
1128 * activation and restart the handler using the previous one.
1129 */
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001130ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
1131 CFI_STARTPROC
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001132/*
1133 * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
1134 * see the correct pointer to the pt_regs
1135 */
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001136 movq %rdi, %rsp # we don't return, adjust the stack frame
1137 CFI_ENDPROC
Alexander van Heukelumdcd072e2008-11-20 14:40:11 +01001138 DEFAULT_FRAME
Brian Gerst56895532009-01-19 00:38:58 +0900113911: incl PER_CPU_VAR(irq_count)
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001140 movq %rsp,%rbp
1141 CFI_DEF_CFA_REGISTER rbp
Brian Gerst26f80bd2009-01-19 00:38:58 +09001142 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001143 pushq %rbp # backlink for old unwinder
1144 call xen_evtchn_do_upcall
1145 popq %rsp
1146 CFI_DEF_CFA_REGISTER rsp
Brian Gerst56895532009-01-19 00:38:58 +09001147 decl PER_CPU_VAR(irq_count)
David Vrabelfdfd8112015-02-19 15:23:17 +00001148#ifndef CONFIG_PREEMPT
1149 call xen_maybe_preempt_hcall
1150#endif
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001151 jmp error_exit
1152 CFI_ENDPROC
Alexander van Heukelum371c3942011-03-11 21:59:38 +01001153END(xen_do_hypervisor_callback)
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001154
1155/*
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001156 * Hypervisor uses this for application faults while it executes.
1157 * We get here for two reasons:
1158 * 1. Fault while reloading DS, ES, FS or GS
1159 * 2. Fault while executing IRET
1160 * Category 1 we do not need to fix up as Xen has already reloaded all segment
1161 * registers that could be reloaded and zeroed the others.
1162 * Category 2 we fix up by killing the current process. We cannot use the
1163 * normal Linux return path in this case because if we use the IRET hypercall
1164 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1165 * We distinguish between categories by comparing each saved segment register
1166 * with its current contents: any discrepancy means we are in category 1.
1167 */
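/*
 * In rough pseudocode (illustrative only), the checks below are:
 *
 *	if (saved_ds == ds && saved_es == es &&
 *	    saved_fs == fs && saved_gs == gs) {
 *		// Category 2: the IRET itself faulted.  Build a fake
 *		// exception frame and treat it as a #GP, which kills
 *		// the current process.
 *		goto general_protection;
 *	} else {
 *		// Category 1: a segment reload faulted and Xen has
 *		// already fixed the segments.  Rebuild pt_regs and go
 *		// out through error_exit, retrying the IRET path.
 *	}
 */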
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001168ENTRY(xen_failsafe_callback)
Alexander van Heukelumdcd072e2008-11-20 14:40:11 +01001169 INTR_FRAME 1 (6*8)
1170 /*CFI_REL_OFFSET gs,GS*/
1171 /*CFI_REL_OFFSET fs,FS*/
1172 /*CFI_REL_OFFSET es,ES*/
1173 /*CFI_REL_OFFSET ds,DS*/
1174 CFI_REL_OFFSET r11,8
1175 CFI_REL_OFFSET rcx,0
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001176 movw %ds,%cx
1177 cmpw %cx,0x10(%rsp)
1178 CFI_REMEMBER_STATE
1179 jne 1f
1180 movw %es,%cx
1181 cmpw %cx,0x18(%rsp)
1182 jne 1f
1183 movw %fs,%cx
1184 cmpw %cx,0x20(%rsp)
1185 jne 1f
1186 movw %gs,%cx
1187 cmpw %cx,0x28(%rsp)
1188 jne 1f
1189 /* All segments match their saved values => Category 2 (Bad IRET). */
1190 movq (%rsp),%rcx
1191 CFI_RESTORE rcx
1192 movq 8(%rsp),%r11
1193 CFI_RESTORE r11
1194 addq $0x30,%rsp
1195 CFI_ADJUST_CFA_OFFSET -0x30
Ingo Molnar14ae22b2008-11-21 15:20:47 +01001196 pushq_cfi $0 /* RIP */
1197 pushq_cfi %r11
1198 pushq_cfi %rcx
Jeremy Fitzhardinge4a5c3e72008-07-08 15:07:09 -07001199 jmp general_protection
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001200 CFI_RESTORE_STATE
12011: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1202 movq (%rsp),%rcx
1203 CFI_RESTORE rcx
1204 movq 8(%rsp),%r11
1205 CFI_RESTORE r11
1206 addq $0x30,%rsp
1207 CFI_ADJUST_CFA_OFFSET -0x30
David Vrabela349e23d12012-10-19 17:29:07 +01001208 pushq_cfi $-1 /* orig_ax = -1 => not a system call */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001209 ALLOC_PT_GPREGS_ON_STACK
1210 SAVE_C_REGS
1211 SAVE_EXTRA_REGS
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001212 jmp error_exit
1213 CFI_ENDPROC
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001214END(xen_failsafe_callback)
1215
Seiji Aguchicf910e82013-06-20 11:46:53 -04001216apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
Sheng Yang38e20b02010-05-14 12:40:51 +01001217 xen_hvm_callback_vector xen_evtchn_do_upcall
1218
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001219#endif /* CONFIG_XEN */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001220
K. Y. Srinivasanbc2b0332013-02-03 17:22:39 -08001221#if IS_ENABLED(CONFIG_HYPERV)
Seiji Aguchicf910e82013-06-20 11:46:53 -04001222apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
K. Y. Srinivasanbc2b0332013-02-03 17:22:39 -08001223 hyperv_callback_vector hyperv_vector_handler
1224#endif /* CONFIG_HYPERV */
1225
Andy Lutomirski577ed452014-05-21 15:07:09 -07001226idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
1227idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
Andy Lutomirski6f442be2014-11-22 18:00:32 -08001228idtentry stack_segment do_stack_segment has_error_code=1
Jeremy Fitzhardinge6cac5a92009-03-29 19:56:29 -07001229#ifdef CONFIG_XEN
Andy Lutomirskicb5dd2c2014-05-21 15:07:08 -07001230idtentry xen_debug do_debug has_error_code=0
1231idtentry xen_int3 do_int3 has_error_code=0
1232idtentry xen_stack_segment do_stack_segment has_error_code=1
Jeremy Fitzhardinge6cac5a92009-03-29 19:56:29 -07001233#endif
Andy Lutomirskicb5dd2c2014-05-21 15:07:08 -07001234idtentry general_protection do_general_protection has_error_code=1
1235trace_idtentry page_fault do_page_fault has_error_code=1
Gleb Natapov631bc482010-10-14 11:22:52 +02001236#ifdef CONFIG_KVM_GUEST
Andy Lutomirskicb5dd2c2014-05-21 15:07:08 -07001237idtentry async_page_fault do_async_page_fault has_error_code=1
Gleb Natapov631bc482010-10-14 11:22:52 +02001238#endif
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001239#ifdef CONFIG_X86_MCE
Andy Lutomirskicb5dd2c2014-05-21 15:07:08 -07001240idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001241#endif
1242
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001243/*
1244 * Save all registers in pt_regs, and switch gs if needed.
1245 * Use a slow but surefire "are we in kernel?" check.
1246 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1247 */
1248ENTRY(paranoid_entry)
1249 XCPT_FRAME 1 15*8
Denys Vlasenko1eeb2072015-02-26 14:40:33 -08001250 cld
1251 SAVE_C_REGS 8
1252 SAVE_EXTRA_REGS 8
1253 movl $1,%ebx
1254 movl $MSR_GS_BASE,%ecx
1255 rdmsr
1256 testl %edx,%edx
1257 js 1f /* negative -> in kernel */
1258 SWAPGS
1259 xorl %ebx,%ebx
12601: ret
1261 CFI_ENDPROC
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001262END(paranoid_entry)
Denys Vlasenko1eeb2072015-02-26 14:40:33 -08001263
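/*
 * The MSR_GS_BASE test in paranoid_entry works because the kernel GS base
 * (the per-cpu area) is a negative (upper-half) address, while a user GS
 * base is a positive user-space address.  Roughly (illustrative pseudocode):
 *
 *	rdmsr(MSR_GS_BASE, &lo, &hi);	// hi = upper 32 bits of the base
 *	if ((int)hi >= 0) {		// sign bit clear => user GS base
 *		swapgs();
 *		ebx = 0;		// exit path must SWAPGS again
 *	} else {
 *		ebx = 1;		// already running on the kernel GS base
 *	}
 */
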
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001264/*
1265 * "Paranoid" exit path from exception stack. This is invoked
1266 * only on return from non-NMI IST interrupts that came
1267 * from kernel space.
1268 *
1269 * We may be returning to very strange contexts (e.g. very early
1270 * in syscall entry), so checking for preemption here would
1271 * be complicated. Fortunately, there's no good reason
1272 * to try to handle preemption here.
1273 */
1274/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001275ENTRY(paranoid_exit)
Jan Beulich1f130a72010-09-02 13:54:32 +01001276 DEFAULT_FRAME
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001277 DISABLE_INTERRUPTS(CLBR_NONE)
Steven Rostedt5963e312012-05-30 11:54:53 -04001278 TRACE_IRQS_OFF_DEBUG
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001279 testl %ebx,%ebx /* swapgs needed? */
Denys Vlasenko0d550832015-02-26 14:40:29 -08001280 jnz paranoid_exit_no_swapgs
Denys Vlasenkof2db9382015-02-26 14:40:30 -08001281 TRACE_IRQS_IRETQ
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001282 SWAPGS_UNSAFE_STACK
Denys Vlasenko0d550832015-02-26 14:40:29 -08001283 jmp paranoid_exit_restore
1284paranoid_exit_no_swapgs:
Denys Vlasenkof2db9382015-02-26 14:40:30 -08001285 TRACE_IRQS_IRETQ_DEBUG
Denys Vlasenko0d550832015-02-26 14:40:29 -08001286paranoid_exit_restore:
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001287 RESTORE_EXTRA_REGS
1288 RESTORE_C_REGS
1289 REMOVE_PT_GPREGS_FROM_STACK 8
Andy Lutomirski48e08d02014-11-11 12:49:41 -08001290 INTERRUPT_RETURN
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001291 CFI_ENDPROC
1292END(paranoid_exit)
1293
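/*
 * In rough pseudocode, paranoid_exit is (illustrative only):
 *
 *	irq_disable();
 *	trace_irqs_off();
 *	if (!ebx)			// paranoid_entry did a SWAPGS for us
 *		swapgs();		// restore the user GS base
 *	restore_regs();
 *	iret();
 */
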
1294/*
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001295 * Save all registers in pt_regs, and switch gs if needed.
1296 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001297 */
1298ENTRY(error_entry)
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001299 XCPT_FRAME 1 15*8
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001300 cld
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001301 SAVE_C_REGS 8
1302 SAVE_EXTRA_REGS 8
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001303 xorl %ebx,%ebx
1304 testl $3,CS+8(%rsp)
1305 je error_kernelspace
1306error_swapgs:
1307 SWAPGS
1308error_sti:
1309 TRACE_IRQS_OFF
1310 ret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001311
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001312 /*
1313 * There are two places in the kernel that can potentially fault with
1314 * usergs. Handle them here. B stepping K8s sometimes report a
1315 * truncated RIP for IRET exceptions returning to compat mode. Check
1316 * for these here too.
1317 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001318error_kernelspace:
Jan Beulich3bab13b2014-06-25 14:11:22 +01001319 CFI_REL_OFFSET rcx, RCX+8
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001320 incl %ebx
Andy Lutomirski7209a752014-07-23 08:34:11 -07001321 leaq native_irq_return_iret(%rip),%rcx
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001322 cmpq %rcx,RIP+8(%rsp)
Andy Lutomirskib645af22014-11-22 18:00:33 -08001323 je error_bad_iret
Brian Gerstae24ffe2009-10-12 10:18:23 -04001324 movl %ecx,%eax /* zero extend */
1325 cmpq %rax,RIP+8(%rsp)
1326 je bstep_iret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001327 cmpq $gs_change,RIP+8(%rsp)
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001328 je error_swapgs
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001329 jmp error_sti
Brian Gerstae24ffe2009-10-12 10:18:23 -04001330
1331bstep_iret:
1332 /* Fix truncated RIP */
1333 movq %rcx,RIP+8(%rsp)
Andy Lutomirskib645af22014-11-22 18:00:33 -08001334 /* fall through */
1335
1336error_bad_iret:
1337 SWAPGS
1338 mov %rsp,%rdi
1339 call fixup_bad_iret
1340 mov %rax,%rsp
1341 decl %ebx /* Return to usergs */
1342 jmp error_sti
Jan Beuliche6b04b62010-09-02 13:52:45 +01001343 CFI_ENDPROC
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001344END(error_entry)
1345
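/*
 * The error_kernelspace checks above amount to, roughly (illustrative
 * pseudocode, not kernel code):
 *
 *	if (regs->ip == native_irq_return_iret)
 *		goto error_bad_iret;		// the IRET to user space faulted
 *	if (regs->ip == (u32)native_irq_return_iret) {
 *		regs->ip = native_irq_return_iret;	// un-truncate RIP (B-step K8)
 *		goto error_bad_iret;
 *	}
 *	if (regs->ip == gs_change)
 *		swapgs();			// fault hit while on the user GS base
 *	return;					// kernel-mode cases keep ebx == 1
 *
 * error_bad_iret switches back to the kernel GS base, has fixup_bad_iret()
 * build a sane pt_regs, and then reports the fault as if it came from user
 * mode (ebx = 0) instead of recursing on the kernel IRET.
 */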
1346
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001347/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001348ENTRY(error_exit)
1349 DEFAULT_FRAME
1350 movl %ebx,%eax
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001351 RESTORE_EXTRA_REGS
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001352 DISABLE_INTERRUPTS(CLBR_NONE)
1353 TRACE_IRQS_OFF
1354 GET_THREAD_INFO(%rcx)
1355 testl %eax,%eax
1356 jne retint_kernel
1357 LOCKDEP_SYS_EXIT_IRQ
1358 movl TI_flags(%rcx),%edx
1359 movl $_TIF_WORK_MASK,%edi
1360 andl %edi,%edx
1361 jnz retint_careful
1362 jmp retint_swapgs
1363 CFI_ENDPROC
1364END(error_exit)
1365
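/*
 * error_exit, roughly (illustrative pseudocode; the retint_* labels are in
 * the common interrupt-return path earlier in this file):
 *
 *	restore_extra_regs();
 *	irq_disable();
 *	if (ebx)				// the fault came from kernel mode
 *		goto retint_kernel;
 *	if (ti->flags & _TIF_WORK_MASK)		// signals/resched pending for user
 *		goto retint_careful;
 *	goto retint_swapgs;			// plain return to user space
 */
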
Denys Vlasenko0784b362015-04-01 16:50:57 +02001366/* Runs on exception stack */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001367ENTRY(nmi)
1368 INTR_FRAME
1369 PARAVIRT_ADJUST_EXCEPTION_FRAME
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001370 /*
1371 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1372 * the iretq it performs will take us out of NMI context.
1373 * This means that we can have nested NMIs where the next
1374 * NMI is using the top of the stack of the previous NMI. We
1375 * can't let it execute because the nested NMI will corrupt the
1376 * stack of the previous NMI. NMI handlers are not re-entrant
1377 * anyway.
1378 *
1379 * To handle this case we do the following:
1380 * Check a special location on the stack that contains
1381 * a variable that is set when NMIs are executing.
1382 * The interrupted task's stack is also checked to see if it
1383 * is an NMI stack.
1384 * If the variable is not set and the stack is not the NMI
1385 * stack then:
1386 * o Set the special variable on the stack
1387 * o Copy the interrupt frame into a "saved" location on the stack
1388 * o Copy the interrupt frame into a "copy" location on the stack
1389 * o Continue processing the NMI
1390 * If the variable is set or the previous stack is the NMI stack:
1391 * o Modify the "copy" location to jump to repeat_nmi
1392 * o return to the first NMI
1393 *
1394 * Now on exit of the first NMI, we first clear the stack variable.
1395 * The NMI stack will tell any nested NMIs at that point that it is
1396 * nested. Then we pop the stack normally with iret, and if there was
1397 * a nested NMI that updated the copy interrupt stack frame, a
1398 * jump will be made to the repeat_nmi code that will handle the second
1399 * NMI.
1400 */
1401
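/*
 * The nesting checks below boil down to, roughly (illustrative pseudocode):
 *
 *	if (frame->cs != __KERNEL_CS)
 *		goto first_nmi;		// came from user space: cannot be nested
 *	if (nmi_executing_variable == 1)
 *		goto nested_nmi;	// a previous NMI is still in progress
 *	if (frame->rsp is within this cpu's NMI stack)
 *		goto nested_nmi;	// interrupted an NMI before it set the flag
 *	goto first_nmi;
 */
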
Denys Vlasenko146b2b02015-03-25 18:18:13 +01001402 /* Use %rdx as our temp variable throughout */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001403 pushq_cfi %rdx
Jan Beulich62610912012-02-24 14:54:37 +00001404 CFI_REL_OFFSET rdx, 0
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001405
1406 /*
Steven Rostedt45d5a162012-02-19 16:43:37 -05001407 * If %cs was not the kernel segment, then the NMI triggered in user
1408 * space, which means it is definitely not nested.
1409 */
Steven Rostedta38449ef2012-02-20 15:29:34 -05001410 cmpl $__KERNEL_CS, 16(%rsp)
Steven Rostedt45d5a162012-02-19 16:43:37 -05001411 jne first_nmi
1412
1413 /*
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001414 * Check the special variable on the stack to see if NMIs are
1415 * executing.
1416 */
Steven Rostedta38449ef2012-02-20 15:29:34 -05001417 cmpl $1, -8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001418 je nested_nmi
1419
1420 /*
1421 * Now test if the previous stack was an NMI stack.
1422 * We need the double check. We check the NMI stack to satisfy the
1423 * race when the first NMI clears the variable before returning.
1424 * We check the variable because the first NMI could be in a
1425 * breakpoint routine using a breakpoint stack.
1426 */
Denys Vlasenko0784b362015-04-01 16:50:57 +02001427 lea 6*8(%rsp), %rdx
1428 /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
1429 cmpq %rdx, 4*8(%rsp)
1430 /* If the stack pointer is above the NMI stack, this is a normal NMI */
1431 ja first_nmi
1432 subq $EXCEPTION_STKSZ, %rdx
1433 cmpq %rdx, 4*8(%rsp)
1434 /* If it is below the NMI stack, it is a normal NMI */
1435 jb first_nmi
1436 /* Ah, it is within the NMI stack, treat it as nested */
1437 jmp nested_nmi
1438
Jan Beulich62610912012-02-24 14:54:37 +00001439 CFI_REMEMBER_STATE
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001440
1441nested_nmi:
1442 /*
1443 * Do nothing if we interrupted the fixup in repeat_nmi.
1444 * It's about to repeat the NMI handler, so we are fine
1445 * with ignoring this one.
1446 */
1447 movq $repeat_nmi, %rdx
1448 cmpq 8(%rsp), %rdx
1449 ja 1f
1450 movq $end_repeat_nmi, %rdx
1451 cmpq 8(%rsp), %rdx
1452 ja nested_nmi_out
1453
14541:
1455 /* Set up the interrupted NMI's stack to jump to repeat_nmi */
Salman Qazi28696f42012-10-01 17:29:25 -07001456 leaq -1*8(%rsp), %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001457 movq %rdx, %rsp
Salman Qazi28696f42012-10-01 17:29:25 -07001458 CFI_ADJUST_CFA_OFFSET 1*8
1459 leaq -10*8(%rsp), %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001460 pushq_cfi $__KERNEL_DS
1461 pushq_cfi %rdx
1462 pushfq_cfi
1463 pushq_cfi $__KERNEL_CS
1464 pushq_cfi $repeat_nmi
1465
1466 /* Put stack back */
Salman Qazi28696f42012-10-01 17:29:25 -07001467 addq $(6*8), %rsp
1468 CFI_ADJUST_CFA_OFFSET -6*8
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001469
1470nested_nmi_out:
1471 popq_cfi %rdx
Jan Beulich62610912012-02-24 14:54:37 +00001472 CFI_RESTORE rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001473
1474 /* No need to check faults here */
1475 INTERRUPT_RETURN
1476
Jan Beulich62610912012-02-24 14:54:37 +00001477 CFI_RESTORE_STATE
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001478first_nmi:
1479 /*
1480 * Because nested NMIs will use the pushed location that we
1481 * stored in rdx, we must keep that space available.
1482 * Here's what our stack frame will look like:
1483 * +-------------------------+
1484 * | original SS |
1485 * | original Return RSP |
1486 * | original RFLAGS |
1487 * | original CS |
1488 * | original RIP |
1489 * +-------------------------+
1490 * | temp storage for rdx |
1491 * +-------------------------+
1492 * | NMI executing variable |
1493 * +-------------------------+
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001494 * | copied SS |
1495 * | copied Return RSP |
1496 * | copied RFLAGS |
1497 * | copied CS |
1498 * | copied RIP |
1499 * +-------------------------+
Salman Qazi28696f42012-10-01 17:29:25 -07001500 * | Saved SS |
1501 * | Saved Return RSP |
1502 * | Saved RFLAGS |
1503 * | Saved CS |
1504 * | Saved RIP |
1505 * +-------------------------+
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001506 * | pt_regs |
1507 * +-------------------------+
1508 *
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001509 * The saved stack frame is used to fix up the copied stack frame
1510 * that a nested NMI may change to make the interrupted NMI iret jump
1511 * to repeat_nmi. The original stack frame and the temp storage
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001512 * are also used by nested NMIs and cannot be trusted on exit.
1513 */
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001514 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
Jan Beulich62610912012-02-24 14:54:37 +00001515 movq (%rsp), %rdx
1516 CFI_RESTORE rdx
1517
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001518 /* Set the NMI executing variable on the stack. */
1519 pushq_cfi $1
1520
Salman Qazi28696f42012-10-01 17:29:25 -07001521 /*
1522 * Leave room for the "copied" frame
1523 */
1524 subq $(5*8), %rsp
Jan Beulich444723d2013-01-24 09:27:31 +00001525 CFI_ADJUST_CFA_OFFSET 5*8
Salman Qazi28696f42012-10-01 17:29:25 -07001526
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001527 /* Copy the stack frame to the Saved frame */
1528 .rept 5
Salman Qazi28696f42012-10-01 17:29:25 -07001529 pushq_cfi 11*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001530 .endr
Denys Vlasenko911d2bb2015-02-26 14:40:36 -08001531 CFI_DEF_CFA_OFFSET 5*8
Jan Beulich62610912012-02-24 14:54:37 +00001532
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001533 /* Everything up to here is safe from nested NMIs */
1534
Jan Beulich62610912012-02-24 14:54:37 +00001535 /*
1536 * If there was a nested NMI, the first NMI's iret will return
1537 * here. But NMIs are still enabled and we can take another
1538 * nested NMI. The nested NMI checks the interrupted RIP to see
1539 * if it is between repeat_nmi and end_repeat_nmi, and if so
1540 * it will just return, as we are about to repeat an NMI anyway.
1541 * This makes it safe to copy to the stack frame that a nested
1542 * NMI will update.
1543 */
1544repeat_nmi:
1545 /*
1546 * Update the stack variable to say we are still in NMI (the update
1547 * is benign for the non-repeat case, where 1 was pushed just above
1548 * to this very stack slot).
1549 */
Salman Qazi28696f42012-10-01 17:29:25 -07001550 movq $1, 10*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001551
1552 /* Make another copy, this one may be modified by nested NMIs */
Salman Qazi28696f42012-10-01 17:29:25 -07001553 addq $(10*8), %rsp
1554 CFI_ADJUST_CFA_OFFSET -10*8
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001555 .rept 5
Salman Qazi28696f42012-10-01 17:29:25 -07001556 pushq_cfi -6*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001557 .endr
Salman Qazi28696f42012-10-01 17:29:25 -07001558 subq $(5*8), %rsp
Denys Vlasenko911d2bb2015-02-26 14:40:36 -08001559 CFI_DEF_CFA_OFFSET 5*8
Jan Beulich62610912012-02-24 14:54:37 +00001560end_repeat_nmi:
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001561
1562 /*
1563 * Everything below this point can be preempted by a nested
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001564 * NMI if the first NMI took an exception and reset our iret stack
1565 * so that we repeat another NMI.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001566 */
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001567 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001568 ALLOC_PT_GPREGS_ON_STACK
1569
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001570 /*
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001571 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001572 * as we should not be calling schedule in NMI context,
1573 * even with normal interrupts enabled. An NMI should not be
1574 * setting NEED_RESCHED or anything that normal interrupts and
1575 * exceptions might do.
1576 */
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001577 call paranoid_entry
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001578 DEFAULT_FRAME 0
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001579
1580 /*
1581 * Save off the CR2 register. If we take a page fault in the NMI then
1582 * it could corrupt the CR2 value. If the NMI preempts a page fault
1583 * handler before it was able to read the CR2 register, and then the
1584 * NMI itself takes a page fault, the page fault that was preempted
1585 * will read the information from the NMI page fault and not the
1586 * original fault. Save it off and restore it if it changes.
1587 * Use the r12 callee-saved register.
1588 */
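	/*
	 * Equivalent C, roughly (illustrative only):
	 *
	 *	saved_cr2 = read_cr2();
	 *	do_nmi(regs, -1);
	 *	if (read_cr2() != saved_cr2)
	 *		write_cr2(saved_cr2);
	 */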
1589 movq %cr2, %r12
1590
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001591 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1592 movq %rsp,%rdi
1593 movq $-1,%rsi
1594 call do_nmi
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001595
1596 /* Did the NMI take a page fault? Restore cr2 if it did */
1597 movq %cr2, %rcx
1598 cmpq %rcx, %r12
1599 je 1f
1600 movq %r12, %cr2
16011:
1602
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001603 testl %ebx,%ebx /* swapgs needed? */
1604 jnz nmi_restore
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001605nmi_swapgs:
1606 SWAPGS_UNSAFE_STACK
1607nmi_restore:
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001608 RESTORE_EXTRA_REGS
1609 RESTORE_C_REGS
Jan Beulich444723d2013-01-24 09:27:31 +00001610 /* Pop the extra iret frame at once */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001611 REMOVE_PT_GPREGS_FROM_STACK 6*8
Salman Qazi28696f42012-10-01 17:29:25 -07001612
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001613 /* Clear the NMI executing stack variable */
Salman Qazi28696f42012-10-01 17:29:25 -07001614 movq $0, 5*8(%rsp)
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001615 jmp irq_return
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001616 CFI_ENDPROC
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001617END(nmi)
1618
1619ENTRY(ignore_sysret)
1620 CFI_STARTPROC
1621 mov $-ENOSYS,%eax
1622 sysret
1623 CFI_ENDPROC
1624END(ignore_sysret)
1625