/*
 *  linux/arch/x86_64/entry.S
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
 *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 *
 * Some of this is documented in Documentation/x86/entry_64.txt
 *
 * NOTE: This code handles signal recognition, which happens every time
 * after an interrupt and after each system call.
 *
 * A note on terminology:
 * - iret frame: the architecture-defined interrupt frame, from SS to RIP,
 *   at the top of the kernel process stack.
 *
 * Some macro usage:
 * - CFI macros are used to generate dwarf2 unwind information for better
 *   backtraces. They don't change any code.
 * - ENTRY/END: define functions in the symbol table.
 * - TRACE_IRQ_*: trace hard interrupt state for lock debugging.
 * - idtentry: define exception entry points.
 */
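
/*
 * Quick reference for the iret frame mentioned above (a descriptive
 * sketch added for orientation; nothing below consumes it): on an
 * interrupt or exception the CPU pushes, from higher to lower addresses,
 *
 *	SS, RSP, RFLAGS, CS, RIP	(five 8-byte slots)
 *
 * so RIP ends up at 0(%rsp), which is why INTR_FRAME below describes
 * a 5*8-byte frame.
 */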

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/errno.h>
#include <asm/dwarf2.h>
#include <asm/calling.h>
#include <asm/asm-offsets.h>
#include <asm/msr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/hw_irq.h>
#include <asm/page_types.h>
#include <asm/irqflags.h>
#include <asm/paravirt.h>
#include <asm/percpu.h>
#include <asm/asm.h>
#include <asm/context_tracking.h>
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
#include <linux/elf-em.h>
#define AUDIT_ARCH_X86_64	(EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_64BIT	0x80000000
#define __AUDIT_ARCH_LE		0x40000000
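
/*
 * (Reference note, an addition to the original comments: EM_X86_64 is 62,
 * so AUDIT_ARCH_X86_64 above works out to 0xC000003E, the ABI value that
 * audit/seccomp report for 64-bit little-endian x86.)
 */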

	.code64
	.section .entry.text, "ax"


#ifndef CONFIG_PREEMPT
#define retint_kernel retint_restore_args
#endif

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
	swapgs
	sysretq
ENDPROC(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */


.macro TRACE_IRQS_IRETQ
#ifdef CONFIG_TRACE_IRQFLAGS
	bt	$9,EFLAGS(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * When the dynamic function tracer is enabled it will add a breakpoint
 * to all locations that it is about to modify, sync CPUs, update
 * all the code, sync CPUs, then remove the breakpoints. During this time
 * if lockdep is enabled, it might jump back into the debug handler
 * outside the updating of the IST protection (TRACE_IRQS_ON/OFF).
 *
 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
 * make sure the stack pointer does not get reset back to the top
 * of the debug stack, and instead just reuses the current stack.
 */
#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)

.macro TRACE_IRQS_OFF_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_OFF
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_ON_DEBUG
	call	debug_stack_set_zero
	TRACE_IRQS_ON
	call	debug_stack_reset
.endm

.macro TRACE_IRQS_IRETQ_DEBUG
	bt	$9,EFLAGS(%rsp)	/* interrupts off? */
	jnc	1f
	TRACE_IRQS_ON_DEBUG
1:
.endm

#else
# define TRACE_IRQS_OFF_DEBUG		TRACE_IRQS_OFF
# define TRACE_IRQS_ON_DEBUG		TRACE_IRQS_ON
# define TRACE_IRQS_IRETQ_DEBUG		TRACE_IRQS_IRETQ
#endif

/*
 * empty frame
 */
	.macro EMPTY_FRAME start=1 offset=0
	.if \start
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA rsp,8+\offset
	.else
	CFI_DEF_CFA_OFFSET 8+\offset
	.endif
	.endm

/*
 * initial frame state for interrupts (and exceptions without error code)
 */
	.macro INTR_FRAME start=1 offset=0
	EMPTY_FRAME \start, 5*8+\offset
	/*CFI_REL_OFFSET ss, 4*8+\offset*/
	CFI_REL_OFFSET rsp, 3*8+\offset
	/*CFI_REL_OFFSET rflags, 2*8+\offset*/
	/*CFI_REL_OFFSET cs, 1*8+\offset*/
	CFI_REL_OFFSET rip, 0*8+\offset
	.endm

/*
 * initial frame state for exceptions with error code (and interrupts
 * with vector already pushed)
 */
	.macro XCPT_FRAME start=1 offset=0
	INTR_FRAME \start, 1*8+\offset
	.endm

/*
 * frame that enables passing a complete pt_regs to a C function.
 */
	.macro DEFAULT_FRAME start=1 offset=0
	XCPT_FRAME \start, ORIG_RAX+\offset
	CFI_REL_OFFSET rdi, RDI+\offset
	CFI_REL_OFFSET rsi, RSI+\offset
	CFI_REL_OFFSET rdx, RDX+\offset
	CFI_REL_OFFSET rcx, RCX+\offset
	CFI_REL_OFFSET rax, RAX+\offset
	CFI_REL_OFFSET r8, R8+\offset
	CFI_REL_OFFSET r9, R9+\offset
	CFI_REL_OFFSET r10, R10+\offset
	CFI_REL_OFFSET r11, R11+\offset
	CFI_REL_OFFSET rbx, RBX+\offset
	CFI_REL_OFFSET rbp, RBP+\offset
	CFI_REL_OFFSET r12, R12+\offset
	CFI_REL_OFFSET r13, R13+\offset
	CFI_REL_OFFSET r14, R14+\offset
	CFI_REL_OFFSET r15, R15+\offset
	.endm

/*
 * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
 *
 * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Registers on entry:
 * rax  system call number
 * rcx  return address
 * r11  saved rflags (note: r11 is a callee-clobbered register in the C ABI)
 * rdi  arg0
 * rsi  arg1
 * rdx  arg2
 * r10  arg3 (needs to be moved to rcx to conform to the C ABI)
 * r8   arg4
 * r9   arg5
 * (note: r12-r15, rbp, rbx are callee-preserved in the C ABI)
 *
 * Only called from user space.
 *
 * When the user can change pt_regs->foo, always force IRET. That is because
 * IRET deals with non-canonical addresses better. SYSRET has trouble
 * with them due to bugs in both AMD and Intel CPUs.
 */
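
/*
 * Illustrative caller's-eye view (user-space code, not part of this file):
 * a minimal invocation of this entry point for write(2) sets up registers
 * exactly as documented above -- "msg"/"len" are assumed to exist:
 *
 *	movl	$1, %eax		# __NR_write
 *	movl	$1, %edi		# arg0: fd (stdout)
 *	leaq	msg(%rip), %rsi		# arg1: buffer
 *	movl	$len, %edx		# arg2: count
 *	syscall				# rcx := rip, r11 := rflags,
 *					# rax := return value or -errno
 */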

ENTRY(system_call)
	CFI_STARTPROC	simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA	rsp,0
	CFI_REGISTER	rip,rcx
	/*CFI_REGISTER	rflags,r11*/

	/*
	 * Interrupts are off on entry.
	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
	 * it is too small to ever cause noticeable irq latency.
	 */
	SWAPGS_UNSAFE_STACK
	/*
	 * A hypervisor implementation might want to use a label
	 * after the swapgs, so that it can do the swapgs
	 * for the guest and jump here on syscall.
	 */
GLOBAL(system_call_after_swapgs)

	movq	%rsp,PER_CPU_VAR(rsp_scratch)
	movq	PER_CPU_VAR(kernel_stack),%rsp

	/* Construct struct pt_regs on stack */
	pushq_cfi	$__USER_DS			/* pt_regs->ss */
	pushq_cfi	PER_CPU_VAR(rsp_scratch)	/* pt_regs->sp */
	/*
	 * Re-enable interrupts.
	 * We use 'rsp_scratch' as a scratch space, hence the irq-off block
	 * above must execute atomically in the face of possible
	 * interrupt-driven task preemption. We must enable interrupts only
	 * after we're done with using rsp_scratch:
	 */
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi	%r11			/* pt_regs->flags */
	pushq_cfi	$__USER_CS		/* pt_regs->cs */
	pushq_cfi	%rcx			/* pt_regs->ip */
	CFI_REL_OFFSET rip,0
	pushq_cfi_reg	rax			/* pt_regs->orig_ax */
	pushq_cfi_reg	rdi			/* pt_regs->di */
	pushq_cfi_reg	rsi			/* pt_regs->si */
	pushq_cfi_reg	rdx			/* pt_regs->dx */
	pushq_cfi_reg	rcx			/* pt_regs->cx */
	pushq_cfi	$-ENOSYS		/* pt_regs->ax */
	pushq_cfi_reg	r8			/* pt_regs->r8 */
	pushq_cfi_reg	r9			/* pt_regs->r9 */
	pushq_cfi_reg	r10			/* pt_regs->r10 */
	pushq_cfi_reg	r11			/* pt_regs->r11 */
	sub	$(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
	CFI_ADJUST_CFA_OFFSET 6*8
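	/*
	 * (Descriptive note, added for orientation: %rsp now points at a
	 * complete struct pt_regs, SS in the highest slot down to the six
	 * just-reserved slots for bx, bp and r12-r15 at the lowest
	 * addresses; those six stay garbage unless the slow path runs
	 * SAVE_EXTRA_REGS.)
	 */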

	testl	$_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	tracesys
system_call_fastpath:
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max,%rax
#else
	andl	$__SYSCALL_MASK,%eax
	cmpl	$__NR_syscall_max,%eax
#endif
	ja	1f	/* return -ENOSYS (already in pt_regs->ax) */
	movq	%r10,%rcx
	call	*sys_call_table(,%rax,8)
	movq	%rax,RAX(%rsp)
1:
/*
 * Syscall return path ending with SYSRET (fast path).
 * Has incompletely filled pt_regs.
 */
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF

	/*
	 * We must check ti flags with interrupts (or at least preemption)
	 * off because we must *never* return to userspace without
	 * processing exit work that is enqueued if we're preempted here.
	 * In particular, returning to userspace with any of the one-shot
	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
	 * very bad.
	 */
	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
	jnz	int_ret_from_sys_call_irqs_off	/* Go to the slow path */

	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:
	 */
	TRACE_IRQS_ON
	RESTORE_C_REGS_EXCEPT_RCX_R11
	movq	RIP(%rsp),%rcx
	CFI_REGISTER	rip,rcx
	movq	EFLAGS(%rsp),%r11
	/*CFI_REGISTER	rflags,r11*/
	movq	RSP(%rsp),%rsp
	/*
	 * 64bit SYSRET restores rip from rcx,
	 * rflags from r11 (but RF and VM bits are forced to 0),
	 * cs and ss are loaded from MSRs.
	 */
	USERGS_SYSRET64

	CFI_RESTORE_STATE

	/* Do syscall entry tracing */
tracesys:
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	call	syscall_trace_enter_phase1
	test	%rax, %rax
	jnz	tracesys_phase2		/* if needed, run the slow path */
	RESTORE_C_REGS_EXCEPT_RAX	/* else restore clobbered regs */
	movq	ORIG_RAX(%rsp), %rax
	jmp	system_call_fastpath	/* and return to the fast path */

tracesys_phase2:
	SAVE_EXTRA_REGS
	movq	%rsp, %rdi
	movl	$AUDIT_ARCH_X86_64, %esi
	movq	%rax,%rdx
	call	syscall_trace_enter_phase2

	/*
	 * Reload registers from stack in case ptrace changed them.
	 * We don't reload %rax because syscall_trace_enter_phase2() returned
	 * the value it wants us to use in the table lookup.
	 */
	RESTORE_C_REGS_EXCEPT_RAX
	RESTORE_EXTRA_REGS
#if __SYSCALL_MASK == ~0
	cmpq	$__NR_syscall_max,%rax
#else
	andl	$__SYSCALL_MASK,%eax
	cmpl	$__NR_syscall_max,%eax
#endif
	ja	int_ret_from_sys_call	/* RAX(%rsp) is already set */
	movq	%r10,%rcx	/* fixup for C */
	call	*sys_call_table(,%rax,8)
	movq	%rax,RAX(%rsp)
	/* Use IRET because user could have changed pt_regs->foo */

/*
 * Syscall return path ending with IRET.
 * Has correct iret frame.
 */
GLOBAL(int_ret_from_sys_call)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
int_ret_from_sys_call_irqs_off:
	movl	$_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
GLOBAL(int_with_check)
	LOCKDEP_SYS_EXIT_IRQ
	GET_THREAD_INFO(%rcx)
	movl	TI_flags(%rcx),%edx
	andl	%edi,%edx
	jnz	int_careful
	andl	$~TS_COMPAT,TI_status(%rcx)
	jmp	retint_swapgs

	/* Either reschedule or signal or syscall exit tracking needed. */
	/* First do a reschedule test. */
	/* edx:	work, edi: workmask */
int_careful:
	bt	$TIF_NEED_RESCHED,%edx
	jnc	int_very_careful
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi	%rdi
	SCHEDULE_USER
	popq_cfi	%rdi
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check

	/* handle signals and tracing -- both require a full pt_regs */
int_very_careful:
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_EXTRA_REGS
	/* Check for syscall exit trace */
	testl	$_TIF_WORK_SYSCALL_EXIT,%edx
	jz	int_signal
	pushq_cfi	%rdi
	leaq	8(%rsp),%rdi	# &ptregs -> arg1
	call	syscall_trace_leave
	popq_cfi	%rdi
	andl	$~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
	jmp	int_restore_rest

int_signal:
	testl	$_TIF_DO_NOTIFY_MASK,%edx
	jz	1f
	movq	%rsp,%rdi	# &ptregs -> arg1
	xorl	%esi,%esi	# oldset -> arg2
	call	do_notify_resume
1:	movl	$_TIF_WORK_MASK,%edi
int_restore_rest:
	RESTORE_EXTRA_REGS
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp	int_with_check
	CFI_ENDPROC
END(system_call)

	.macro FORK_LIKE func
ENTRY(stub_\func)
	CFI_STARTPROC
	DEFAULT_FRAME 0, 8		/* offset 8: return address */
	SAVE_EXTRA_REGS 8
	call	sys_\func
	ret
	CFI_ENDPROC
END(stub_\func)
	.endm
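
/*
 * Illustrative expansion of the macro above (descriptive, not extra
 * code): "FORK_LIKE clone" below produces stub_clone, which merely tops
 * up the partial SYSCALL frame with the callee-preserved registers
 * (SAVE_EXTRA_REGS) so the child starts from a complete pt_regs, then
 * calls sys_clone and returns.
 */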

	FORK_LIKE  clone
	FORK_LIKE  fork
	FORK_LIKE  vfork

ENTRY(stub_execve)
	CFI_STARTPROC
	addq	$8, %rsp
	DEFAULT_FRAME 0
	SAVE_EXTRA_REGS
	call	sys_execve
	movq	%rax,RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execve)

ENTRY(stub_execveat)
	CFI_STARTPROC
	addq	$8, %rsp
	DEFAULT_FRAME 0
	SAVE_EXTRA_REGS
	call	sys_execveat
	movq	%rax,RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_execveat)

/*
 * sigreturn is special because it needs to restore all registers on return.
 * This cannot be done with SYSRET, so use the IRET return path instead.
 */
ENTRY(stub_rt_sigreturn)
	CFI_STARTPROC
	addq	$8, %rsp
	DEFAULT_FRAME 0
	SAVE_EXTRA_REGS
	call	sys_rt_sigreturn
	movq	%rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_rt_sigreturn)

#ifdef CONFIG_X86_X32_ABI
ENTRY(stub_x32_rt_sigreturn)
	CFI_STARTPROC
	addq	$8, %rsp
	DEFAULT_FRAME 0
	SAVE_EXTRA_REGS
	call	sys32_x32_rt_sigreturn
	movq	%rax,RAX(%rsp) # fixme, this could be done at the higher layer
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_x32_rt_sigreturn)

ENTRY(stub_x32_execve)
	CFI_STARTPROC
	addq	$8, %rsp
	DEFAULT_FRAME 0
	SAVE_EXTRA_REGS
	call	compat_sys_execve
	movq	%rax,RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_x32_execve)

ENTRY(stub_x32_execveat)
	CFI_STARTPROC
	addq	$8, %rsp
	DEFAULT_FRAME 0
	SAVE_EXTRA_REGS
	call	compat_sys_execveat
	movq	%rax,RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp	int_ret_from_sys_call
	CFI_ENDPROC
END(stub_x32_execveat)

#endif

/*
 * A newly forked process directly context switches into this address.
 *
 * rdi: prev task we switched from
 */
ENTRY(ret_from_fork)
	DEFAULT_FRAME

	LOCK ; btr $TIF_FORK,TI_flags(%r8)

	pushq_cfi $0x0002
	popfq_cfi			# reset kernel eflags

	call schedule_tail		# rdi: 'prev' task parameter

	GET_THREAD_INFO(%rcx)

	RESTORE_EXTRA_REGS

	testl $3,CS(%rsp)		# from kernel_thread?
	jz   1f

	/*
	 * By the time we get here, we have no idea whether our pt_regs,
	 * ti flags, and ti status came from the 64-bit SYSCALL fast path,
	 * the slow path, or one of the ia32entry paths.
	 * Use int_ret_from_sys_call to return, since it can safely handle
	 * all of the above.
	 */
	jmp  int_ret_from_sys_call

1:
	movq %rbp, %rdi
	call *%rbx
	movl $0, RAX(%rsp)
	RESTORE_EXTRA_REGS
	jmp int_ret_from_sys_call
	CFI_ENDPROC
END(ret_from_fork)

/*
 * Build the entry stubs and pointer table with some assembler magic.
 * We pack 7 stubs into a single 32-byte chunk, which will fit in a
 * single cache line on all modern x86 implementations.
 */
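/*
 * (Informal size check, added for clarity: each stub is a 2-byte
 * "push imm8" -- the vector is kept in signed-byte range -- plus a
 * 2-byte short "jmp 2f"; the 7th stub omits the jmp and falls through
 * to the shared "jmp common_interrupt", so a group fits in 32 bytes.)
 */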
	.section .init.rodata,"a"
ENTRY(interrupt)
	.section .entry.text
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	INTR_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept	7
    .if vector < FIRST_SYSTEM_VECTOR
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -8
      .endif
1:	pushq_cfi $(~vector+0x80)	/* Note: always in signed byte range */
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.quad 1b
      .section .entry.text
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
	CFI_ENDPROC
END(irq_entries_start)

.previous
END(interrupt)
.previous

/*
 * Interrupt entry/exit.
 *
 * Interrupt entry points save only callee-clobbered registers in the
 * fast path.
 *
 * Entry runs with interrupts off.
 */

/* 0(%rsp): ~(interrupt number) */
	.macro interrupt func
	cld
	/*
	 * Since nothing in interrupt handling code touches the r12...r15
	 * members of "struct pt_regs", and since interrupts can nest, we can
	 * save four stack slots and simultaneously provide
	 * an unwind-friendly stack layout by saving "truncated" pt_regs
	 * exactly up to the rbp slot, without these members.
	 */
	ALLOC_PT_GPREGS_ON_STACK -RBP
	SAVE_C_REGS -RBP
	/* this goes to 0(%rsp) for unwinder, not for saving the value: */
	SAVE_EXTRA_REGS_RBP -RBP

	leaq	-RBP(%rsp),%rdi	/* arg1 for \func (pointer to pt_regs) */

	testl	$3, CS-RBP(%rsp)
	je	1f
	SWAPGS
1:
	/*
	 * Save previous stack pointer, optionally switch to interrupt stack.
	 * irq_count is used to check if a CPU is already on an interrupt stack
	 * or not. While this is essentially redundant with preempt_count it is
	 * a little cheaper to use a separate counter in the PDA (short of
	 * moving irq_enter into assembly, which would be too much work)
	 */
	movq	%rsp, %rsi
	incl	PER_CPU_VAR(irq_count)
	cmovzq	PER_CPU_VAR(irq_stack_ptr),%rsp
	CFI_DEF_CFA_REGISTER	rsi
	pushq	%rsi
	/*
	 * For debugger:
	 * "CFA (Current Frame Address) is the value on stack + offset"
	 */
	CFI_ESCAPE	0x0f /* DW_CFA_def_cfa_expression */, 6, \
			0x77 /* DW_OP_breg7 (rsp) */, 0, \
			0x06 /* DW_OP_deref */, \
			0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
			0x22 /* DW_OP_plus */
	/* We entered an interrupt context - irqs are off: */
	TRACE_IRQS_OFF

	call	\func
	.endm

	/*
	 * The interrupt stubs push (~vector+0x80) onto the stack and
	 * then jump to common_interrupt.
	 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	XCPT_FRAME
	ASM_CLAC
	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
	interrupt do_IRQ
	/* 0(%rsp): old RSP */
ret_from_intr:
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	decl	PER_CPU_VAR(irq_count)

	/* Restore saved previous stack */
	popq	%rsi
	CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
	/* return code expects complete pt_regs - adjust rsp accordingly: */
	leaq	-RBP(%rsi),%rsp
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET	RBP

exit_intr:
	GET_THREAD_INFO(%rcx)
	testl	$3,CS(%rsp)
	je	retint_kernel

	/* Interrupt came from user space */
	/*
	 * Has a correct top of stack.
	 * %rcx: thread info. Interrupts off.
	 */
retint_with_reschedule:
	movl	$_TIF_WORK_MASK,%edi
retint_check:
	LOCKDEP_SYS_EXIT_IRQ
	movl	TI_flags(%rcx),%edx
	andl	%edi,%edx
	CFI_REMEMBER_STATE
	jnz	retint_careful

retint_swapgs:		/* return to user-space */
	/*
	 * The iretq could re-enable interrupts:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_IRETQ

	/*
	 * Try to use SYSRET instead of IRET if we're returning to
	 * a completely clean 64-bit userspace context.
	 */
	movq RCX(%rsp),%rcx
	cmpq %rcx,RIP(%rsp)		/* RCX == RIP */
	jne opportunistic_sysret_failed

	/*
	 * On Intel CPUs, sysret with non-canonical RCX/RIP will #GP
	 * in kernel space. This essentially lets the user take over
	 * the kernel, since userspace controls RSP. It's not worth
	 * testing for canonicalness exactly -- this check detects any
	 * of the 17 high bits set, which is true for non-canonical
	 * or kernel addresses. (This will pessimize vsyscall=native.
	 * Big deal.)
	 *
	 * If virtual addresses ever become wider, this will need
	 * to be updated to remain correct on both old and new CPUs.
	 */
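	/*
	 * (Worked example, added for clarity: a kernel address such as
	 * 0xffff880000000000 leaves 0x1ffff after "shr $47" -- non-zero,
	 * so we fall back to IRET -- while a canonical user RIP like
	 * 0x00007fffffffffff shifts down to 0.)
	 */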
	.ifne __VIRTUAL_MASK_SHIFT - 47
	.error "virtual address width changed -- sysret checks need update"
	.endif
	shr $__VIRTUAL_MASK_SHIFT, %rcx
	jnz opportunistic_sysret_failed

	cmpq $__USER_CS,CS(%rsp)	/* CS must match SYSRET */
	jne opportunistic_sysret_failed

	movq R11(%rsp),%r11
	cmpq %r11,EFLAGS(%rsp)		/* R11 == RFLAGS */
	jne opportunistic_sysret_failed

	testq $X86_EFLAGS_RF,%r11	/* sysret can't restore RF */
	jnz opportunistic_sysret_failed

	/* nothing to check for RSP */

	cmpq $__USER_DS,SS(%rsp)	/* SS must match SYSRET */
	jne opportunistic_sysret_failed

	/*
	 * We win!  This label is here just for ease of understanding
	 * perf profiles. Nothing jumps here.
	 */
irq_return_via_sysret:
	CFI_REMEMBER_STATE
	/* r11 is already restored (see code above) */
	RESTORE_C_REGS_EXCEPT_R11
	movq RSP(%rsp),%rsp
	USERGS_SYSRET64
	CFI_RESTORE_STATE

opportunistic_sysret_failed:
	SWAPGS
	jmp restore_args

/* Returning to kernel space */
#ifdef CONFIG_PREEMPT
	/* Interrupts are off */
	/* Check if we need preemption */
ENTRY(retint_kernel)
	cmpl	$0,PER_CPU_VAR(__preempt_count)
	jnz	retint_restore_args
	bt	$9,EFLAGS(%rsp)	/* interrupts were off? */
	jnc	retint_restore_args
	call	preempt_schedule_irq
	jmp	exit_intr
#endif
retint_restore_args:
	DISABLE_INTERRUPTS(CLBR_ANY)
	/*
	 * The iretq could re-enable interrupts:
	 */
	TRACE_IRQS_IRETQ
restore_args:
	RESTORE_C_REGS
	REMOVE_PT_GPREGS_FROM_STACK 8

irq_return:
	INTERRUPT_RETURN

ENTRY(native_iret)
	/*
	 * Are we returning to a stack segment from the LDT?  Note: in
	 * 64-bit mode SS:RSP on the exception stack is always valid.
	 */
#ifdef CONFIG_X86_ESPFIX64
	testb $4,(SS-RIP)(%rsp)
	jnz native_irq_return_ldt
#endif

.global native_irq_return_iret
native_irq_return_iret:
	/*
	 * This may fault.  Non-paranoid faults on return to userspace are
	 * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
	 * Double-faults due to espfix64 are handled in do_double_fault.
	 * Other faults here are fatal.
	 */
	iretq

#ifdef CONFIG_X86_ESPFIX64
native_irq_return_ldt:
	pushq_cfi %rax
	pushq_cfi %rdi
	SWAPGS
	movq PER_CPU_VAR(espfix_waddr),%rdi
	movq %rax,(0*8)(%rdi)	/* RAX */
	movq (2*8)(%rsp),%rax	/* RIP */
	movq %rax,(1*8)(%rdi)
	movq (3*8)(%rsp),%rax	/* CS */
	movq %rax,(2*8)(%rdi)
	movq (4*8)(%rsp),%rax	/* RFLAGS */
	movq %rax,(3*8)(%rdi)
	movq (6*8)(%rsp),%rax	/* SS */
	movq %rax,(5*8)(%rdi)
	movq (5*8)(%rsp),%rax	/* RSP */
	movq %rax,(4*8)(%rdi)
	andl $0xffff0000,%eax
	popq_cfi %rdi
	orq PER_CPU_VAR(espfix_stack),%rax
	SWAPGS
	movq %rax,%rsp
	popq_cfi %rax
	jmp native_irq_return_iret
#endif

	/* edi: workmask, edx: work */
retint_careful:
	CFI_RESTORE_STATE
	bt    $TIF_NEED_RESCHED,%edx
	jnc   retint_signal
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	pushq_cfi %rdi
	SCHEDULE_USER
	popq_cfi %rdi
	GET_THREAD_INFO(%rcx)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	jmp retint_check

retint_signal:
	testl $_TIF_DO_NOTIFY_MASK,%edx
	jz    retint_swapgs
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	SAVE_EXTRA_REGS
	movq $-1,ORIG_RAX(%rsp)
	xorl %esi,%esi		# oldset
	movq %rsp,%rdi		# &pt_regs
	call do_notify_resume
	RESTORE_EXTRA_REGS
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	GET_THREAD_INFO(%rcx)
	jmp retint_with_reschedule

	CFI_ENDPROC
END(common_interrupt)

/*
 * APIC interrupts.
 */
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
	INTR_FRAME
	ASM_CLAC
	pushq_cfi $~(\num)
.Lcommon_\sym:
	interrupt \do_sym
	jmp ret_from_intr
	CFI_ENDPROC
END(\sym)
.endm

#ifdef CONFIG_TRACING
#define trace(sym) trace_##sym
#define smp_trace(sym) smp_trace_##sym

.macro trace_apicinterrupt num sym
apicinterrupt3 \num trace(\sym) smp_trace(\sym)
.endm
#else
.macro trace_apicinterrupt num sym do_sym
.endm
#endif

.macro apicinterrupt num sym do_sym
apicinterrupt3 \num \sym \do_sym
trace_apicinterrupt \num \sym
.endm
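
/*
 * Illustrative expansion (descriptive only): "apicinterrupt
 * LOCAL_TIMER_VECTOR apic_timer_interrupt smp_apic_timer_interrupt"
 * below emits ENTRY(apic_timer_interrupt), which pushes
 * $~LOCAL_TIMER_VECTOR and dispatches through the "interrupt" macro to
 * smp_apic_timer_interrupt -- plus a trace_apic_timer_interrupt twin
 * when CONFIG_TRACING is enabled.
 */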

#ifdef CONFIG_SMP
apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR \
	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
apicinterrupt3 REBOOT_VECTOR \
	reboot_interrupt smp_reboot_interrupt
#endif

#ifdef CONFIG_X86_UV
apicinterrupt3 UV_BAU_MESSAGE \
	uv_bau_message_intr1 uv_bau_message_interrupt
#endif
apicinterrupt LOCAL_TIMER_VECTOR \
	apic_timer_interrupt smp_apic_timer_interrupt
apicinterrupt X86_PLATFORM_IPI_VECTOR \
	x86_platform_ipi smp_x86_platform_ipi

#ifdef CONFIG_HAVE_KVM
apicinterrupt3 POSTED_INTR_VECTOR \
	kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
apicinterrupt THRESHOLD_APIC_VECTOR \
	threshold_interrupt smp_threshold_interrupt
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
apicinterrupt THERMAL_APIC_VECTOR \
	thermal_interrupt smp_thermal_interrupt
#endif

#ifdef CONFIG_SMP
apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
	call_function_single_interrupt smp_call_function_single_interrupt
apicinterrupt CALL_FUNCTION_VECTOR \
	call_function_interrupt smp_call_function_interrupt
apicinterrupt RESCHEDULE_VECTOR \
	reschedule_interrupt smp_reschedule_interrupt
#endif

apicinterrupt ERROR_APIC_VECTOR \
	error_interrupt smp_error_interrupt
apicinterrupt SPURIOUS_APIC_VECTOR \
	spurious_interrupt smp_spurious_interrupt

#ifdef CONFIG_IRQ_WORK
apicinterrupt IRQ_WORK_VECTOR \
	irq_work_interrupt smp_irq_work_interrupt
#endif

/*
 * Exception entry points.
 */
#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
	/* Sanity check */
	.if \shift_ist != -1 && \paranoid == 0
	.error "using shift_ist requires paranoid=1"
	.endif

	.if \has_error_code
	XCPT_FRAME
	.else
	INTR_FRAME
	.endif

	ASM_CLAC
	PARAVIRT_ADJUST_EXCEPTION_FRAME

	.ifeq \has_error_code
	pushq_cfi $-1			/* ORIG_RAX: no syscall to restart */
	.endif

	ALLOC_PT_GPREGS_ON_STACK

	.if \paranoid
	.if \paranoid == 1
	CFI_REMEMBER_STATE
	testl $3, CS(%rsp)		/* If coming from userspace, switch */
	jnz 1f				/* stacks. */
	.endif
	call paranoid_entry
	.else
	call error_entry
	.endif
	/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

	DEFAULT_FRAME 0

	.if \paranoid
	.if \shift_ist != -1
	TRACE_IRQS_OFF_DEBUG		/* reload IDT in case of recursion */
	.else
	TRACE_IRQS_OFF
	.endif
	.endif

	movq %rsp,%rdi			/* pt_regs pointer */

	.if \has_error_code
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl %esi,%esi			/* no error code */
	.endif

	.if \shift_ist != -1
	subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	call \do_sym

	.if \shift_ist != -1
	addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
	.endif

	/* these procedures expect "no swapgs" flag in ebx */
	.if \paranoid
	jmp paranoid_exit
	.else
	jmp error_exit
	.endif

	.if \paranoid == 1
	CFI_RESTORE_STATE
	/*
	 * Paranoid entry from userspace.  Switch stacks and treat it
	 * as a normal entry.  This means that paranoid handlers
	 * run in real process context if user_mode(regs).
	 */
1:
	call error_entry

	DEFAULT_FRAME 0

	movq %rsp,%rdi			/* pt_regs pointer */
	call sync_regs
	movq %rax,%rsp			/* switch stack */

	movq %rsp,%rdi			/* pt_regs pointer */

	.if \has_error_code
	movq ORIG_RAX(%rsp),%rsi	/* get error code */
	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
	.else
	xorl %esi,%esi			/* no error code */
	.endif

	call \do_sym

	jmp error_exit			/* %ebx: no swapgs flag */
	.endif

	CFI_ENDPROC
END(\sym)
.endm
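
/*
 * Descriptive example of the macro above (not additional code):
 * "idtentry overflow do_overflow has_error_code=0" yields
 * ENTRY(overflow), which pushes $-1 as ORIG_RAX (no hardware error
 * code), allocates pt_regs, goes through error_entry, calls
 * do_overflow(regs, 0), and returns via error_exit.
 */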

#ifdef CONFIG_TRACING
.macro trace_idtentry sym do_sym has_error_code:req
idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#else
.macro trace_idtentry sym do_sym has_error_code:req
idtentry \sym \do_sym has_error_code=\has_error_code
.endm
#endif

idtentry divide_error do_divide_error has_error_code=0
idtentry overflow do_overflow has_error_code=0
idtentry bounds do_bounds has_error_code=0
idtentry invalid_op do_invalid_op has_error_code=0
idtentry device_not_available do_device_not_available has_error_code=0
idtentry double_fault do_double_fault has_error_code=1 paranoid=2
idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
idtentry invalid_TSS do_invalid_TSS has_error_code=1
idtentry segment_not_present do_segment_not_present has_error_code=1
idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0
idtentry coprocessor_error do_coprocessor_error has_error_code=0
idtentry alignment_check do_alignment_check has_error_code=1
idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0


	/* Reload gs selector with exception handling */
	/* edi:  new selector */
ENTRY(native_load_gs_index)
	CFI_STARTPROC
	pushfq_cfi
	DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
	SWAPGS
gs_change:
	movl %edi,%gs
2:	mfence		/* workaround */
	SWAPGS
	popfq_cfi
	ret
	CFI_ENDPROC
END(native_load_gs_index)

	_ASM_EXTABLE(gs_change,bad_gs)
	.section .fixup,"ax"
	/* running with kernelgs */
bad_gs:
	SWAPGS			/* switch back to user gs */
	xorl %eax,%eax
	movl %eax,%gs
	jmp  2b
	.previous
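
/*
 * (Usage note, added for orientation: C code reaches this routine as
 * native_load_gs_index(sel), normally via the load_gs_index() wrapper;
 * if the "movl %edi,%gs" above faults, the exception table entry
 * redirects to bad_gs, which loads a null selector instead.)
 */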

/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
	CFI_STARTPROC
	pushq_cfi %rbp
	CFI_REL_OFFSET rbp,0
	mov  %rsp,%rbp
	CFI_DEF_CFA_REGISTER rbp
	incl PER_CPU_VAR(irq_count)
	cmove PER_CPU_VAR(irq_stack_ptr),%rsp
	push  %rbp			# backlink for old unwinder
	call __do_softirq
	leaveq
	CFI_RESTORE		rbp
	CFI_DEF_CFA_REGISTER	rsp
	CFI_ADJUST_CFA_OFFSET   -8
	decl PER_CPU_VAR(irq_count)
	ret
	CFI_ENDPROC
END(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0

/*
 * A note on the "critical region" in our callback handler.
 * We want to avoid stacking callback handlers due to events occurring
 * during handling of the last event. To do this, we keep events disabled
 * until we've done all processing. HOWEVER, we must enable events before
 * popping the stack frame (can't be done atomically) and so it would still
 * be possible to get enough handler activations to overflow the stack.
 * Although unlikely, bugs of that kind are hard to track down, so we'd
 * like to avoid the possibility.
 * So, on entry to the handler we detect whether we interrupted an
 * existing activation in its critical region -- if so, we pop the current
 * activation and restart the handler using the previous one.
 */
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001127ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
1128 CFI_STARTPROC
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001129/*
1130 * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *)
1131 * will see the correct pointer to the pt_regs
1132 */
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001133 movq %rdi, %rsp # we don't return, adjust the stack frame
1134 CFI_ENDPROC
Alexander van Heukelumdcd072e2008-11-20 14:40:11 +01001135 DEFAULT_FRAME
Brian Gerst56895532009-01-19 00:38:58 +0900113611: incl PER_CPU_VAR(irq_count)
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001137 movq %rsp,%rbp
1138 CFI_DEF_CFA_REGISTER rbp
Brian Gerst26f80bd2009-01-19 00:38:58 +09001139 cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001140 pushq %rbp # backlink for old unwinder
1141 call xen_evtchn_do_upcall
1142 popq %rsp
1143 CFI_DEF_CFA_REGISTER rsp
Brian Gerst56895532009-01-19 00:38:58 +09001144 decl PER_CPU_VAR(irq_count)
David Vrabelfdfd8112015-02-19 15:23:17 +00001145#ifndef CONFIG_PREEMPT
1146 call xen_maybe_preempt_hcall
1147#endif
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001148 jmp error_exit
1149 CFI_ENDPROC
Alexander van Heukelum371c3942011-03-11 21:59:38 +01001150END(xen_do_hypervisor_callback)
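
/*
 * The stacking-avoidance rule from the note above, as an illustrative C
 * sketch. unmask_events() and pop_frame_and_return() are hypothetical
 * stand-ins for the Xen event-mask update and the IRET path:
 *
 *	void hypervisor_callback_model(struct pt_regs *regs)
 *	{
 *		// Events stay masked for the whole body, so a second
 *		// activation cannot stack on top of this one here.
 *		xen_evtchn_do_upcall(regs);
 *		unmask_events();
 *		// The frame pop is not atomic with the unmask, so an
 *		// event can still fire in this window; the entry path
 *		// detects an interrupted critical region and restarts
 *		// the previous activation instead of nesting a new one.
 *		pop_frame_and_return();
 *	}
 */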
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001151
1152/*
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001153 * Hypervisor uses this for application faults while it executes.
1154 * We get here for two reasons:
1155 * 1. Fault while reloading DS, ES, FS or GS
1156 * 2. Fault while executing IRET
1157 * Category 1 we do not need to fix up as Xen has already reloaded all segment
1158 * registers that could be reloaded and zeroed the others.
1159 * Category 2 we fix up by killing the current process. We cannot use the
1160 * normal Linux return path in this case because if we use the IRET hypercall
1161 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
1162 * We distinguish between categories by comparing each saved segment register
1163 * with its current contents: any discrepancy means we are in category 1.
1164 */
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001165ENTRY(xen_failsafe_callback)
Alexander van Heukelumdcd072e2008-11-20 14:40:11 +01001166 INTR_FRAME 1 (6*8)
1167 /*CFI_REL_OFFSET gs,GS*/
1168 /*CFI_REL_OFFSET fs,FS*/
1169 /*CFI_REL_OFFSET es,ES*/
1170 /*CFI_REL_OFFSET ds,DS*/
1171 CFI_REL_OFFSET r11,8
1172 CFI_REL_OFFSET rcx,0
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001173 movw %ds,%cx
1174 cmpw %cx,0x10(%rsp)
1175 CFI_REMEMBER_STATE
1176 jne 1f
1177 movw %es,%cx
1178 cmpw %cx,0x18(%rsp)
1179 jne 1f
1180 movw %fs,%cx
1181 cmpw %cx,0x20(%rsp)
1182 jne 1f
1183 movw %gs,%cx
1184 cmpw %cx,0x28(%rsp)
1185 jne 1f
1186 /* All segments match their saved values => Category 2 (Bad IRET). */
1187 movq (%rsp),%rcx
1188 CFI_RESTORE rcx
1189 movq 8(%rsp),%r11
1190 CFI_RESTORE r11
1191 addq $0x30,%rsp
1192 CFI_ADJUST_CFA_OFFSET -0x30
Ingo Molnar14ae22b2008-11-21 15:20:47 +01001193 pushq_cfi $0 /* RIP */
1194 pushq_cfi %r11
1195 pushq_cfi %rcx
Jeremy Fitzhardinge4a5c3e72008-07-08 15:07:09 -07001196 jmp general_protection
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001197 CFI_RESTORE_STATE
11981: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
1199 movq (%rsp),%rcx
1200 CFI_RESTORE rcx
1201 movq 8(%rsp),%r11
1202 CFI_RESTORE r11
1203 addq $0x30,%rsp
1204 CFI_ADJUST_CFA_OFFSET -0x30
David Vrabela349e23d12012-10-19 17:29:07 +01001205 pushq_cfi $-1 /* orig_ax = -1 => not a system call */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001206 ALLOC_PT_GPREGS_ON_STACK
1207 SAVE_C_REGS
1208 SAVE_EXTRA_REGS
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001209 jmp error_exit
1210 CFI_ENDPROC
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001211END(xen_failsafe_callback)
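
/*
 * The category test above, in C form (a sketch: read_seg() and
 * saved_seg() are hypothetical accessors for the live selectors and the
 * values Xen saved at 0x10..0x28(%rsp)):
 *
 *	enum failsafe_cat { BAD_SEGMENT = 1, BAD_IRET = 2 };
 *
 *	enum failsafe_cat classify_failsafe(void)
 *	{
 *		int i;
 *
 *		for (i = 0; i < 4; i++)		// DS, ES, FS, GS
 *			if (read_seg(i) != saved_seg(i))
 *				return BAD_SEGMENT;	// category 1:
 *						//	retry the IRET
 *		return BAD_IRET;	// category 2: fake a #GP frame and
 *	}				// let general_protection kill us
 */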
1212
Seiji Aguchicf910e82013-06-20 11:46:53 -04001213apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
Sheng Yang38e20b02010-05-14 12:40:51 +01001214 xen_hvm_callback_vector xen_evtchn_do_upcall
1215
Jeremy Fitzhardinge3d75e1b2008-07-08 15:06:49 -07001216#endif /* CONFIG_XEN */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001217
K. Y. Srinivasanbc2b0332013-02-03 17:22:39 -08001218#if IS_ENABLED(CONFIG_HYPERV)
Seiji Aguchicf910e82013-06-20 11:46:53 -04001219apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
K. Y. Srinivasanbc2b0332013-02-03 17:22:39 -08001220 hyperv_callback_vector hyperv_vector_handler
1221#endif /* CONFIG_HYPERV */
1222
Andy Lutomirski577ed452014-05-21 15:07:09 -07001223idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
1224idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
Andy Lutomirski6f442be2014-11-22 18:00:32 -08001225idtentry stack_segment do_stack_segment has_error_code=1
Jeremy Fitzhardinge6cac5a92009-03-29 19:56:29 -07001226#ifdef CONFIG_XEN
Andy Lutomirskicb5dd2c2014-05-21 15:07:08 -07001227idtentry xen_debug do_debug has_error_code=0
1228idtentry xen_int3 do_int3 has_error_code=0
1229idtentry xen_stack_segment do_stack_segment has_error_code=1
Jeremy Fitzhardinge6cac5a92009-03-29 19:56:29 -07001230#endif
Andy Lutomirskicb5dd2c2014-05-21 15:07:08 -07001231idtentry general_protection do_general_protection has_error_code=1
1232trace_idtentry page_fault do_page_fault has_error_code=1
Gleb Natapov631bc482010-10-14 11:22:52 +02001233#ifdef CONFIG_KVM_GUEST
Andy Lutomirskicb5dd2c2014-05-21 15:07:08 -07001234idtentry async_page_fault do_async_page_fault has_error_code=1
Gleb Natapov631bc482010-10-14 11:22:52 +02001235#endif
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001236#ifdef CONFIG_X86_MCE
Andy Lutomirskicb5dd2c2014-05-21 15:07:08 -07001237idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001238#endif
1239
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001240/*
1241 * Save all registers in pt_regs, and switch gs if needed.
1242 * Use a slow but surefire "are we in kernel?" check.
1243 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
1244 */
1245ENTRY(paranoid_entry)
1246 XCPT_FRAME 1 15*8
Denys Vlasenko1eeb2072015-02-26 14:40:33 -08001247 cld
1248 SAVE_C_REGS 8
1249 SAVE_EXTRA_REGS 8
1250 movl $1,%ebx
1251 movl $MSR_GS_BASE,%ecx
1252 rdmsr
1253 testl %edx,%edx
1254 js 1f /* negative -> in kernel */
1255 SWAPGS
1256 xorl %ebx,%ebx
12571: ret
1258 CFI_ENDPROC
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001259END(paranoid_entry)
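
/*
 * The "are we in kernel?" test above, in C form (a sketch: rdmsr_high()
 * is a hypothetical helper returning %edx, the upper 32 bits, after
 * RDMSR, and swapgs() stands for the SWAPGS instruction):
 *
 *	int paranoid_entry_model(void)
 *	{
 *		// Kernel GS bases are canonical-high (negative) addresses,
 *		// so the sign of the upper half of MSR_GS_BASE tells us
 *		// whether the kernel GS base is already loaded.
 *		if ((int)rdmsr_high(MSR_GS_BASE) < 0)
 *			return 1;	// ebx=1: no swapgs needed on exit
 *		swapgs();
 *		return 0;		// ebx=0: swapgs needed on exit
 *	}
 */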
Denys Vlasenko1eeb2072015-02-26 14:40:33 -08001260
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001261/*
1262 * "Paranoid" exit path from exception stack. This is invoked
1263 * only on return from non-NMI IST interrupts that came
1264 * from kernel space.
1265 *
1266 * We may be returning to very strange contexts (e.g. very early
1267 * in syscall entry), so checking for preemption here would
1268 * be complicated. Fortunately, there's no good reason
1269 * to try to handle preemption here.
1270 */
1271/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001272ENTRY(paranoid_exit)
Jan Beulich1f130a72010-09-02 13:54:32 +01001273 DEFAULT_FRAME
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001274 DISABLE_INTERRUPTS(CLBR_NONE)
Steven Rostedt5963e312012-05-30 11:54:53 -04001275 TRACE_IRQS_OFF_DEBUG
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001276 testl %ebx,%ebx /* swapgs needed? */
Denys Vlasenko0d550832015-02-26 14:40:29 -08001277 jnz paranoid_exit_no_swapgs
Denys Vlasenkof2db9382015-02-26 14:40:30 -08001278 TRACE_IRQS_IRETQ
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001279 SWAPGS_UNSAFE_STACK
Denys Vlasenko0d550832015-02-26 14:40:29 -08001280 jmp paranoid_exit_restore
1281paranoid_exit_no_swapgs:
Denys Vlasenkof2db9382015-02-26 14:40:30 -08001282 TRACE_IRQS_IRETQ_DEBUG
Denys Vlasenko0d550832015-02-26 14:40:29 -08001283paranoid_exit_restore:
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001284 RESTORE_EXTRA_REGS
1285 RESTORE_C_REGS
1286 REMOVE_PT_GPREGS_FROM_STACK 8
Andy Lutomirski48e08d02014-11-11 12:49:41 -08001287 INTERRUPT_RETURN
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001288 CFI_ENDPROC
1289END(paranoid_exit)
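
/*
 * How paranoid_exit consumes that flag, in C form (a sketch:
 * restore_regs_and_iret() stands in for the RESTORE_* macros plus
 * INTERRUPT_RETURN):
 *
 *	void paranoid_exit_model(int ebx)
 *	{
 *		local_irq_disable();
 *		if (!ebx)		// entry did a swapgs for us,
 *			swapgs();	// so undo it before returning
 *		restore_regs_and_iret();
 *	}
 */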
1290
1291/*
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001292 * Save all registers in pt_regs, and switch gs if needed.
1293 * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001294 */
1295ENTRY(error_entry)
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001296 XCPT_FRAME 1 15*8
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001297 cld
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001298 SAVE_C_REGS 8
1299 SAVE_EXTRA_REGS 8
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001300 xorl %ebx,%ebx
1301 testl $3,CS+8(%rsp)
1302 je error_kernelspace
1303error_swapgs:
1304 SWAPGS
1305error_sti:
1306 TRACE_IRQS_OFF
1307 ret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001308
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001309 /*
1310 * There are two places in the kernel that can potentially fault with
1311 * usergs. Handle them here. B-stepping K8s sometimes report a
1312 * truncated RIP for IRET exceptions returning to compat mode. Check
1313 * for these here too.
1314 */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001315error_kernelspace:
Jan Beulich3bab13b2014-06-25 14:11:22 +01001316 CFI_REL_OFFSET rcx, RCX+8
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001317 incl %ebx
Andy Lutomirski7209a752014-07-23 08:34:11 -07001318 leaq native_irq_return_iret(%rip),%rcx
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001319 cmpq %rcx,RIP+8(%rsp)
Andy Lutomirskib645af22014-11-22 18:00:33 -08001320 je error_bad_iret
Brian Gerstae24ffe2009-10-12 10:18:23 -04001321 movl %ecx,%eax /* zero extend */
1322 cmpq %rax,RIP+8(%rsp)
1323 je bstep_iret
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001324 cmpq $gs_change,RIP+8(%rsp)
Cyrill Gorcunov9f1e87e2008-11-27 21:10:08 +03001325 je error_swapgs
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001326 jmp error_sti
Brian Gerstae24ffe2009-10-12 10:18:23 -04001327
1328bstep_iret:
1329 /* Fix truncated RIP */
1330 movq %rcx,RIP+8(%rsp)
Andy Lutomirskib645af22014-11-22 18:00:33 -08001331 /* fall through */
1332
1333error_bad_iret:
1334 SWAPGS
1335 mov %rsp,%rdi
1336 call fixup_bad_iret
1337 mov %rax,%rsp
1338 decl %ebx /* Return to usergs */
1339 jmp error_sti
Jan Beuliche6b04b62010-09-02 13:52:45 +01001340 CFI_ENDPROC
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001341END(error_entry)
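
/*
 * The usergs-fault classification above, in C form (a sketch: regs
 * fields are named as in struct pt_regs, gs_change_rip stands for the
 * address of the gs_change instruction in native_load_gs_index, and
 * fixup_frame() for the fixup_bad_iret path):
 *
 *	int error_entry_model(struct pt_regs *regs)
 *	{
 *		unsigned long iret_rip =
 *			(unsigned long)native_irq_return_iret;
 *
 *		if (regs->cs & 3) {	// fault came from user space
 *			swapgs();
 *			return 0;	// ebx=0: swapgs again on exit
 *		}
 *		// Bad IRET, possibly with a RIP truncated to 32 bits by
 *		// a B-stepping K8:
 *		if (regs->ip == iret_rip || regs->ip == (u32)iret_rip) {
 *			swapgs();
 *			fixup_frame();
 *			return 0;
 *		}
 *		if (regs->ip == gs_change_rip)
 *			swapgs();	// fault while reloading %gs
 *		return 1;		// ebx=1: kernel gs throughout
 *	}
 */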
1342
1343
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001344/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001345ENTRY(error_exit)
1346 DEFAULT_FRAME
1347 movl %ebx,%eax
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001348 RESTORE_EXTRA_REGS
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001349 DISABLE_INTERRUPTS(CLBR_NONE)
1350 TRACE_IRQS_OFF
1351 GET_THREAD_INFO(%rcx)
1352 testl %eax,%eax
1353 jne retint_kernel
1354 LOCKDEP_SYS_EXIT_IRQ
1355 movl TI_flags(%rcx),%edx
1356 movl $_TIF_WORK_MASK,%edi
1357 andl %edi,%edx
1358 jnz retint_careful
1359 jmp retint_swapgs
1360 CFI_ENDPROC
1361END(error_exit)
1362
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001363/*
1364 * Test if a given stack is an NMI stack or not.
1365 */
1366 .macro test_in_nmi reg stack nmi_ret normal_ret
1367 cmpq %\reg, \stack
1368 ja \normal_ret
1369 subq $EXCEPTION_STKSZ, %\reg
1370 cmpq %\reg, \stack
1371 jb \normal_ret
1372 jmp \nmi_ret
1373 .endm
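
/*
 * What the macro computes, in C form (a sketch; both compares are
 * unsigned, matching "ja"/"jb"):
 *
 *	int test_in_nmi_model(unsigned long stack, unsigned long nmi_top)
 *	{
 *		// The NMI IST stack is the EXCEPTION_STKSZ bytes ending
 *		// at nmi_top.
 *		return stack <= nmi_top &&
 *		       stack >= nmi_top - EXCEPTION_STKSZ;
 *	}
 */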
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001374
1375 /* runs on exception stack */
1376ENTRY(nmi)
1377 INTR_FRAME
1378 PARAVIRT_ADJUST_EXCEPTION_FRAME
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001379 /*
1380 * We allow breakpoints in NMIs. If a breakpoint occurs, then
1381 * the iretq it performs will take us out of NMI context.
1382 * This means that we can have nested NMIs where the next
1383 * NMI is using the top of the stack of the previous NMI. We
1384 * can't let it execute because the nested NMI will corrupt the
1385 * stack of the previous NMI. NMI handlers are not re-entrant
1386 * anyway.
1387 *
1388 * To handle this case we do the following:
1389 * Check a special location on the stack that contains
1390 * a variable that is set when NMIs are executing.
1391 * The interrupted task's stack is also checked to see if it
1392 * is an NMI stack.
1393 * If the variable is not set and the stack is not the NMI
1394 * stack then:
1395 * o Set the special variable on the stack
1396 * o Copy the interrupt frame into a "saved" location on the stack
1397 * o Copy the interrupt frame into a "copy" location on the stack
1398 * o Continue processing the NMI
1399 * If the variable is set or the previous stack is the NMI stack:
1400 * o Modify the "copy" location to jump to repeat_nmi
1401 * o Return to the first NMI
1402 *
1403 * Now on exit of the first NMI, we first clear the stack variable.
1404 * The NMI stack will tell any nested NMIs at that point that they are
1405 * nested. Then we pop the stack normally with iret, and if there was
1406 * a nested NMI that updated the copied interrupt stack frame, a
1407 * jump will be made to the repeat_nmi code that will handle the second
1408 * NMI.
1409 */
1410
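	/*
	 * The decision just described, in C form (a sketch:
	 * nmi_executing_slot() is a hypothetical reader of the "NMI
	 * executing" variable kept on the NMI stack, and
	 * test_in_nmi_model() is the range check from the macro above):
	 *
	 *	int nmi_is_nested(struct pt_regs *regs, unsigned long top)
	 *	{
	 *		if (regs->cs != __KERNEL_CS)
	 *			return 0;  // interrupted user space
	 *		if (nmi_executing_slot() == 1)
	 *			return 1;  // variable says "in NMI"
	 *		return test_in_nmi_model(regs->sp, top);
	 *	}
	 */
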
Denys Vlasenko146b2b02015-03-25 18:18:13 +01001411 /* Use %rdx as our temp variable throughout */
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001412 pushq_cfi %rdx
Jan Beulich62610912012-02-24 14:54:37 +00001413 CFI_REL_OFFSET rdx, 0
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001414
1415 /*
Steven Rostedt45d5a162012-02-19 16:43:37 -05001416 * If %cs was not the kernel segment, then the NMI triggered in user
1417 * space, which means it is definitely not nested.
1418 */
Steven Rostedta38449ef2012-02-20 15:29:34 -05001419 cmpl $__KERNEL_CS, 16(%rsp)
Steven Rostedt45d5a162012-02-19 16:43:37 -05001420 jne first_nmi
1421
1422 /*
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001423 * Check the special variable on the stack to see if NMIs are
1424 * executing.
1425 */
Steven Rostedta38449ef2012-02-20 15:29:34 -05001426 cmpl $1, -8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001427 je nested_nmi
1428
1429 /*
1430 * Now test if the previous stack was an NMI stack.
1431 * We need the double check: we check the NMI stack to cover the
1432 * race where the first NMI clears the variable just before returning.
1433 * We check the variable because the first NMI could be in a
1434 * breakpoint routine using a breakpoint stack.
1435 */
1436 lea 6*8(%rsp), %rdx
1437 test_in_nmi rdx, 4*8(%rsp), nested_nmi, first_nmi
Jan Beulich62610912012-02-24 14:54:37 +00001438 CFI_REMEMBER_STATE
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001439
1440nested_nmi:
1441 /*
1442 * Do nothing if we interrupted the fixup in repeat_nmi.
1443 * It's about to repeat the NMI handler, so we are fine
1444 * with ignoring this one.
1445 */
1446 movq $repeat_nmi, %rdx
1447 cmpq 8(%rsp), %rdx
1448 ja 1f
1449 movq $end_repeat_nmi, %rdx
1450 cmpq 8(%rsp), %rdx
1451 ja nested_nmi_out
1452
14531:
1454 /* Set up the interrupted NMI's stack to jump to repeat_nmi */
Salman Qazi28696f42012-10-01 17:29:25 -07001455 leaq -1*8(%rsp), %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001456 movq %rdx, %rsp
Salman Qazi28696f42012-10-01 17:29:25 -07001457 CFI_ADJUST_CFA_OFFSET 1*8
1458 leaq -10*8(%rsp), %rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001459 pushq_cfi $__KERNEL_DS
1460 pushq_cfi %rdx
1461 pushfq_cfi
1462 pushq_cfi $__KERNEL_CS
1463 pushq_cfi $repeat_nmi
1464
1465 /* Put stack back */
Salman Qazi28696f42012-10-01 17:29:25 -07001466 addq $(6*8), %rsp
1467 CFI_ADJUST_CFA_OFFSET -6*8
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001468
1469nested_nmi_out:
1470 popq_cfi %rdx
Jan Beulich62610912012-02-24 14:54:37 +00001471 CFI_RESTORE rdx
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001472
1473 /* No need to check faults here */
1474 INTERRUPT_RETURN
1475
Jan Beulich62610912012-02-24 14:54:37 +00001476 CFI_RESTORE_STATE
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001477first_nmi:
1478 /*
1479 * Because nested NMIs will use the pushed location that we
1480 * stored in rdx, we must keep that space available.
1481 * Here's what our stack frame will look like:
1482 * +-------------------------+
1483 * | original SS |
1484 * | original Return RSP |
1485 * | original RFLAGS |
1486 * | original CS |
1487 * | original RIP |
1488 * +-------------------------+
1489 * | temp storage for rdx |
1490 * +-------------------------+
1491 * | NMI executing variable |
1492 * +-------------------------+
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001493 * | copied SS |
1494 * | copied Return RSP |
1495 * | copied RFLAGS |
1496 * | copied CS |
1497 * | copied RIP |
1498 * +-------------------------+
Salman Qazi28696f42012-10-01 17:29:25 -07001499 * | Saved SS |
1500 * | Saved Return RSP |
1501 * | Saved RFLAGS |
1502 * | Saved CS |
1503 * | Saved RIP |
1504 * +-------------------------+
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001505 * | pt_regs |
1506 * +-------------------------+
1507 *
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001508 * The saved stack frame is used to fix up the copied stack frame
1509 * that a nested NMI may change to make the interrupted NMI iret jump
1510 * to repeat_nmi. The original stack frame and the temp storage
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001511 * are also used by nested NMIs and cannot be trusted on exit.
1512 */
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001513 /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
Jan Beulich62610912012-02-24 14:54:37 +00001514 movq (%rsp), %rdx
1515 CFI_RESTORE rdx
1516
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001517 /* Set the NMI executing variable on the stack. */
1518 pushq_cfi $1
1519
Salman Qazi28696f42012-10-01 17:29:25 -07001520 /*
1521 * Leave room for the "copied" frame
1522 */
1523 subq $(5*8), %rsp
Jan Beulich444723d2013-01-24 09:27:31 +00001524 CFI_ADJUST_CFA_OFFSET 5*8
Salman Qazi28696f42012-10-01 17:29:25 -07001525
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001526 /* Copy the stack frame to the Saved frame */
1527 .rept 5
Salman Qazi28696f42012-10-01 17:29:25 -07001528 pushq_cfi 11*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001529 .endr
Denys Vlasenko911d2bb2015-02-26 14:40:36 -08001530 CFI_DEF_CFA_OFFSET 5*8
Jan Beulich62610912012-02-24 14:54:37 +00001531
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001532 /* Everything up to here is safe from nested NMIs */
1533
Jan Beulich62610912012-02-24 14:54:37 +00001534 /*
1535 * If there was a nested NMI, the first NMI's iret will return
1536 * here. But NMIs are still enabled and we can take another
1537 * nested NMI. The nested NMI checks the interrupted RIP to see
1538 * if it is between repeat_nmi and end_repeat_nmi, and if so
1539 * it will just return, as we are about to repeat an NMI anyway.
1540 * This makes it safe to copy to the stack frame that a nested
1541 * NMI will update.
1542 */
1543repeat_nmi:
1544 /*
1545 * Update the stack variable to say we are still in NMI (the update
1546 * is benign for the non-repeat case, where 1 was pushed just above
1547 * to this very stack slot).
1548 */
Salman Qazi28696f42012-10-01 17:29:25 -07001549 movq $1, 10*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001550
1551 /* Make another copy, this one may be modified by nested NMIs */
Salman Qazi28696f42012-10-01 17:29:25 -07001552 addq $(10*8), %rsp
1553 CFI_ADJUST_CFA_OFFSET -10*8
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001554 .rept 5
Salman Qazi28696f42012-10-01 17:29:25 -07001555 pushq_cfi -6*8(%rsp)
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001556 .endr
Salman Qazi28696f42012-10-01 17:29:25 -07001557 subq $(5*8), %rsp
Denys Vlasenko911d2bb2015-02-26 14:40:36 -08001558 CFI_DEF_CFA_OFFSET 5*8
Jan Beulich62610912012-02-24 14:54:37 +00001559end_repeat_nmi:
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001560
1561 /*
1562 * Everything below this point can be preempted by a nested
Steven Rostedt79fb4ad2012-02-24 15:55:13 -05001563 * NMI if the first NMI took an exception and reset our iret stack
1564 * so that we repeat another NMI.
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001565 */
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001566 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001567 ALLOC_PT_GPREGS_ON_STACK
1568
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001569 /*
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001570 * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
Steven Rostedt1fd466e2011-12-08 12:32:27 -05001571 * as we should not be calling schedule in NMI context,
1572 * even with normal interrupts enabled. An NMI should not be
1573 * setting NEED_RESCHED or anything that normal interrupts and
1574 * exceptions might do.
1575 */
Denys Vlasenkoebfc4532015-02-26 14:40:34 -08001576 call paranoid_entry
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001577 DEFAULT_FRAME 0
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001578
1579 /*
1580 * Save off the CR2 register. If we take a page fault in the NMI then
1581 * it could corrupt the CR2 value. If the NMI preempts a page fault
1582 * handler before it was able to read the CR2 register, and then the
1583 * NMI itself takes a page fault, the page fault that was preempted
1584 * will read the information from the NMI page fault and not the
1585 * origin fault. Save it off and restore it if it changes.
1586 * original fault. Save it off and restore it if it changes.
1587 */
1588 movq %cr2, %r12
1589
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001590 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
1591 movq %rsp,%rdi
1592 movq $-1,%rsi
1593 call do_nmi
Steven Rostedt7fbb98c2012-06-07 10:21:21 -04001594
1595 /* Did the NMI take a page fault? Restore cr2 if it did */
1596 movq %cr2, %rcx
1597 cmpq %rcx, %r12
1598 je 1f
1599 movq %r12, %cr2
16001:
1601
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001602 testl %ebx,%ebx /* swapgs needed? */
1603 jnz nmi_restore
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001604nmi_swapgs:
1605 SWAPGS_UNSAFE_STACK
1606nmi_restore:
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001607 RESTORE_EXTRA_REGS
1608 RESTORE_C_REGS
Jan Beulich444723d2013-01-24 09:27:31 +00001609 /* Pop the extra iret frame at once */
Denys Vlasenko76f5df42015-02-26 14:40:27 -08001610 REMOVE_PT_GPREGS_FROM_STACK 6*8
Salman Qazi28696f42012-10-01 17:29:25 -07001611
Steven Rostedt3f3c8b82011-12-08 12:36:23 -05001612 /* Clear the NMI executing stack variable */
Salman Qazi28696f42012-10-01 17:29:25 -07001613 movq $0, 5*8(%rsp)
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001614 jmp irq_return
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001615 CFI_ENDPROC
Alexander van Heukelumddeb8f22008-11-24 13:24:28 +01001616END(nmi)
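
/*
 * The CR2 save/restore around do_nmi above, in C form (a sketch; the
 * asm keeps the saved value in the callee-saved %r12 rather than a
 * stack slot, but the logic is the same):
 *
 *	void nmi_body_model(struct pt_regs *regs)
 *	{
 *		unsigned long cr2 = read_cr2();	// may belong to an
 *						// interrupted #PF handler
 *		do_nmi(regs, -1);
 *		if (read_cr2() != cr2)		// the NMI itself faulted
 *			write_cr2(cr2);		// put the old value back
 *	}
 */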
1617
1618ENTRY(ignore_sysret)
1619 CFI_STARTPROC
1620 mov $-ENOSYS,%eax
1621 sysret
1622 CFI_ENDPROC
1623END(ignore_sysret)
1624