/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1991,1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and low-level fault and trap handling routines.
 *
 * Stack layout while running C code:
 *	ptrace needs to have all registers on the stack.
 *	If the order here is changed, it needs to be
 *	updated in fork.c:copy_process(), signal.c:do_signal(),
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - %fs
 *	28(%esp) - %gs		saved iff !CONFIG_X86_32_LAZY_GS
 *	2C(%esp) - orig_eax
 *	30(%esp) - %eip
 *	34(%esp) - %cs
 *	38(%esp) - %eflags
 *	3C(%esp) - %oldesp
 *	40(%esp) - %oldss
 */

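/*
 * For reference: the layout above is what SAVE_ALL below produces, and it
 * mirrors the 32-bit struct pt_regs in arch/x86/include/asm/ptrace.h.
 * A sketch, not a redefinition:
 *
 *	struct pt_regs {
 *		unsigned long bx, cx, dx, si, di, bp, ax;
 *		unsigned short ds, __dsh;
 *		unsigned short es, __esh;
 *		unsigned short fs, __fsh;
 *		unsigned short gs, __gsh;
 *		unsigned long orig_ax, ip;
 *		unsigned short cs, __csh;
 *		unsigned long flags, sp;
 *		unsigned short ss, __ssh;
 *	};
 */
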
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <asm/frame.h>
#include <asm/nospec-branch.h>

	.section .entry.text, "ax"

/*
 * We use macros for low-level operations which need to be overridden
 * for paravirtualization.  The following will never clobber any registers:
 *   INTERRUPT_RETURN (aka. "iret")
 *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
 *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
 *
 * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
 * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
 * Allowing a register to be clobbered can shrink the paravirt replacement
 * enough to patch inline, increasing performance.
 */

#ifdef CONFIG_PREEMPT
# define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
# define preempt_stop(clobbers)
# define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)		# interrupts off?
	jz	1f
	TRACE_IRQS_ON
1:
#endif
.endm

/*
 * User gs save/restore
 *
 * %gs is used for userland TLS, and the kernel uses it only for the stack
 * canary, which gcc requires to be at %gs:20.  Read the comment at the top
 * of stackprotector.h for more info.
 *
 * Local labels 98 and 99 are used.
 */
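
/*
 * Illustration of the %gs:20 requirement (a sketch; the real type lives in
 * asm/stackprotector.h): the per-cpu segment that %gs points at keeps the
 * canary word at byte offset 20, because -fstack-protector on 32-bit
 * hard-codes that address in every instrumented function:
 *
 *	struct stack_canary {
 *		char __pad[20];		// gcc hard-codes the offset
 *		unsigned long canary;	// i.e. the word at %gs:20
 *	};
 */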
#ifdef CONFIG_X86_32_LAZY_GS

 /* unfortunately push/pop can't be no-op */
.macro PUSH_GS
	pushl	$0
.endm
.macro POP_GS pop=0
	addl	$(4 + \pop), %esp
.endm
.macro POP_GS_EX
.endm

 /* all the rest are no-op */
.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else	/* CONFIG_X86_32_LAZY_GS */

.macro PUSH_GS
	pushl	%gs
.endm

.macro POP_GS pop=0
98:	popl	%gs
	.if \pop <> 0
	add	$\pop, %esp
	.endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, (%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro PTGS_TO_GS
98:	mov	PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl	$0, PT_GS(%esp)
	jmp	98b
.popsection
	_ASM_EXTABLE(98b, 99b)
.endm

.macro GS_TO_REG reg
	movl	%gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl	\reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl	$(__KERNEL_STACK_CANARY), \reg
	movl	\reg, %gs
.endm

#endif	/* CONFIG_X86_32_LAZY_GS */
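
/*
 * The 98/99 pairs above use the generic exception-table mechanism:
 * _ASM_EXTABLE(98b, 99b) records "if the instruction at 98 faults, resume
 * at 99".  On the C side this is consumed roughly as follows (a sketch in
 * the spirit of fixup_exception() in mm/extable.c, simplified):
 *
 *	const struct exception_table_entry *e;
 *
 *	e = search_exception_tables(regs->ip);
 *	if (e) {
 *		regs->ip = ex_fixup_addr(e);	// resume at the fixup code
 *		return 1;			// fault handled
 *	}
 */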

.macro SAVE_ALL pt_regs_ax=%eax
	cld
	PUSH_GS
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	\pt_regs_ax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	movl	$(__USER_DS), %edx
	movl	%edx, %ds
	movl	%edx, %es
	movl	$(__KERNEL_PERCPU), %edx
	movl	%edx, %fs
	SET_KERNEL_GS %edx
.endm

/*
 * This is a sneaky trick to help the unwinder find pt_regs on the stack.  The
 * frame pointer is replaced with an encoded pointer to pt_regs.  The encoding
 * is just clearing the MSB, which makes it an invalid stack address and is
 * also a signal to the unwinder that it's a pt_regs pointer in disguise.
 *
 * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the
 * original ebp.
 */
.macro ENCODE_FRAME_POINTER
#ifdef CONFIG_FRAME_POINTER
	mov %esp, %ebp
	andl $0x7fffffff, %ebp
#endif
.endm
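
/*
 * For illustration: the unwinder reverses this encoding roughly as below
 * (cf. decode_frame_pointer() in arch/x86/kernel/unwind_frame.c).  Kernel
 * stack addresses have the MSB set, so a cleared MSB cannot be a real
 * frame pointer:
 *
 *	static struct pt_regs *decode_frame_pointer(unsigned long *bp)
 *	{
 *		unsigned long regs = (unsigned long)bp;
 *
 *		if (regs & 0x80000000)
 *			return NULL;		// ordinary frame pointer
 *
 *		return (struct pt_regs *)(regs | 0x80000000);
 *	}
 */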

.macro RESTORE_INT_REGS
	popl	%ebx
	popl	%ecx
	popl	%edx
	popl	%esi
	popl	%edi
	popl	%ebp
	popl	%eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl	%ds
2:	popl	%es
3:	popl	%fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl	$0, (%esp)
	jmp	1b
5:	movl	$0, (%esp)
	jmp	2b
6:	movl	$0, (%esp)
	jmp	3b
.popsection
	_ASM_EXTABLE(1b, 4b)
	_ASM_EXTABLE(2b, 5b)
	_ASM_EXTABLE(3b, 6b)
	POP_GS_EX
.endm

/*
 * %eax: prev task
 * %edx: next task
 */
ENTRY(__switch_to_asm)
	/*
	 * Save callee-saved registers
	 * This must match the order in struct inactive_task_frame
	 */
	pushl	%ebp
	pushl	%ebx
	pushl	%edi
	pushl	%esi

	/* switch stack */
	movl	%esp, TASK_threadsp(%eax)
	movl	TASK_threadsp(%edx), %esp

#ifdef CONFIG_CC_STACKPROTECTOR
	movl	TASK_stack_canary(%edx), %ebx
	movl	%ebx, PER_CPU_VAR(stack_canary)+stack_canary_offset
#endif

#ifdef CONFIG_RETPOLINE
	/*
	 * When switching from a shallower to a deeper call stack
	 * the RSB may either underflow or use entries populated
	 * with userspace addresses. On CPUs where those concerns
	 * exist, overwrite the RSB with entries which capture
	 * speculative execution to prevent attack.
	 */
	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
#endif

	/* restore callee-saved registers */
	popl	%esi
	popl	%edi
	popl	%ebx
	popl	%ebp

	jmp	__switch_to
END(__switch_to_asm)
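
/*
 * For reference: the four pushes at the top of __switch_to_asm build the
 * bottom of struct inactive_task_frame (asm/switch_to.h).  A 32-bit sketch
 * of the resulting memory layout, lowest address first:
 *
 *	struct inactive_task_frame {
 *		unsigned long si;	// pushed last
 *		unsigned long di;
 *		unsigned long bx;
 *		unsigned long bp;	// pushed first
 *		unsigned long ret_addr;	// where "jmp __switch_to" returns to
 *	};
 */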

/*
 * The unwinder expects the last frame on the stack to always be at the same
 * offset from the end of the page, which allows it to validate the stack.
 * Calling schedule_tail() directly would break that convention because it's an
 * asmlinkage function so its argument has to be pushed on the stack.  This
 * wrapper creates a proper "end of stack" frame header before the call.
 */
ENTRY(schedule_tail_wrapper)
	FRAME_BEGIN

	pushl	%eax
	call	schedule_tail
	popl	%eax

	FRAME_END
	ret
ENDPROC(schedule_tail_wrapper)
/*
 * A newly forked process directly context switches into this address.
 *
 * eax: prev task we switched from
 * ebx: kernel thread func (NULL for user thread)
 * edi: kernel thread arg
 */
ENTRY(ret_from_fork)
	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

2:
	/* When we fork, we trace the syscall return in the child, too. */
	movl	%esp, %eax
	call	syscall_return_slowpath
	jmp	restore_all

	/* kernel thread */
1:	movl	%edi, %eax
	CALL_NOSPEC %ebx
	/*
	 * A kernel thread is allowed to return here after successfully
	 * calling do_execve().  Exit to userspace to complete the execve()
	 * syscall.
	 */
	movl	$0, PT_EAX(%esp)
	jmp	2b
END(ret_from_fork)
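
/*
 * Illustration: the ebx/edi protocol above is set up by copy_thread() in
 * arch/x86/kernel/process_32.c.  Roughly (a sketch; field names as in
 * struct inactive_task_frame), for a kernel thread it does:
 *
 *	frame->bx = sp;		// kernel thread function (0 for user threads)
 *	frame->di = arg;	// its argument
 *	frame->ret_addr = (unsigned long)ret_from_fork;
 */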

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible, which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
#ifdef CONFIG_VM86
	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	/*
	 * We can be coming here from child spawned by kernel_thread().
	 */
	movl	PT_CS(%esp), %eax
	andl	$SEGMENT_RPL_MASK, %eax
#endif
	cmpl	$USER_RPL, %eax
	jb	resume_kernel			# not returning to v8086 or userspace

ENTRY(resume_userspace)
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	prepare_exit_to_usermode
	jmp	restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
	cmpl	$0, PER_CPU_VAR(__preempt_count)
	jnz	restore_all
	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz	restore_all
	call	preempt_schedule_irq
	jmp	.Lneed_resched
END(resume_kernel)
#endif

GLOBAL(__begin_SYSENTER_singlestep_region)
/*
 * All code from here through __end_SYSENTER_singlestep_region is subject
 * to being single-stepped if a user program sets TF and executes SYSENTER.
 * There is absolutely nothing that we can do to prevent this from happening
 * (thanks Intel!).  To keep our handling of this situation as simple as
 * possible, we handle TF just like AC and NT, except that our #DB handler
 * will ignore all of the single-step traps generated in this range.
 */

#ifdef CONFIG_XEN
/*
 * Xen doesn't set %esp to be precisely what the normal SYSENTER
 * entry point expects, so fix it up before using the normal path.
 */
ENTRY(xen_sysenter_target)
	addl	$5*4, %esp			/* remove xen-provided frame */
	jmp	.Lsysenter_past_esp
#endif

/*
 * 32-bit SYSENTER entry.
 *
 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 * if X86_FEATURE_SEP is available.  This is the preferred system call
 * entry on 32-bit systems.
 *
 * The SYSENTER instruction, in principle, should *only* occur in the
 * vDSO.  In practice, a small number of Android devices were shipped
 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 * never happened in any of Google's Bionic versions -- it only happened
 * in a narrow range of Intel-provided versions.
 *
 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old EIP (!!!), ESP, or EFLAGS.
 *
 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 * user and/or vm86 state), we explicitly disable the SYSENTER
 * instruction in vm86 mode by reprogramming the MSRs.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp) arg6
 */
ENTRY(entry_SYSENTER_32)
	movl	TSS_sysenter_sp0(%esp), %esp
.Lsysenter_past_esp:
	pushl	$__USER_DS		/* pt_regs->ss */
	pushl	%ebp			/* pt_regs->sp (stashed in bp) */
	pushfl				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%esp)	/* Fix IF */
	pushl	$__USER_CS		/* pt_regs->cs */
	pushl	$0			/* pt_regs->ip = 0 (placeholder) */
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT, AC
	 * and TF ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfl.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

/* Opportunistic SYSEXIT */
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movl	PT_EIP(%esp), %edx	/* pt_regs->ip */
	movl	PT_OLDESP(%esp), %ecx	/* pt_regs->sp */
1:	mov	PT_FS(%esp), %fs
	PTGS_TO_GS
	popl	%ebx			/* pt_regs->bx */
	addl	$2*4, %esp		/* skip pt_regs->cx and pt_regs->dx */
	popl	%esi			/* pt_regs->si */
	popl	%edi			/* pt_regs->di */
	popl	%ebp			/* pt_regs->bp */
	popl	%eax			/* pt_regs->ax */

	/*
	 * Restore all flags except IF. (We restore IF separately because
	 * STI gives a one-instruction window in which we won't be interrupted,
	 * whereas POPF does not.)
	 */
	addl	$PT_EFLAGS-PT_DS, %esp	/* point esp at pt_regs->flags */
	btr	$X86_EFLAGS_IF_BIT, (%esp)
	popfl

	/*
	 * Return back to the vDSO, which will pop ecx and edx.
	 * Don't bother with DS and ES (they already contain __USER_DS).
	 */
	sti
	sysexit

.pushsection .fixup, "ax"
2:	movl	$0, PT_FS(%esp)
	jmp	1b
.popsection
	_ASM_EXTABLE(1b, 2b)
	PTGS_TO_GS_EX

.Lsysenter_fix_flags:
	pushl	$X86_EFLAGS_FIXED
	popfl
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_SYSENTER_singlestep_region)
ENDPROC(entry_SYSENTER_32)
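
/*
 * Illustration: the "previously programmed MSRs" mentioned above are set
 * up per cpu by enable_sep_cpu() in arch/x86/kernel/cpu/common.c, roughly
 * as in this sketch (see that file for the authoritative version):
 *
 *	wrmsr(MSR_IA32_SYSENTER_CS,  __KERNEL_CS, 0);
 *	wrmsr(MSR_IA32_SYSENTER_ESP,
 *	      (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
 *	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 */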

/*
 * 32-bit legacy system call entry.
 *
 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 * instruction.  INT $0x80 lands here.
 *
 * This entry point can be used by any 32-bit program to perform system calls.
 * Instances of INT $0x80 can be found inline in various programs and
 * libraries.  It is also used by the vDSO's __kernel_vsyscall
 * fallback for hardware that doesn't support a faster entry method.
 * Restarted 32-bit system calls also fall back to INT $0x80
 * regardless of what instruction was originally used to do the system
 * call.  (64-bit programs can use INT $0x80 as well, but they can
 * only run on 64-bit kernels and therefore land in
 * entry_INT80_compat.)
 *
 * This is considered a slow path.  It is not used by most libc
 * implementations on modern hardware except during process startup.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6
 */
ENTRY(entry_INT80_32)
	ASM_CLAC
	pushl	%eax			/* pt_regs->orig_ax */
	SAVE_ALL pt_regs_ax=$-ENOSYS	/* save rest */

	/*
	 * User mode is traced as though IRQs are on, and the interrupt gate
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movl	%esp, %eax
	call	do_int80_syscall_32
.Lsyscall_32_done:

restore_all:
	TRACE_IRQS_IRET
.Lrestore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	ALTERNATIVE	"jmp .Lrestore_nocheck", "", X86_BUG_ESPFIX

	movl	PT_EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	/*
	 * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
	 * are returning to the kernel.
	 * See comments in process.c:copy_thread() for details.
	 */
	movb	PT_OLDSS(%esp), %ah
	movb	PT_CS(%esp), %al
	andl	$(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl	$((SEGMENT_LDT << 8) | USER_RPL), %eax
	je	.Lldt_ss			# returning to user-space with LDT SS
#endif
.Lrestore_nocheck:
	RESTORE_REGS 4				# skip orig_eax/error_code
.Lirq_return:
	/*
	 * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
	 * when returning from IPI handler and when returning from
	 * scheduler to user-space.
	 */
	INTERRUPT_RETURN

.section .fixup, "ax"
ENTRY(iret_exc)
	pushl	$0				# no error code
	pushl	$do_iret_error
	jmp	common_exception
.previous
	_ASM_EXTABLE(.Lirq_return, iret_exc)

#ifdef CONFIG_X86_ESPFIX32
.Lldt_ss:
/*
 * Setup and switch to ESPFIX stack
 *
 * We're returning to userspace with a 16 bit stack. The CPU will not
 * restore the high word of ESP for us on executing iret... This is an
 * "official" bug of all the x86-compatible CPUs, which we can work
 * around to make dosemu and wine happy. We do this by preloading the
 * high word of ESP with the high word of the userspace ESP while
 * compensating for the offset by changing to the ESPFIX segment with
 * a base address that matches for the difference.
 */
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov	%esp, %edx			/* load kernel esp */
	mov	PT_OLDESP(%esp), %eax		/* load userspace esp */
	mov	%dx, %ax			/* eax: new kernel esp */
	sub	%eax, %edx			/* offset (low word is 0) */
	shr	$16, %edx
	mov	%dl, GDT_ESPFIX_SS + 4		/* bits 16..23 */
	mov	%dh, GDT_ESPFIX_SS + 7		/* bits 24..31 */
	pushl	$__ESPFIX_SS
	pushl	%eax				/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET:
	 */
	DISABLE_INTERRUPTS(CLBR_ANY)
	lss	(%esp), %esp			/* switch to espfix segment */
	jmp	.Lrestore_nocheck
#endif
ENDPROC(entry_INT80_32)
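
/*
 * Illustration (userspace, not kernel code): invoking this entry point
 * from C with gcc inline asm, e.g. write(1, buf, n) -- __NR_write is 4
 * on i386, and arguments travel in ebx/ecx/edx as documented above:
 *
 *	long ret;
 *	asm volatile ("int $0x80"
 *		      : "=a" (ret)
 *		      : "a" (4), "b" (1), "c" (buf), "d" (n)	// nr, fd, buf, count
 *		      : "memory");
 */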

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the ESPFIX stack to the normal zero-based stack
 *
 * We can't call C functions using the ESPFIX stack. This code reads
 * the high word of the segment base from the GDT and switches to the
 * normal stack and adjusts ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov	GDT_ESPFIX_SS + 4, %al		/* bits 16..23 */
	mov	GDT_ESPFIX_SS + 7, %ah		/* bits 24..31 */
	shl	$16, %eax
	addl	%esp, %eax			/* the adjusted stack pointer */
	pushl	$__KERNEL_DS
	pushl	%eax
	lss	(%esp), %esp			/* switch to the normal stack segment */
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl	%ss, %eax
	/* see if on espfix stack */
	cmpw	$__ESPFIX_SS, %ax
	jne	27f
	movl	$__KERNEL_DS, %eax
	movl	%eax, %ds
	movl	%eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm
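
/*
 * For reference: the GDT_ESPFIX_SS + 4 / + 7 byte pokes above follow the
 * x86 descriptor format, in which the 32-bit segment base is scattered
 * across the 8 descriptor bytes.  A sketch of the relevant fields of
 * struct desc_struct (asm/desc_defs.h):
 *
 *	u16 limit0;		// bytes 0..1
 *	u16 base0;		// bytes 2..3: base bits  0..15
 *	u16 base1 : 8, ...;	// byte  4  : base bits 16..23
 *	u16 ..., base2 : 8;	// byte  7  : base bits 24..31
 */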

/*
 * Build the entry stubs with some assembler magic.
 * We pack 1 stub into every 8-byte block.
 */
	.align 8
ENTRY(irq_entries_start)
    vector=FIRST_EXTERNAL_VECTOR
    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
	pushl	$(~vector+0x80)			/* Note: always in signed byte range */
    vector=vector+1
	jmp	common_interrupt
	.align	8
    .endr
END(irq_entries_start)

/*
 * the CPU automatically disables interrupts when executing an IRQ vector,
 * so IRQ-flags tracing has to follow that:
 */
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl	$-0x80, (%esp)			/* Adjust vector into the [-256, -1] range */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	movl	%esp, %eax
	call	do_IRQ
	jmp	ret_from_intr
ENDPROC(common_interrupt)
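
/*
 * Illustration: the vector number round-trip.  Each stub above pushes
 * ~vector + 0x80 (always in signed byte range, so the pushl encodes in
 * two bytes, letting a stub fit its 8-byte slot); common_interrupt then
 * subtracts 0x80 again, leaving ~vector in orig_eax, and the C side
 * recovers it (cf. do_IRQ() in arch/x86/kernel/irq.c) as:
 *
 *	unsigned vector = ~regs->orig_ax;
 */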

#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	ASM_CLAC;			\
	pushl	$~(nr);			\
	SAVE_ALL;			\
	ENCODE_FRAME_POINTER;		\
	TRACE_IRQS_OFF			\
	movl	%esp, %eax;		\
	call	fn;			\
	jmp	ret_from_intr;		\
ENDPROC(name)

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

ENTRY(coprocessor_error)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_error
	jmp	common_exception
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	ASM_CLAC
	pushl	$0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
	ALTERNATIVE "pushl	$do_general_protection",	\
		    "pushl	$do_simd_coprocessor_error",	\
		    X86_FEATURE_XMM
#else
	pushl	$do_simd_coprocessor_error
#endif
	jmp	common_exception
END(simd_coprocessor_error)

ENTRY(device_not_available)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	pushl	$do_device_not_available
	jmp	common_exception
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)
#endif

ENTRY(overflow)
	ASM_CLAC
	pushl	$0
	pushl	$do_overflow
	jmp	common_exception
END(overflow)

ENTRY(bounds)
	ASM_CLAC
	pushl	$0
	pushl	$do_bounds
	jmp	common_exception
END(bounds)

ENTRY(invalid_op)
	ASM_CLAC
	pushl	$0
	pushl	$do_invalid_op
	jmp	common_exception
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	ASM_CLAC
	pushl	$0
	pushl	$do_coprocessor_segment_overrun
	jmp	common_exception
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	ASM_CLAC
	pushl	$do_invalid_TSS
	jmp	common_exception
END(invalid_TSS)

ENTRY(segment_not_present)
	ASM_CLAC
	pushl	$do_segment_not_present
	jmp	common_exception
END(segment_not_present)

ENTRY(stack_segment)
	ASM_CLAC
	pushl	$do_stack_segment
	jmp	common_exception
END(stack_segment)

ENTRY(alignment_check)
	ASM_CLAC
	pushl	$do_alignment_check
	jmp	common_exception
END(alignment_check)

ENTRY(divide_error)
	ASM_CLAC
	pushl	$0				# no error code
	pushl	$do_divide_error
	jmp	common_exception
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	ASM_CLAC
	pushl	$0
	pushl	machine_check_vector
	jmp	common_exception
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	ASM_CLAC
	pushl	$0
	pushl	$do_spurious_interrupt_bug
	jmp	common_exception
END(spurious_interrupt_bug)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF

	/*
	 * Check to see if we got the event in the critical
	 * region in xen_iret_direct, after we've reenabled
	 * events and checked for pending events.  This simulates
	 * iret instruction's behaviour where it delivers a
	 * pending interrupt when enabling interrupts:
	 */
	movl	PT_EIP(%esp), %eax
	cmpl	$xen_iret_start_crit, %eax
	jb	1f
	cmpl	$xen_iret_end_crit, %eax
	jae	1f

	jmp	xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov	%esp, %eax
	call	xen_evtchn_do_upcall
#ifndef CONFIG_PREEMPT
	call	xen_maybe_preempt_hcall
#endif
	jmp	ret_from_intr
ENDPROC(xen_hypervisor_callback)

/*
 * Hypervisor uses this for application faults while it executes.
 * We get here for two reasons:
 *  1. Fault while reloading DS, ES, FS or GS
 *  2. Fault while executing IRET
 * Category 1 we fix up by reattempting the load, and zeroing the segment
 * register if the load fails.
 * Category 2 we fix up by jumping to do_iret_error. We cannot use the
 * normal Linux return path in this case because if we use the IRET hypercall
 * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
 * We distinguish between categories by maintaining a status value in EAX.
 */
ENTRY(xen_failsafe_callback)
	pushl	%eax
	movl	$1, %eax
1:	mov	4(%esp), %ds
2:	mov	8(%esp), %es
3:	mov	12(%esp), %fs
4:	mov	16(%esp), %gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl	%eax, %eax
	popl	%eax
	lea	16(%esp), %esp
	jz	5f
	jmp	iret_exc
5:	pushl	$-1				/* orig_ax = -1 => not a system call */
	SAVE_ALL
	ENCODE_FRAME_POINTER
	jmp	ret_from_exception

.section .fixup, "ax"
6:	xorl	%eax, %eax
	movl	%eax, 4(%esp)
	jmp	1b
7:	xorl	%eax, %eax
	movl	%eax, 8(%esp)
	jmp	2b
8:	xorl	%eax, %eax
	movl	%eax, 12(%esp)
	jmp	3b
9:	xorl	%eax, %eax
	movl	%eax, 16(%esp)
	jmp	4b
.previous
	_ASM_EXTABLE(1b, 6b)
	_ASM_EXTABLE(2b, 7b)
	_ASM_EXTABLE(3b, 8b)
	_ASM_EXTABLE(4b, 9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 xen_evtchn_do_upcall)

#endif	/* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		 hyperv_vector_handler)

BUILD_INTERRUPT3(hyperv_reenlightenment_vector, HYPERV_REENLIGHTENMENT_VECTOR,
		 hyperv_reenlightenment_intr)

#endif /* CONFIG_HYPERV */

ENTRY(page_fault)
	ASM_CLAC
	pushl	$do_page_fault
	ALIGN
	jmp	common_exception
END(page_fault)

common_exception:
	/* the function address is in %gs's slot on the stack */
	pushl	%fs
	pushl	%es
	pushl	%ds
	pushl	%eax
	pushl	%ebp
	pushl	%edi
	pushl	%esi
	pushl	%edx
	pushl	%ecx
	pushl	%ebx
	ENCODE_FRAME_POINTER
	cld
	movl	$(__KERNEL_PERCPU), %ecx
	movl	%ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl	PT_GS(%esp), %edi		# get the function address
	movl	PT_ORIG_EAX(%esp), %edx		# get the error code
	movl	$-1, PT_ORIG_EAX(%esp)		# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl	$(__USER_DS), %ecx
	movl	%ecx, %ds
	movl	%ecx, %es
	TRACE_IRQS_OFF
	movl	%esp, %eax			# pt_regs pointer
	CALL_NOSPEC %edi
	jmp	ret_from_exception
END(common_exception)

ENTRY(debug)
	/*
	 * #DB can happen at the first instruction of
	 * entry_SYSENTER_32 or in Xen's SYSENTER prologue.  If this
	 * happens, then we will be running on a very small stack.  We
	 * need to detect this condition and switch to the thread
	 * stack before calling any C code at all.
	 *
	 * If you edit this code, keep in mind that NMIs can happen in here.
	 */
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# error code 0
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Ldebug_from_sysenter_stack

	TRACE_IRQS_OFF
	call	do_debug
	jmp	ret_from_exception

.Ldebug_from_sysenter_stack:
	/* We're on the SYSENTER stack.  Switch off. */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	TRACE_IRQS_OFF
	call	do_debug
	movl	%ebx, %esp
	jmp	ret_from_exception
END(debug)
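
/*
 * Illustration: the "are we on the SYSENTER stack?" test above (and its
 * twin in the NMI handler below) is a single unsigned comparison.  In C
 * terms, roughly (a sketch; the asm uses the asm-offsets constants):
 *
 *	unsigned long end = (unsigned long)cpu_entry_area
 *			    + CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack;
 *	bool on_entry_stack = end - esp < SIZEOF_entry_stack;
 *
 * i.e. true iff esp lies within the per-cpu entry stack.
 */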

/*
 * NMI is doubly nasty.  It can happen on the first instruction of
 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
 * switched stacks.  We handle both conditions by simply checking whether we
 * interrupted kernel code running on the SYSENTER stack.
 */
ENTRY(nmi)
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl	%eax
	movl	%ss, %eax
	cmpw	$__ESPFIX_SS, %ax
	popl	%eax
	je	.Lnmi_espfix_stack
#endif

	pushl	%eax				# pt_regs->orig_ax
	SAVE_ALL
	ENCODE_FRAME_POINTER
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer

	/* Are we currently on the SYSENTER stack? */
	movl	PER_CPU_VAR(cpu_entry_area), %ecx
	addl	$CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
	subl	%eax, %ecx	/* ecx = (end of entry_stack) - esp */
	cmpl	$SIZEOF_entry_stack, %ecx
	jb	.Lnmi_from_sysenter_stack

	/* Not on SYSENTER stack. */
	call	do_nmi
	jmp	.Lrestore_all_notrace

.Lnmi_from_sysenter_stack:
	/*
	 * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
	 * is using the thread stack right now, so it's safe for us to use it.
	 */
	movl	%esp, %ebx
	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esp
	call	do_nmi
	movl	%ebx, %esp
	jmp	.Lrestore_all_notrace

#ifdef CONFIG_X86_ESPFIX32
.Lnmi_espfix_stack:
	/*
	 * create the pointer to lss back
	 */
	pushl	%ss
	pushl	%esp
	addl	$4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl	16(%esp)
	.endr
	pushl	%eax
	SAVE_ALL
	ENCODE_FRAME_POINTER
	FIXUP_ESPFIX_STACK			# %eax == %esp
	xorl	%edx, %edx			# zero error code
	call	do_nmi
	RESTORE_REGS
	lss	12+4(%esp), %esp		# back to espfix stack
	jmp	.Lirq_return
#endif
END(nmi)

ENTRY(int3)
	ASM_CLAC
	pushl	$-1				# mark this as an int
	SAVE_ALL
	ENCODE_FRAME_POINTER
	TRACE_IRQS_OFF
	xorl	%edx, %edx			# zero error code
	movl	%esp, %eax			# pt_regs pointer
	call	do_int3
	jmp	ret_from_exception
END(int3)

ENTRY(general_protection)
	pushl	$do_general_protection
	jmp	common_exception
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	ASM_CLAC
	pushl	$do_async_page_fault
	jmp	common_exception
END(async_page_fault)
#endif

ENTRY(rewind_stack_do_exit)
	/* Prevent any naive code from trying to unwind to our caller. */
	xorl	%ebp, %ebp

	movl	PER_CPU_VAR(cpu_current_top_of_stack), %esi
	leal	-TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp

	call	do_exit
1:	jmp	1b
END(rewind_stack_do_exit)