/*
 * Compatibility mode system call entry point for x86-64.
 *
 * Copyright 2000-2002 Andi Kleen, SuSE Labs.
 */
#include "calling.h"
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/errno.h>
#include <asm/ia32_unistd.h>
#include <asm/thread_info.h>
#include <asm/segment.h>
#include <asm/irqflags.h>
#include <asm/asm.h>
#include <asm/smap.h>
#include <linux/linkage.h>
#include <linux/err.h>

/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
#include <linux/elf-em.h>
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE 0x40000000

#ifndef CONFIG_AUDITSYSCALL
# define sysexit_audit ia32_ret_from_sys_call
# define sysretl_audit ia32_ret_from_sys_call
#endif

        .section .entry.text, "ax"

#ifdef CONFIG_PARAVIRT
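/*
 * Native (non-paravirt) implementation of the USERGS_SYSRET32 operation
 * used below: switch back to the user GS base and return with SYSRETL.
 */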
ENTRY(native_usergs_sysret32)
        swapgs
        sysretl
ENDPROC(native_usergs_sysret32)
#endif

/*
 * 32-bit SYSENTER instruction entry.
 *
 * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
 * IF and VM in rflags are cleared (IOW: interrupts are off).
 * SYSENTER does not save anything on the stack,
 * and does not save old rip (!!!) and rflags.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  user stack
 * 0(%ebp)  arg6
 *
 * This is purely a fast path. For anything complicated we use the int 0x80
 * path below. We set up a complete hardware stack frame to share code
 * with the int 0x80 path.
 */
ENTRY(entry_SYSENTER_compat)
        /*
         * Interrupts are off on entry.
         * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
         * it is too small to ever cause noticeable irq latency.
         */
        SWAPGS_UNSAFE_STACK
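        /* Switch to this task's kernel stack before building pt_regs. */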
        movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
        ENABLE_INTERRUPTS(CLBR_NONE)

        /* Zero-extending 32-bit regs, do not remove */
        movl %ebp, %ebp
        movl %eax, %eax

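        /*
         * thread_info->sysenter_return holds the address of
         * VDSO32_SYSENTER_RETURN; it becomes the saved user RIP below.
         */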
        movl ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d

        /* Construct struct pt_regs on stack */
        pushq $__USER32_DS              /* pt_regs->ss */
        pushq %rbp                      /* pt_regs->sp */
        pushfq                          /* pt_regs->flags */
        pushq $__USER32_CS              /* pt_regs->cs */
        pushq %r10                      /* pt_regs->ip = thread_info->sysenter_return */
        pushq %rax                      /* pt_regs->orig_ax */
        pushq %rdi                      /* pt_regs->di */
        pushq %rsi                      /* pt_regs->si */
        pushq %rdx                      /* pt_regs->dx */
        pushq %rcx                      /* pt_regs->cx */
        pushq $-ENOSYS                  /* pt_regs->ax */
        cld
        sub $(10*8), %rsp               /* pt_regs->r8-11, bp, bx, r12-15 not saved */

        /*
         * no need to do an access_ok check here because rbp has been
         * 32-bit zero extended
         */
        ASM_STAC
1:      movl (%rbp), %ebp
        _ASM_EXTABLE(1b, ia32_badarg)
        ASM_CLAC

        /*
         * Sysenter doesn't filter flags, so we need to clear NT
         * ourselves. To save a few cycles, we can check whether
         * NT was set instead of doing an unconditional popfq.
         */
        testl $X86_EFLAGS_NT, EFLAGS(%rsp)
        jnz sysenter_fix_flags
sysenter_flags_fixed:

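        /*
         * Mark the task as being in a compat syscall (TS_COMPAT) and take
         * the slow path if any syscall-entry tracing work is pending.
         */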
        orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jnz sysenter_tracesys

sysenter_do_call:
        /* 32-bit syscall -> 64-bit C ABI argument conversion */
        movl %edi, %r8d                 /* arg5 */
        movl %ebp, %r9d                 /* arg6 */
        xchg %ecx, %esi                 /* rsi:arg2, rcx:arg4 */
        movl %ebx, %edi                 /* arg1 */
        movl %edx, %edx                 /* arg3 (zero extension) */
sysenter_dispatch:
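        /*
         * Range-check the syscall number and dispatch through the 32-bit
         * syscall table. Out-of-range numbers skip the call and return the
         * -ENOSYS already stored in pt_regs->ax.
         */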
        cmpq $(IA32_NR_syscalls-1), %rax
        ja 1f
        call *ia32_sys_call_table(, %rax, 8)
        movq %rax, RAX(%rsp)
1:
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jnz sysexit_audit
sysexit_from_sys_call:
        /*
         * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
         * NMI between STI and SYSEXIT has poorly specified behavior,
         * and an NMI followed by an IRQ with usergs is fatal. So
         * we just pretend we're using SYSEXIT but we really use
         * SYSRETL instead.
         *
         * This code path is still called 'sysexit' because it pairs
         * with 'sysenter' and it uses the SYSENTER calling convention.
         */
        andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        movl RIP(%rsp), %ecx            /* User %eip */
        movl RSI(%rsp), %esi
        movl RDI(%rsp), %edi
        xorl %edx, %edx                 /* Do not leak kernel information */
        xorq %r8, %r8
        xorq %r9, %r9
        xorq %r10, %r10
        movl EFLAGS(%rsp), %r11d        /* User eflags */
        TRACE_IRQS_ON

        /*
         * SYSRETL works even on Intel CPUs. Use it in preference to SYSEXIT,
         * since it avoids a dicey window with interrupts enabled.
         */
        movl RSP(%rsp), %esp

        /*
         * USERGS_SYSRET32 does:
         *  gsbase = user's gs base
         *  eip = ecx
         *  rflags = r11
         *  cs = __USER32_CS
         *  ss = __USER_DS
         *
         * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
         *
         *  pop %ebp
         *  pop %edx
         *  pop %ecx
         *
         * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
         * avoid info leaks. R11 ends up with VDSO32_SYSENTER_RETURN's
         * address (already known to user code), and R12-R15 are
         * callee-saved and therefore don't contain any interesting
         * kernel data.
         */
        USERGS_SYSRET32

#ifdef CONFIG_AUDITSYSCALL
        .macro auditsys_entry_common
        /*
         * At this point, registers hold syscall args in the 32-bit syscall ABI:
         * EAX is syscall number, the 6 args are in EBX,ECX,EDX,ESI,EDI,EBP.
         *
         * We want to pass them to __audit_syscall_entry(), which is a 64-bit
         * C function with 5 parameters, so shuffle them to match what
         * the function expects: RDI,RSI,RDX,RCX,R8.
         */
        movl %esi, %r8d                 /* arg5 (R8 ) <= 4th syscall arg (ESI) */
        xchg %ecx, %edx                 /* arg4 (RCX) <= 3rd syscall arg (EDX) */
                                        /* arg3 (RDX) <= 2nd syscall arg (ECX) */
        movl %ebx, %esi                 /* arg2 (RSI) <= 1st syscall arg (EBX) */
        movl %eax, %edi                 /* arg1 (RDI) <= syscall number (EAX) */
        call __audit_syscall_entry

        /*
         * We are going to jump back to the syscall dispatch code.
         * Prepare syscall args as required by the 64-bit C ABI.
         * Registers clobbered by __audit_syscall_entry() are
         * loaded from pt_regs on stack:
         */
        movl ORIG_RAX(%rsp), %eax       /* syscall number */
        movl %ebx, %edi                 /* arg1 */
        movl RCX(%rsp), %esi            /* arg2 */
        movl RDX(%rsp), %edx            /* arg3 */
        movl RSI(%rsp), %ecx            /* arg4 */
        movl RDI(%rsp), %r8d            /* arg5 */
        movl %ebp, %r9d                 /* arg6 */
        .endm

        .macro auditsys_exit exit
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
        testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jnz ia32_ret_from_sys_call
        movl %eax, %esi                 /* second arg, syscall return value */
        cmpl $-MAX_ERRNO, %eax          /* is it an error ? */
        jbe 1f
        movslq %eax, %rsi               /* if error sign extend to 64 bits */
1:      setbe %al                       /* 1 if error, 0 if not */
        movzbl %al, %edi                /* zero-extend that into %edi */
        call __audit_syscall_exit
        movq RAX(%rsp), %rax            /* reload syscall return value */
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jz \exit
        xorl %eax, %eax                 /* Do not leak kernel information */
        movq %rax, R11(%rsp)
        movq %rax, R10(%rsp)
        movq %rax, R9(%rsp)
        movq %rax, R8(%rsp)
        jmp int_ret_from_sys_call_irqs_off
        .endm

sysenter_auditsys:
        auditsys_entry_common
        jmp sysenter_dispatch

sysexit_audit:
        auditsys_exit sysexit_from_sys_call
#endif

sysenter_fix_flags:
        pushq $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
        popfq
        jmp sysenter_flags_fixed

sysenter_tracesys:
#ifdef CONFIG_AUDITSYSCALL
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jz sysenter_auditsys
#endif
        SAVE_EXTRA_REGS
        xorl %eax, %eax                 /* Do not leak kernel information */
        movq %rax, R11(%rsp)
        movq %rax, R10(%rsp)
        movq %rax, R9(%rsp)
        movq %rax, R8(%rsp)
        movq %rsp, %rdi                 /* &pt_regs -> arg1 */
        call syscall_trace_enter

        /* Reload arg registers from stack. (see sysenter_tracesys) */
        movl RCX(%rsp), %ecx
        movl RDX(%rsp), %edx
        movl RSI(%rsp), %esi
        movl RDI(%rsp), %edi
        movl %eax, %eax                 /* zero extension */

        RESTORE_EXTRA_REGS
        jmp sysenter_do_call
ENDPROC(entry_SYSENTER_compat)

/*
 * 32-bit SYSCALL instruction entry.
 *
 * 32-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
 * then loads new ss, cs, and rip from previously programmed MSRs.
 * rflags gets masked by a value from another MSR (so CLD and CLAC
 * are not needed). SYSCALL does not save anything on the stack
 * and does not change rsp.
 *
 * Note: rflags saving+masking-with-MSR happens only in Long mode
 * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
 * Don't get confused: rflags saving+masking depends on Long Mode Active bit
 * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
 * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
 *
 * Arguments:
 * eax  system call number
 * ecx  return address
 * ebx  arg1
 * ebp  arg2 (note: not saved in the stack frame, should not be touched)
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * esp  user stack
 * 0(%esp)  arg6
 *
 * This is purely a fast path. For anything complicated we use the int 0x80
 * path below. We set up a complete hardware stack frame to share code
 * with the int 0x80 path.
 */
ENTRY(entry_SYSCALL_compat)
        /*
         * Interrupts are off on entry.
         * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
         * it is too small to ever cause noticeable irq latency.
         */
        SWAPGS_UNSAFE_STACK
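        /*
         * Stash the user stack pointer in %r8 (zero-extended) before
         * switching to the kernel stack; it is saved as pt_regs->sp below
         * and later used to fetch arg6 from the user stack.
         */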
        movl %esp, %r8d
        movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
        ENABLE_INTERRUPTS(CLBR_NONE)

        /* Zero-extending 32-bit regs, do not remove */
        movl %eax, %eax

        /* Construct struct pt_regs on stack */
        pushq $__USER32_DS              /* pt_regs->ss */
        pushq %r8                       /* pt_regs->sp */
        pushq %r11                      /* pt_regs->flags */
        pushq $__USER32_CS              /* pt_regs->cs */
        pushq %rcx                      /* pt_regs->ip */
        pushq %rax                      /* pt_regs->orig_ax */
        pushq %rdi                      /* pt_regs->di */
        pushq %rsi                      /* pt_regs->si */
        pushq %rdx                      /* pt_regs->dx */
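        /*
         * SYSCALL clobbered the user's %ecx (it now holds the return
         * address), so save %rbp in the ->cx slot instead and copy arg2
         * from %ebp into %ecx, where the dispatch code below expects it.
         */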
        pushq %rbp                      /* pt_regs->cx */
        movl %ebp, %ecx
        pushq $-ENOSYS                  /* pt_regs->ax */
        sub $(10*8), %rsp               /* pt_regs->r8-11, bp, bx, r12-15 not saved */

        /*
         * No need to do an access_ok check here because r8 has been
         * 32-bit zero extended:
         */
        ASM_STAC
1:      movl (%r8), %ebp
        _ASM_EXTABLE(1b, ia32_badarg)
        ASM_CLAC
        orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jnz cstar_tracesys

cstar_do_call:
        /* 32-bit syscall -> 64-bit C ABI argument conversion */
        movl %edi, %r8d                 /* arg5 */
        movl %ebp, %r9d                 /* arg6 */
        xchg %ecx, %esi                 /* rsi:arg2, rcx:arg4 */
        movl %ebx, %edi                 /* arg1 */
        movl %edx, %edx                 /* arg3 (zero extension) */

cstar_dispatch:
        cmpq $(IA32_NR_syscalls-1), %rax
        ja 1f

        call *ia32_sys_call_table(, %rax, 8)
        movq %rax, RAX(%rsp)
1:
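        /*
         * Restore %ebp (arg2) from the ->cx slot saved above; the 32-bit
         * SYSCALL ABI promises not to clobber it.
         */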
        movl RCX(%rsp), %ebp
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF
        testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jnz sysretl_audit

sysretl_from_sys_call:
        andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        movl RDX(%rsp), %edx
        movl RSI(%rsp), %esi
        movl RDI(%rsp), %edi
        movl RIP(%rsp), %ecx
        movl EFLAGS(%rsp), %r11d
        xorq %r10, %r10
        xorq %r9, %r9
        xorq %r8, %r8
        TRACE_IRQS_ON
        movl RSP(%rsp), %esp
        /*
         * 64-bit->32-bit SYSRET restores eip from ecx,
         * eflags from r11 (but RF and VM bits are forced to 0),
         * cs and ss are loaded from MSRs.
         * (Note: 32-bit->32-bit SYSRET is different: since r11
         * does not exist, it merely sets eflags.IF=1).
         *
         * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
         * descriptor is not reinitialized. This means that we must
         * avoid SYSRET with SS == NULL, which could happen if we schedule,
         * exit the kernel, and re-enter using an interrupt vector. (All
         * interrupt entries on x86_64 set SS to NULL.) We prevent that
         * from happening by reloading SS in __switch_to.
         */
        USERGS_SYSRET32

#ifdef CONFIG_AUDITSYSCALL
cstar_auditsys:
        auditsys_entry_common
        jmp cstar_dispatch

sysretl_audit:
        auditsys_exit sysretl_from_sys_call
#endif

cstar_tracesys:
#ifdef CONFIG_AUDITSYSCALL
        testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jz cstar_auditsys
#endif
        SAVE_EXTRA_REGS
        xorl %eax, %eax                 /* Do not leak kernel information */
        movq %rax, R11(%rsp)
        movq %rax, R10(%rsp)
        movq %rax, R9(%rsp)
        movq %rax, R8(%rsp)
        movq %rsp, %rdi                 /* &pt_regs -> arg1 */
        call syscall_trace_enter

        /* Reload arg registers from stack. (see sysenter_tracesys) */
        movl RCX(%rsp), %ecx
        movl RDX(%rsp), %edx
        movl RSI(%rsp), %esi
        movl RDI(%rsp), %edi
        movl %eax, %eax                 /* zero extension */

        RESTORE_EXTRA_REGS
        jmp cstar_do_call
END(entry_SYSCALL_compat)

ia32_badarg:
        /*
         * So far, we've entered kernel mode, set AC, turned on IRQs, and
         * saved C regs except r8-r11. We haven't done any of the other
         * standard entry work, though. We want to bail, but we shouldn't
         * treat this as a syscall entry since we don't even know what the
         * args are. Instead, treat this as a non-syscall entry, finish
         * the entry work, and immediately exit after setting AX = -EFAULT.
         *
         * We're really just being polite here. Killing the task outright
         * would be a reasonable action, too. Given that the only valid
         * way to have gotten here is through the vDSO, and we already know
         * that the stack pointer is bad, the task isn't going to survive
         * for long no matter what we do.
         */

        ASM_CLAC                        /* undo STAC */
        movq $-EFAULT, RAX(%rsp)        /* return -EFAULT if possible */

        /* Fill in the rest of pt_regs */
        xorl %eax, %eax
        movq %rax, R11(%rsp)
        movq %rax, R10(%rsp)
        movq %rax, R9(%rsp)
        movq %rax, R8(%rsp)
        SAVE_EXTRA_REGS

        /* Turn IRQs back off. */
        DISABLE_INTERRUPTS(CLBR_NONE)
        TRACE_IRQS_OFF

        /* And exit again. */
        jmp retint_user

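/*
 * Common slow-path return: zero the r8-r11 pt_regs slots (not filled in
 * on this path) and leave through the generic 64-bit syscall exit path.
 */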
ia32_ret_from_sys_call:
        xorl %eax, %eax                 /* Do not leak kernel information */
        movq %rax, R11(%rsp)
        movq %rax, R10(%rsp)
        movq %rax, R9(%rsp)
        movq %rax, R8(%rsp)
        jmp int_ret_from_sys_call

/*
 * Emulated IA32 system calls via int 0x80.
 *
 * Arguments:
 * eax  system call number
 * ebx  arg1
 * ecx  arg2
 * edx  arg3
 * esi  arg4
 * edi  arg5
 * ebp  arg6 (note: not saved in the stack frame, should not be touched)
 *
 * Notes:
 * Uses the same stack frame as the x86-64 version.
 * All registers except eax must be saved (but ptrace may violate that).
 * Arguments are zero extended. For system calls that want sign extension and
 * take long arguments a wrapper is needed. Most calls can just be called
 * directly.
 * Assumes it is only called from user space and entered with interrupts off.
 */

ENTRY(entry_INT80_compat)
        /*
         * Interrupts are off on entry.
         * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
         * it is too small to ever cause noticeable irq latency.
         */
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        SWAPGS
        ENABLE_INTERRUPTS(CLBR_NONE)

        /* Zero-extending 32-bit regs, do not remove */
        movl %eax, %eax

        /* Construct struct pt_regs on stack (iret frame is already on stack) */
        pushq %rax                      /* pt_regs->orig_ax */
        pushq %rdi                      /* pt_regs->di */
        pushq %rsi                      /* pt_regs->si */
        pushq %rdx                      /* pt_regs->dx */
        pushq %rcx                      /* pt_regs->cx */
        pushq $-ENOSYS                  /* pt_regs->ax */
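        /*
         * r8-r11 are not part of the int 0x80 ABI; store zeros rather
         * than whatever happens to be in those registers.
         */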
        pushq $0                        /* pt_regs->r8 */
        pushq $0                        /* pt_regs->r9 */
        pushq $0                        /* pt_regs->r10 */
        pushq $0                        /* pt_regs->r11 */
        cld
        sub $(6*8), %rsp                /* pt_regs->bp, bx, r12-15 not saved */

        orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
        testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
        jnz ia32_tracesys

ia32_do_call:
        /* 32-bit syscall -> 64-bit C ABI argument conversion */
        movl %edi, %r8d                 /* arg5 */
        movl %ebp, %r9d                 /* arg6 */
        xchg %ecx, %esi                 /* rsi:arg2, rcx:arg4 */
        movl %ebx, %edi                 /* arg1 */
        movl %edx, %edx                 /* arg3 (zero extension) */
        cmpq $(IA32_NR_syscalls-1), %rax
        ja 1f

        call *ia32_sys_call_table(, %rax, 8)
        movq %rax, RAX(%rsp)
1:
        jmp int_ret_from_sys_call

ia32_tracesys:
        SAVE_EXTRA_REGS
        movq %rsp, %rdi                 /* &pt_regs -> arg1 */
        call syscall_trace_enter
        /*
         * Reload arg registers from stack in case ptrace changed them.
         * Don't reload %eax because syscall_trace_enter() returned
         * the %rax value we should see. But do truncate it to 32 bits.
         * If it's -1 to make us punt the syscall, then (u32)-1 is still
         * an appropriately invalid value.
         */
        movl RCX(%rsp), %ecx
        movl RDX(%rsp), %edx
        movl RSI(%rsp), %esi
        movl RDI(%rsp), %edi
        movl %eax, %eax                 /* zero extension */
        RESTORE_EXTRA_REGS
        jmp ia32_do_call
END(entry_INT80_compat)

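/*
 * Stubs for 32-bit syscalls that need the full pt_regs: each one loads
 * the handler's address into %rax and jumps to ia32_ptregs_common, which
 * saves and restores the extra (callee-saved) registers around the call.
 */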
        .macro PTREGSCALL label, func
        ALIGN
GLOBAL(\label)
        leaq \func(%rip), %rax
        jmp ia32_ptregs_common
        .endm

        PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
        PTREGSCALL stub32_sigreturn, sys32_sigreturn
        PTREGSCALL stub32_fork, sys_fork
        PTREGSCALL stub32_vfork, sys_vfork

        ALIGN
GLOBAL(stub32_clone)
        leaq sys_clone(%rip), %rax
        /*
         * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
         * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
         *
         * The native 64-bit kernel's sys_clone() implements the latter,
         * so we need to swap arguments here before calling it:
         */
        xchg %r8, %rcx
        jmp ia32_ptregs_common

        ALIGN
ia32_ptregs_common:
        SAVE_EXTRA_REGS 8
        call *%rax
        RESTORE_EXTRA_REGS 8
        ret
END(ia32_ptregs_common)