Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame^] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
Ingo Molnar | 54ad726 | 2015-06-05 13:02:28 +0200 | [diff] [blame] | 3 | * Compatibility mode system call entry point for x86-64. |
| 4 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5 | * Copyright 2000-2002 Andi Kleen, SuSE Labs. |
Ingo Molnar | 54ad726 | 2015-06-05 13:02:28 +0200 | [diff] [blame] | 6 | */ |
Ingo Molnar | d36f947 | 2015-06-03 18:29:26 +0200 | [diff] [blame] | 7 | #include "calling.h" |
Sam Ravnborg | e2d5df9 | 2005-09-09 21:28:48 +0200 | [diff] [blame] | 8 | #include <asm/asm-offsets.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 9 | #include <asm/current.h> |
| 10 | #include <asm/errno.h> |
Ingo Molnar | 54ad726 | 2015-06-05 13:02:28 +0200 | [diff] [blame] | 11 | #include <asm/ia32_unistd.h> |
| 12 | #include <asm/thread_info.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 13 | #include <asm/segment.h> |
Ingo Molnar | 2601e64 | 2006-07-03 00:24:45 -0700 | [diff] [blame] | 14 | #include <asm/irqflags.h> |
H. Peter Anvin | 1ce6f86 | 2012-04-20 12:19:50 -0700 | [diff] [blame] | 15 | #include <asm/asm.h> |
H. Peter Anvin | 63bcff2 | 2012-09-21 12:43:12 -0700 | [diff] [blame] | 16 | #include <asm/smap.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 | #include <linux/linkage.h> |
Eric Paris | d7e7528 | 2012-01-03 14:23:06 -0500 | [diff] [blame] | 18 | #include <linux/err.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 19 | |
Jiri Olsa | ea71454 | 2011-03-07 19:10:39 +0100 | [diff] [blame] | 20 | .section .entry.text, "ax" |
| 21 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 22 | /* |
Andy Lutomirski | fda57b2 | 2016-03-09 19:00:35 -0800 | [diff] [blame] | 23 | * 32-bit SYSENTER entry. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 24 | * |
Andy Lutomirski | fda57b2 | 2016-03-09 19:00:35 -0800 | [diff] [blame] | 25 | * 32-bit system calls through the vDSO's __kernel_vsyscall enter here |
| 26 | * on 64-bit kernels running on Intel CPUs. |
| 27 | * |
| 28 | * The SYSENTER instruction, in principle, should *only* occur in the |
| 29 | * vDSO. In practice, a small number of Android devices were shipped |
| 30 | * with a copy of Bionic that inlined a SYSENTER instruction. This |
| 31 | * never happened in any of Google's Bionic versions -- it only happened |
| 32 | * in a narrow range of Intel-provided versions. |
| 33 | * |
| 34 | * SYSENTER loads SS, RSP, CS, and RIP from previously programmed MSRs. |
| 35 | * IF and VM in RFLAGS are cleared (IOW: interrupts are off). |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 36 | * SYSENTER does not save anything on the stack, |
Andy Lutomirski | fda57b2 | 2016-03-09 19:00:35 -0800 | [diff] [blame] | 37 | * and does not save old RIP (!!!), RSP, or RFLAGS. |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 38 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 39 | * Arguments: |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 40 | * eax system call number |
| 41 | * ebx arg1 |
| 42 | * ecx arg2 |
| 43 | * edx arg3 |
| 44 | * esi arg4 |
| 45 | * edi arg5 |
| 46 | * ebp user stack |
| 47 | * 0(%ebp) arg6 |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 48 | */ |
ENTRY(entry_SYSENTER_compat)
	/* Interrupts are off on entry. */
	SWAPGS_UNSAFE_STACK
	/* SYSENTER did not save the user RSP; switch to this task's kernel stack. */
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%rbp			/* pt_regs->sp (stashed in bp) */

	/*
	 * Push flags.  This is nasty.  First, interrupts are currently
	 * off, but we need pt_regs->flags to have IF set.  Second, even
	 * if TF was set when SYSENTER started, it's clear by now.  We fix
	 * that later using TIF_SINGLESTEP.
	 */
	pushfq				/* pt_regs->flags (except IF = 0) */
	orl	$X86_EFLAGS_IF, (%rsp)	/* Fix saved flags */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	$0			/* pt_regs->ip = 0 (placeholder) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8  = 0 */
	pushq	$0			/* pt_regs->r9  = 0 */
	pushq	$0			/* pt_regs->r10 = 0 */
	pushq	$0			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
	pushq	$0			/* pt_regs->r12 = 0 */
	pushq	$0			/* pt_regs->r13 = 0 */
	pushq	$0			/* pt_regs->r14 = 0 */
	pushq	$0			/* pt_regs->r15 = 0 */
	cld

	/*
	 * SYSENTER doesn't filter flags, so we need to clear NT and AC
	 * ourselves.  To save a few cycles, we can check whether
	 * either was set instead of doing an unconditional popfq.
	 * This needs to happen before enabling interrupts so that
	 * we don't get preempted with NT set.
	 *
	 * If TF is set, we will single-step all the way to here -- do_debug
	 * will ignore all the traps.  (Yes, this is slow, but so is
	 * single-stepping in general.  This allows us to avoid having
	 * a more complicated code to handle the case where a user program
	 * forces us to single-step through the SYSENTER entry code.)
	 *
	 * NB.: .Lsysenter_fix_flags is a label with the code under it moved
	 * out-of-line as an optimization: NT is unlikely to be set in the
	 * majority of the cases and instead of polluting the I$ unnecessarily,
	 * we're keeping that code behind a branch which will predict as
	 * not-taken and therefore its instructions won't be fetched.
	 */
	testl	$X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, EFLAGS(%rsp)
	jnz	.Lsysenter_fix_flags
.Lsysenter_flags_fixed:

	/*
	 * User mode is traced as though IRQs are on, and SYSENTER
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV
	jmp	sysret32_from_system_call

.Lsysenter_fix_flags:
	/* Out-of-line slow path: reset RFLAGS to a known-sane value. */
	pushq	$X86_EFLAGS_FIXED
	popfq
	jmp	.Lsysenter_flags_fixed
GLOBAL(__end_entry_SYSENTER_compat)
ENDPROC(entry_SYSENTER_compat)
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 137 | |
| 138 | /* |
Andy Lutomirski | fda57b2 | 2016-03-09 19:00:35 -0800 | [diff] [blame] | 139 | * 32-bit SYSCALL entry. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 140 | * |
Andy Lutomirski | fda57b2 | 2016-03-09 19:00:35 -0800 | [diff] [blame] | 141 | * 32-bit system calls through the vDSO's __kernel_vsyscall enter here |
| 142 | * on 64-bit kernels running on AMD CPUs. |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 143 | * |
Andy Lutomirski | fda57b2 | 2016-03-09 19:00:35 -0800 | [diff] [blame] | 144 | * The SYSCALL instruction, in principle, should *only* occur in the |
| 145 | * vDSO. In practice, it appears that this really is the case. |
| 146 | * As evidence: |
| 147 | * |
| 148 | * - The calling convention for SYSCALL has changed several times without |
| 149 | * anyone noticing. |
| 150 | * |
| 151 | * - Prior to the in-kernel X86_BUG_SYSRET_SS_ATTRS fixup, anything |
| 152 | * user task that did SYSCALL without immediately reloading SS |
| 153 | * would randomly crash. |
| 154 | * |
| 155 | * - Most programmers do not directly target AMD CPUs, and the 32-bit |
| 156 | * SYSCALL instruction does not exist on Intel CPUs. Even on AMD |
| 157 | * CPUs, Linux disables the SYSCALL instruction on 32-bit kernels |
| 158 | * because the SYSCALL instruction in legacy/native 32-bit mode (as |
| 159 | * opposed to compat mode) is sufficiently poorly designed as to be |
| 160 | * essentially unusable. |
| 161 | * |
| 162 | * 32-bit SYSCALL saves RIP to RCX, clears RFLAGS.RF, then saves |
| 163 | * RFLAGS to R11, then loads new SS, CS, and RIP from previously |
| 164 | * programmed MSRs. RFLAGS gets masked by a value from another MSR |
| 165 | * (so CLD and CLAC are not needed). SYSCALL does not save anything on |
| 166 | * the stack and does not change RSP. |
| 167 | * |
| 168 | * Note: RFLAGS saving+masking-with-MSR happens only in Long mode |
Ingo Molnar | 54ad726 | 2015-06-05 13:02:28 +0200 | [diff] [blame] | 169 | * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it). |
Andy Lutomirski | fda57b2 | 2016-03-09 19:00:35 -0800 | [diff] [blame] | 170 | * Don't get confused: RFLAGS saving+masking depends on Long Mode Active bit |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 171 | * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes |
| 172 | * or target CS descriptor's L bit (SYSCALL does not read segment descriptors). |
| 173 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 174 | * Arguments: |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 175 | * eax system call number |
| 176 | * ecx return address |
| 177 | * ebx arg1 |
| 178 | * ebp arg2 (note: not saved in the stack frame, should not be touched) |
| 179 | * edx arg3 |
| 180 | * esi arg4 |
| 181 | * edi arg5 |
| 182 | * esp user stack |
| 183 | * 0(%esp) arg6 |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 184 | */ |
ENTRY(entry_SYSCALL_compat)
	/* Interrupts are off on entry. */
	swapgs

	/* Stash user ESP and switch to the kernel stack. */
	movl	%esp, %r8d
	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	/* Construct struct pt_regs on stack */
	pushq	$__USER32_DS		/* pt_regs->ss */
	pushq	%r8			/* pt_regs->sp */
	pushq	%r11			/* pt_regs->flags (SYSCALL saved RFLAGS in r11) */
	pushq	$__USER32_CS		/* pt_regs->cs */
	pushq	%rcx			/* pt_regs->ip (SYSCALL saved RIP in rcx) */
GLOBAL(entry_SYSCALL_compat_after_hwframe)
	movl	%eax, %eax		/* discard orig_ax high bits */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rbp			/* pt_regs->cx (stashed in bp) */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8  = 0 */
	pushq	$0			/* pt_regs->r9  = 0 */
	pushq	$0			/* pt_regs->r10 = 0 */
	pushq	$0			/* pt_regs->r11 = 0 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp (will be overwritten) */
	pushq	$0			/* pt_regs->r12 = 0 */
	pushq	$0			/* pt_regs->r13 = 0 */
	pushq	$0			/* pt_regs->r14 = 0 */
	pushq	$0			/* pt_regs->r15 = 0 */

	/*
	 * User mode is traced as though IRQs are on, and SYSCALL
	 * turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_fast_syscall_32
	/* XEN PV guests always use IRET path */
	ALTERNATIVE "testl %eax, %eax; jz .Lsyscall_32_done", \
		    "jmp .Lsyscall_32_done", X86_FEATURE_XENPV

	/* Opportunistic SYSRET */
sysret32_from_system_call:
	TRACE_IRQS_ON			/* User mode traces as IRQs on. */
	movq	RBX(%rsp), %rbx		/* pt_regs->rbx */
	movq	RBP(%rsp), %rbp		/* pt_regs->rbp */
	movq	EFLAGS(%rsp), %r11	/* pt_regs->flags (in r11) */
	movq	RIP(%rsp), %rcx		/* pt_regs->ip (in rcx) */
	addq	$RAX, %rsp		/* Skip r8-r15 */
	popq	%rax			/* pt_regs->rax */
	popq	%rdx			/* Skip pt_regs->cx */
	popq	%rdx			/* pt_regs->dx */
	popq	%rsi			/* pt_regs->si */
	popq	%rdi			/* pt_regs->di */

	/*
	 * The SWAPGS + SYSRETL sequence below does:
	 * GSBASE = user's GS base
	 * EIP = ECX
	 * RFLAGS = R11
	 * CS = __USER32_CS
	 * SS = __USER_DS
	 *
	 * ECX will not match pt_regs->cx, but we're returning to a vDSO
	 * trampoline that will fix up RCX, so this is okay.
	 *
	 * R12-R15 are callee-saved, so they contain whatever was in them
	 * when the system call started, which is already known to user
	 * code.  We zero R8-R10 to avoid info leaks.
	 */
	xorq	%r8, %r8
	xorq	%r9, %r9
	xorq	%r10, %r10
	movq	RSP-ORIG_RAX(%rsp), %rsp	/* restore user stack pointer */
	swapgs
	sysretl
END(entry_SYSCALL_compat)
Ingo Molnar | 54ad726 | 2015-06-05 13:02:28 +0200 | [diff] [blame] | 266 | |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 267 | /* |
Andy Lutomirski | fda57b2 | 2016-03-09 19:00:35 -0800 | [diff] [blame] | 268 | * 32-bit legacy system call entry. |
| 269 | * |
| 270 | * 32-bit x86 Linux system calls traditionally used the INT $0x80 |
| 271 | * instruction. INT $0x80 lands here. |
| 272 | * |
| 273 | * This entry point can be used by 32-bit and 64-bit programs to perform |
| 274 | * 32-bit system calls. Instances of INT $0x80 can be found inline in |
| 275 | * various programs and libraries. It is also used by the vDSO's |
| 276 | * __kernel_vsyscall fallback for hardware that doesn't support a faster |
| 277 | * entry method. Restarted 32-bit system calls also fall back to INT |
| 278 | * $0x80 regardless of what instruction was originally used to do the |
| 279 | * system call. |
| 280 | * |
| 281 | * This is considered a slow path. It is not used by most libc |
| 282 | * implementations on modern hardware except during process startup. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 283 | * |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 284 | * Arguments: |
| 285 | * eax system call number |
| 286 | * ebx arg1 |
| 287 | * ecx arg2 |
| 288 | * edx arg3 |
| 289 | * esi arg4 |
| 290 | * edi arg5 |
Andy Lutomirski | fda57b2 | 2016-03-09 19:00:35 -0800 | [diff] [blame] | 291 | * ebp arg6 |
Denys Vlasenko | b87cf63 | 2015-02-26 14:40:32 -0800 | [diff] [blame] | 292 | */ |
ENTRY(entry_INT80_compat)
	/*
	 * Interrupts are off on entry.
	 */
	ASM_CLAC			/* Do this early to minimize exposure */
	SWAPGS

	/*
	 * User tracing code (ptrace or signal handlers) might assume that
	 * the saved RAX contains a 32-bit number when we're invoking a 32-bit
	 * syscall.  Just in case the high bits are nonzero, zero-extend
	 * the syscall number.  (This could almost certainly be deleted
	 * with no ill effects.)
	 */
	movl	%eax, %eax

	/* Construct struct pt_regs on stack (iret frame is already on stack) */
	pushq	%rax			/* pt_regs->orig_ax */
	pushq	%rdi			/* pt_regs->di */
	pushq	%rsi			/* pt_regs->si */
	pushq	%rdx			/* pt_regs->dx */
	pushq	%rcx			/* pt_regs->cx */
	pushq	$-ENOSYS		/* pt_regs->ax */
	pushq	$0			/* pt_regs->r8  = 0 */
	pushq	$0			/* pt_regs->r9  = 0 */
	pushq	$0			/* pt_regs->r10 = 0 */
	pushq	$0			/* pt_regs->r11 = 0 */
	/*
	 * Unlike the fast SYSENTER/SYSCALL paths above, the real values of
	 * rbx/rbp/r12-r15 are saved here rather than zeros/placeholders.
	 */
	pushq	%rbx			/* pt_regs->rbx */
	pushq	%rbp			/* pt_regs->rbp */
	pushq	%r12			/* pt_regs->r12 */
	pushq	%r13			/* pt_regs->r13 */
	pushq	%r14			/* pt_regs->r14 */
	pushq	%r15			/* pt_regs->r15 */
	cld

	/*
	 * User mode is traced as though IRQs are on, and the interrupt
	 * gate turned them off.
	 */
	TRACE_IRQS_OFF

	movq	%rsp, %rdi
	call	do_int80_syscall_32
.Lsyscall_32_done:

	/* Go back to user mode. */
	TRACE_IRQS_ON
	SWAPGS
	jmp	restore_regs_and_iret
END(entry_INT80_compat)
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 343 | |
ENTRY(stub32_clone)
	/*
	 * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
	 * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
	 *
	 * The native 64-bit kernel's sys_clone() implements the latter,
	 * so we need to swap arguments here before calling it:
	 * per the x86-64 C calling convention, arg4 is in %rcx and arg5
	 * in %r8, so exchanging them swaps tls_val and child_tidptr.
	 */
	xchg	%r8, %rcx
	jmp	sys_clone
ENDPROC(stub32_clone)