/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors.  The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

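/*
 * Defining CREATE_TRACE_POINTS before including the trace header makes
 * this include emit the tracepoint definitions; it must be defined in
 * exactly one translation unit, and for the syscall tracepoints this
 * file is it.
 */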
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

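/*
 * Hand the syscall number and the first four arguments to the audit
 * subsystem, using the register convention of whichever ABI the task
 * entered through: rdi/rsi/rdx/r10 for native 64-bit syscalls,
 * ebx/ecx/edx/esi for the 32-bit ABI.
 */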
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

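	/*
	 * _TIF_SYSCALL_EMU is set by PTRACE_SYSEMU: the tracer wants to
	 * emulate the syscall itself.  Report the entry to the tracer,
	 * then return -1 so the real syscall is never dispatched.
	 */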
	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

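		/*
		 * Run the attached seccomp filters against sd.
		 * __secure_computing() returns -1 when the syscall
		 * should be skipped and 0 when it may proceed.
		 */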
		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

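	/*
	 * GNU "?:": evaluates to ret when ret is non-zero, otherwise to
	 * the (possibly ptrace-modified) syscall number.
	 */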
	return ret ?: regs->orig_ax;
}

#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set.  Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop.  Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* deal with pending signal delivery */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

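		/* User-return notifiers, e.g. KVM restoring host MSRs. */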
		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

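		/* Livepatch: this is a safe point to switch patch state. */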
		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

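	/*
	 * Verify that the syscall did not leave the kernel address limit
	 * raised (an unbalanced set_fs()); returning to user mode with an
	 * elevated limit would defeat user-pointer access checks.
	 */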
	addr_limit_user_check();

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT.  Make sure we clear it before
	 * returning to user mode.  We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls.  The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current->thread.status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
}

#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs.  Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work.  If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned long nr = regs->orig_ax;

	enter_from_user_mode();
	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}
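	/*
	 * If nr was out of range, regs->ax is left holding the -ENOSYS
	 * that the entry asm stored in the ax slot of pt_regs.
	 */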

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

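	/*
	 * Mark the task as being in a compat syscall so that
	 * in_compat_syscall() and the syscall-restart logic treat it
	 * as one.  prepare_exit_to_usermode() clears the flag again.
	 */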
#ifdef CONFIG_IA32_EMULATION
	current->thread.status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention.  Adjust regs so it looks like we entered using
	 * int $0x80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
		/*
		 * Micro-optimization: the pointer we're following is
		 * explicitly 32 bits, so it can't be out of range.
		 */
		__get_user(*(u32 *)&regs->bp,
			   (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
		get_user(*(u32 *)&regs->bp,
			 (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
	) {
		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
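	/*
	 * The RF/TF check matters because SYSRET cannot restore RF, and
	 * returning with TF set via SYSRET would trap straight away, so
	 * either flag forces the IRET path.
	 */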
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif