/*
 * common.c - C code for kernel entry and exit
 * Copyright (c) 2015 Andrew Lutomirski
 * GPL v2
 *
 * Based on asm and ptrace code by many authors. The code here originated
 * in ptrace.c and signal.c.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>
#include <linux/export.h>
#include <linux/context_tracking.h>
#include <linux/user-return-notifier.h>
#include <linux/uprobes.h>
#include <linux/livepatch.h>
#include <linux/syscalls.h>

#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/vdso.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

#ifdef CONFIG_CONTEXT_TRACKING
/* Called on entry from user mode with IRQs off. */
__visible inline void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
#else
static inline void enter_from_user_mode(void) {}
#endif

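/*
 * Report syscall entry to the audit subsystem, using the argument
 * registers that match the reported audit arch.
 */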
static void do_audit_syscall_entry(struct pt_regs *regs, u32 arch)
{
#ifdef CONFIG_X86_64
	if (arch == AUDIT_ARCH_X86_64) {
		audit_syscall_entry(regs->orig_ax, regs->di,
				    regs->si, regs->dx, regs->r10);
	} else
#endif
	{
		audit_syscall_entry(regs->orig_ax, regs->bx,
				    regs->cx, regs->dx, regs->si);
	}
}

/*
 * Returns the syscall nr to run (which should match regs->orig_ax) or -1
 * to skip the syscall.
 */
static long syscall_trace_enter(struct pt_regs *regs)
{
	u32 arch = in_ia32_syscall() ? AUDIT_ARCH_I386 : AUDIT_ARCH_X86_64;

	struct thread_info *ti = current_thread_info();
	unsigned long ret = 0;
	bool emulated = false;
	u32 work;

	if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
		BUG_ON(regs != task_pt_regs(current));

	work = READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY;

	if (unlikely(work & _TIF_SYSCALL_EMU))
		emulated = true;

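	/*
	 * Report the syscall to the tracer first. If
	 * tracehook_report_syscall_entry() returns nonzero, the tracer
	 * wants the syscall aborted; with _TIF_SYSCALL_EMU (PTRACE_SYSEMU)
	 * the tracer emulates the syscall itself. Either way, returning -1
	 * skips the real dispatch.
	 */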
	if ((emulated || (work & _TIF_SYSCALL_TRACE)) &&
	    tracehook_report_syscall_entry(regs))
		return -1L;

	if (emulated)
		return -1L;

#ifdef CONFIG_SECCOMP
	/*
	 * Do seccomp after ptrace, to catch any tracer changes.
	 */
	if (work & _TIF_SECCOMP) {
		struct seccomp_data sd;

		sd.arch = arch;
		sd.nr = regs->orig_ax;
		sd.instruction_pointer = regs->ip;
#ifdef CONFIG_X86_64
		if (arch == AUDIT_ARCH_X86_64) {
			sd.args[0] = regs->di;
			sd.args[1] = regs->si;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->r10;
			sd.args[4] = regs->r8;
			sd.args[5] = regs->r9;
		} else
#endif
		{
			sd.args[0] = regs->bx;
			sd.args[1] = regs->cx;
			sd.args[2] = regs->dx;
			sd.args[3] = regs->si;
			sd.args[4] = regs->di;
			sd.args[5] = regs->bp;
		}

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->orig_ax);

	do_audit_syscall_entry(regs, arch);

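	/*
	 * GNU ?: shorthand for (ret ? ret : regs->orig_ax): pass through a
	 * nonzero skip request, otherwise run the original syscall nr.
	 */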
	return ret ?: regs->orig_ax;
}

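/*
 * Work flags that must all be clear, with IRQs off, before we can
 * return to user mode; handled in exit_to_usermode_loop().
 */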
#define EXIT_TO_USERMODE_LOOP_FLAGS				\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |	\
	 _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY | _TIF_PATCH_PENDING)

static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
	/*
	 * In order to return to user mode, we need to have IRQs off with
	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
	 * can be set at any time on preemptible kernels if we have IRQs on,
	 * so we need to loop. Disabling preemption wouldn't help: doing the
	 * work to clear some of the flags can sleep.
	 */
	while (true) {
		/* We have work to do. */
		local_irq_enable();

		if (cached_flags & _TIF_NEED_RESCHED)
			schedule();

		if (cached_flags & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		/* Deal with pending signal delivery. */
		if (cached_flags & _TIF_SIGPENDING)
			do_signal(regs);

		if (cached_flags & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
		}

		if (cached_flags & _TIF_USER_RETURN_NOTIFY)
			fire_user_return_notifiers();

		if (cached_flags & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		/* Disable IRQs and retry */
		local_irq_disable();

		cached_flags = READ_ONCE(current_thread_info()->flags);

		if (!(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
			break;
	}
}

/* Called with IRQs disabled. */
__visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags;

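	/*
	 * Catch a corrupted or stale kernel address limit (e.g. an
	 * unbalanced set_fs()) before returning to user mode.
	 */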
	addr_limit_user_check();

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) && WARN_ON(!irqs_disabled()))
		local_irq_disable();

	lockdep_sys_exit();

	cached_flags = READ_ONCE(ti->flags);

	if (unlikely(cached_flags & EXIT_TO_USERMODE_LOOP_FLAGS))
		exit_to_usermode_loop(regs, cached_flags);

#ifdef CONFIG_COMPAT
	/*
	 * Compat syscalls set TS_COMPAT. Make sure we clear it before
	 * returning to user mode. We need to clear it *after* signal
	 * handling, because syscall restart has a fixup for compat
	 * syscalls. The fixup is exercised by the ptrace_syscall_32
	 * selftest.
	 *
	 * We also need to clear TS_I386_REGS_POKED: the 32-bit tracer
	 * special case only applies after poking regs and before the
	 * very next return to user mode.
	 */
	current->thread.status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	user_enter_irqoff();
}

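/*
 * Flags that divert syscall exit through the slow
 * syscall_slow_exit_work() tracing/audit path.
 */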
#define SYSCALL_EXIT_WORK_FLAGS				\
	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT |	\
	 _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT)

static void syscall_slow_exit_work(struct pt_regs *regs, u32 cached_flags)
{
	bool step;

	audit_syscall_exit(regs);

	if (cached_flags & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, regs->ax);

	/*
	 * If TIF_SYSCALL_EMU is set, we only get here because of
	 * TIF_SINGLESTEP (i.e. this is PTRACE_SYSEMU_SINGLESTEP).
	 * We already reported this syscall instruction in
	 * syscall_trace_enter().
	 */
	step = unlikely(
		(cached_flags & (_TIF_SINGLESTEP | _TIF_SYSCALL_EMU))
		== _TIF_SINGLESTEP);
	if (step || cached_flags & _TIF_SYSCALL_TRACE)
		tracehook_report_syscall_exit(regs, step);
}

/*
 * Called with IRQs on and fully valid regs. Returns with IRQs off in a
 * state such that we can immediately switch to user mode.
 */
__visible inline void syscall_return_slowpath(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	u32 cached_flags = READ_ONCE(ti->flags);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
	    WARN(irqs_disabled(), "syscall %ld left IRQs disabled", regs->orig_ax))
		local_irq_enable();

	/*
	 * First do one-time work. If these work items are enabled, we
	 * want to run them exactly once per syscall exit with IRQs on.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK_FLAGS))
		syscall_slow_exit_work(regs, cached_flags);

	local_irq_disable();
	prepare_exit_to_usermode(regs);
}

#ifdef CONFIG_X86_64
__visible void do_syscall_64(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned long nr = regs->orig_ax;

	enter_from_user_mode();
	local_irq_enable();

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table. The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
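	/* (The x32 bit is __X32_SYSCALL_BIT, i.e. bit 30 of orig_ax.) */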
	if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
		regs->ax = sys_call_table[nr & __SYSCALL_MASK](
			regs->di, regs->si, regs->dx,
			regs->r10, regs->r8, regs->r9);
	}

	syscall_return_slowpath(regs);
}
#endif

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
/*
 * Does a 32-bit syscall. Called with IRQs on in CONTEXT_KERNEL. Does
 * all entry and exit work and returns with IRQs off. This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

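	/*
	 * On a 64-bit kernel, TS_COMPAT marks this task as being inside a
	 * 32-bit syscall, so that in_ia32_syscall() and the compat
	 * syscall-restart fixup see the right state;
	 * prepare_exit_to_usermode() clears it again.
	 */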
#ifdef CONFIG_IA32_EMULATION
	current->thread.status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it. This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero. Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
	}

	syscall_return_slowpath(regs);
}

/* Handles int $0x80 */
__visible void do_int80_syscall_32(struct pt_regs *regs)
{
	enter_from_user_mode();
	local_irq_enable();
	do_syscall_32_irqs_on(regs);
}

/* Returns 0 to return using IRET or 1 to return using SYSEXIT/SYSRETL. */
__visible long do_fast_syscall_32(struct pt_regs *regs)
{
	/*
	 * Called using the internal vDSO SYSENTER/SYSCALL32 calling
	 * convention. Adjust regs so it looks like we entered using int80.
	 */

	unsigned long landing_pad = (unsigned long)current->mm->context.vdso +
		vdso_image_32.sym_int80_landing_pad;

	/*
	 * SYSENTER loses EIP, and even SYSCALL32 needs us to skip forward
	 * so that 'regs->ip -= 2' lands back on an int $0x80 instruction.
	 * Fix it up.
	 */
	regs->ip = landing_pad;

	enter_from_user_mode();

	local_irq_enable();

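	/*
	 * The vDSO's __kernel_vsyscall pushed the user's %ebp before
	 * SYSENTER/SYSCALL32 and repurposed %ebp to carry the user stack
	 * pointer, so the sixth syscall argument has to be reloaded from
	 * the user stack here.
	 */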
	/* Fetch EBP from where the vDSO stashed it. */
	if (
#ifdef CONFIG_X86_64
	    /*
	     * Micro-optimization: the pointer we're following is explicitly
	     * 32 bits, so it can't be out of range.
	     */
	    __get_user(*(u32 *)&regs->bp,
		       (u32 __user __force *)(unsigned long)(u32)regs->sp)
#else
	    get_user(*(u32 *)&regs->bp,
		     (u32 __user __force *)(unsigned long)(u32)regs->sp)
#endif
	    ) {

		/* User code screwed up. */
		local_irq_disable();
		regs->ax = -EFAULT;
		prepare_exit_to_usermode(regs);
		return 0;	/* Keep it simple: use IRET. */
	}

	/* Now this is just like a normal syscall. */
	do_syscall_32_irqs_on(regs);

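	/*
	 * The fast-path returns below require TF (single-step) and RF to
	 * be clear: SYSEXIT and SYSRETL can't restore those flags the way
	 * IRET does, so returning 0 forces the IRET path instead.
	 */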
#ifdef CONFIG_X86_64
	/*
	 * Opportunistic SYSRETL: if possible, try to return using SYSRETL.
	 * SYSRETL is available on all 64-bit CPUs, so we don't need to
	 * bother with SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 */
	return regs->cs == __USER32_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF)) == 0;
#else
	/*
	 * Opportunistic SYSEXIT: if possible, try to return using SYSEXIT.
	 *
	 * Unlike 64-bit opportunistic SYSRET, we can't check that CX == IP,
	 * because the ECX fixup above will ensure that this is essentially
	 * never the case.
	 *
	 * We don't allow syscalls at all from VM86 mode, but we still
	 * need to check VM, because we might be returning from sys_vm86.
	 */
	return static_cpu_has(X86_FEATURE_SEP) &&
		regs->cs == __USER_CS && regs->ss == __USER_DS &&
		regs->ip == landing_pad &&
		(regs->flags & (X86_EFLAGS_RF | X86_EFLAGS_TF | X86_EFLAGS_VM)) == 0;
#endif
}
#endif