// SPDX-License-Identifier: GPL-2.0

#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/livepatch.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 */
static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
	arch_check_user_regs(regs);
	lockdep_hardirqs_off(CALLER_ADDR0);

	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();

	instrumentation_begin();
	trace_hardirqs_off_finish();
	instrumentation_end();
}

static inline void syscall_enter_audit(struct pt_regs *regs, long syscall)
{
	if (unlikely(audit_context())) {
		unsigned long args[6];

		syscall_get_arguments(current, regs, args);
		audit_syscall_entry(syscall, args[0], args[1], args[2], args[3]);
	}
}

static long syscall_trace_enter(struct pt_regs *regs, long syscall,
				unsigned long ti_work)
{
	long ret = 0;

	/* Handle ptrace */
	if (ti_work & (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU)) {
		ret = arch_syscall_enter_tracehook(regs);
		if (ret || (ti_work & _TIF_SYSCALL_EMU))
			return -1L;
	}

	/* Do seccomp after ptrace, to catch any tracer changes. */
	if (ti_work & _TIF_SECCOMP) {
		ret = __secure_computing(NULL);
		if (ret == -1L)
			return ret;
	}

	if (unlikely(ti_work & _TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall);

	syscall_enter_audit(regs, syscall);

	return ret ? : syscall;
}

noinstr long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall)
{
	unsigned long ti_work;

	enter_from_user_mode(regs);
	instrumentation_begin();

	local_irq_enable();
	ti_work = READ_ONCE(current_thread_info()->flags);
	if (ti_work & SYSCALL_ENTER_WORK)
		syscall = syscall_trace_enter(regs, syscall, ti_work);
	instrumentation_end();

	return syscall;
}

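/*
 * Illustrative sketch, not part of the generic code: an architecture's C
 * syscall entry point is expected to bracket the actual syscall dispatch
 * with syscall_enter_from_user_mode() and syscall_exit_to_user_mode().
 * do_syscall_arch(), arch_sys_call_table and regs->retval below are
 * made-up placeholders, not real interfaces:
 *
 *	__visible noinstr void do_syscall_arch(struct pt_regs *regs, long nr)
 *	{
 *		nr = syscall_enter_from_user_mode(regs, nr);
 *
 *		instrumentation_begin();
 *		if (nr >= 0 && nr < NR_syscalls)
 *			regs->retval = arch_sys_call_table[nr](regs);
 *		else
 *			regs->retval = -ENOSYS;
 *		instrumentation_end();
 *
 *		syscall_exit_to_user_mode(regs);
 *	}
 *
 * A return value of -1 from syscall_enter_from_user_mode() (ptrace or
 * seccomp aborted the syscall) fails the range check and is handled like
 * an invalid syscall number.
 */
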
/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel state is
 * interrupts disabled when this is invoked. Also tell RCU about it.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.
 * 4) Tell lockdep that interrupts are enabled
 */
static __always_inline void exit_to_user_mode(void)
{
	instrumentation_begin();
	trace_hardirqs_on_prepare();
	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
	instrumentation_end();

	user_enter_irqoff();
	arch_exit_to_user_mode();
	lockdep_hardirqs_on(CALLER_ADDR0);
}

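/*
 * Step 3 above is supplied by the architecture, which overrides the empty
 * default in its asm/entry-common.h. The body below is only a hedged
 * sketch; flush_speculative_state() is an invented placeholder for e.g. a
 * CPU buffer clearing mitigation:
 *
 *	#define arch_exit_to_user_mode arch_exit_to_user_mode
 *	static __always_inline void arch_exit_to_user_mode(void)
 *	{
 *		flush_speculative_state();
 *	}
 *
 * Redefining the name as a macro makes the generic header skip its
 * empty inline default.
 */
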
/* Workaround to allow gradual conversion of architecture code */
void __weak arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal) { }

static void handle_signal_work(struct pt_regs *regs, unsigned long ti_work)
{
	if (ti_work & _TIF_NOTIFY_SIGNAL)
		tracehook_notify_signal();

	arch_do_signal_or_restart(regs, ti_work & _TIF_SIGPENDING);
}

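/*
 * Sketch of the shape an architecture's override of the weak stub above
 * can take. get_signal() is the real signal-picking helper, but
 * arch_handle_signal() and arch_restart_syscall_if_needed() are
 * placeholders for architecture specific frame setup and syscall restart
 * code, not real interfaces:
 *
 *	void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
 *	{
 *		struct ksignal ksig;
 *
 *		if (has_signal && get_signal(&ksig)) {
 *			arch_handle_signal(&ksig, regs);
 *			return;
 *		}
 *
 *		arch_restart_syscall_if_needed(regs);
 *	}
 */
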
static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
					    unsigned long ti_work)
{
	/*
	 * Before returning to user space ensure that all pending work
	 * items have been completed.
	 */
	while (ti_work & EXIT_TO_USER_MODE_WORK) {

		local_irq_enable_exit_to_user(ti_work);

		if (ti_work & _TIF_NEED_RESCHED)
			schedule();

		if (ti_work & _TIF_UPROBE)
			uprobe_notify_resume(regs);

		if (ti_work & _TIF_PATCH_PENDING)
			klp_update_patch_state(current);

		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
			handle_signal_work(regs, ti_work);

		if (ti_work & _TIF_NOTIFY_RESUME) {
			clear_thread_flag(TIF_NOTIFY_RESUME);
			tracehook_notify_resume(regs);
			rseq_handle_notify_resume(NULL, regs);
		}

		/* Architecture specific TIF work */
		arch_exit_to_user_mode_work(regs, ti_work);

		/*
		 * Disable interrupts and reevaluate the work flags as they
		 * might have changed while interrupts and preemption were
		 * enabled above.
		 */
		local_irq_disable_exit_to_user();
		ti_work = READ_ONCE(current_thread_info()->flags);
	}

	/* Return the latest work state for arch_exit_to_user_mode() */
	return ti_work;
}

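/*
 * The loop above has architecture hook points with empty generic defaults
 * in <linux/entry-common.h>. A rough sketch of how a port adds its own
 * TIF bits to the loop, assuming a hypothetical _TIF_ARCH_RESUME flag and
 * do_arch_resume() handler:
 *
 *	#define ARCH_EXIT_TO_USER_MODE_WORK	(_TIF_ARCH_RESUME)
 *
 *	#define arch_exit_to_user_mode_work arch_exit_to_user_mode_work
 *	static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
 *						       unsigned long ti_work)
 *	{
 *		if (ti_work & _TIF_ARCH_RESUME)
 *			do_arch_resume(regs);
 *	}
 *
 * ARCH_EXIT_TO_USER_MODE_WORK is part of EXIT_TO_USER_MODE_WORK, so the
 * extra bits both keep the loop spinning and reach the handler here.
 */
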
static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);

	lockdep_assert_irqs_disabled();

	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
		ti_work = exit_to_user_mode_loop(regs, ti_work);

	arch_exit_to_user_mode_prepare(regs, ti_work);

	/* Ensure that the address limit is intact and no locks are held */
	addr_limit_user_check();
	lockdep_assert_irqs_disabled();
	lockdep_sys_exit();
}

#ifndef _TIF_SINGLESTEP
static inline bool report_single_step(unsigned long ti_work)
{
	return false;
}
#else
/*
 * If TIF_SYSCALL_EMU is set, then the only reason to report is when
 * TIF_SINGLESTEP is set (i.e. PTRACE_SYSEMU_SINGLESTEP). That syscall
 * instruction has already been reported in syscall_enter_from_user_mode().
 */
#define SYSEMU_STEP	(_TIF_SINGLESTEP | _TIF_SYSCALL_EMU)

static inline bool report_single_step(unsigned long ti_work)
{
	return (ti_work & SYSEMU_STEP) == _TIF_SINGLESTEP;
}
#endif

static void syscall_exit_work(struct pt_regs *regs, unsigned long ti_work)
{
	bool step;

	audit_syscall_exit(regs);

	if (ti_work & _TIF_SYSCALL_TRACEPOINT)
		trace_sys_exit(regs, syscall_get_return_value(current, regs));

	step = report_single_step(ti_work);
	if (step || ti_work & _TIF_SYSCALL_TRACE)
		arch_syscall_exit_tracehook(regs, step);
}

/*
 * Syscall specific exit to user mode preparation. Runs with interrupts
 * enabled.
 */
static void syscall_exit_to_user_mode_prepare(struct pt_regs *regs)
{
	u32 cached_flags = READ_ONCE(current_thread_info()->flags);
	unsigned long nr = syscall_get_nr(current, regs);

	CT_WARN_ON(ct_state() != CONTEXT_KERNEL);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
			local_irq_enable();
	}

	rseq_syscall(regs);

	/*
	 * Do one-time syscall specific work. If these work items are
	 * enabled, we want to run them exactly once per syscall exit with
	 * interrupts enabled.
	 */
	if (unlikely(cached_flags & SYSCALL_EXIT_WORK))
		syscall_exit_work(regs, cached_flags);
}

__visible noinstr void syscall_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	syscall_exit_to_user_mode_prepare(regs);
	local_irq_disable_exit_to_user();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
{
	enter_from_user_mode(regs);
}

noinstr void irqentry_exit_to_user_mode(struct pt_regs *regs)
{
	instrumentation_begin();
	exit_to_user_mode_prepare(regs);
	instrumentation_end();
	exit_to_user_mode();
}

noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs)
{
	irqentry_state_t ret = {
		.exit_rcu = false,
	};

	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		return ret;
	}

	/*
	 * If this entry hit the idle task invoke rcu_irq_enter() whether
	 * RCU is watching or not.
	 *
	 * Interrupts can nest when the first interrupt invokes softirq
	 * processing on return which enables interrupts.
	 *
	 * Scheduler ticks in the idle task can mark quiescent state and
	 * terminate a grace period, if and only if the timer interrupt is
	 * not nested into another interrupt.
	 *
	 * Checking for __rcu_is_watching() here would prevent the nesting
	 * interrupt from invoking rcu_irq_enter(). If that nested interrupt
	 * is the tick then rcu_flavor_sched_clock_irq() would wrongfully
	 * assume that it is the first interrupt and eventually claim
	 * quiescent state and end grace periods prematurely.
	 *
	 * Unconditionally invoke rcu_irq_enter() so RCU state stays
	 * consistent.
	 *
	 * TINY_RCU does not support EQS, so let the compiler eliminate
	 * this part when enabled.
	 */
	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
		/*
		 * If RCU is not watching then the same careful
		 * sequence vs. lockdep and tracing is required
		 * as in irqentry_enter_from_user_mode().
		 */
		lockdep_hardirqs_off(CALLER_ADDR0);
		rcu_irq_enter();
		instrumentation_begin();
		trace_hardirqs_off_finish();
		instrumentation_end();

		ret.exit_rcu = true;
		return ret;
	}

	/*
	 * If RCU is watching then RCU only wants to check whether it needs
	 * to restart the tick in NOHZ mode. rcu_irq_enter_check_tick()
	 * already contains a warning when RCU is not watching, so no point
	 * in having another one here.
	 */
	instrumentation_begin();
	rcu_irq_enter_check_tick();
	/* Use the combo lockdep/tracing function */
	trace_hardirqs_off();
	instrumentation_end();

	return ret;
}

void irqentry_exit_cond_resched(void)
{
	if (!preempt_count()) {
		/* Sanity check RCU and thread stack */
		rcu_irq_exit_check_preempt();
		if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
			WARN_ON_ONCE(!on_thread_stack());
		if (need_resched())
			preempt_schedule_irq();
	}
}

noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
{
	lockdep_assert_irqs_disabled();

	/* Check whether this returns to user mode */
	if (user_mode(regs)) {
		irqentry_exit_to_user_mode(regs);
	} else if (!regs_irqs_disabled(regs)) {
		/*
		 * If RCU was not watching on entry this needs to be done
		 * carefully and needs the same ordering of lockdep/tracing
		 * and RCU as the return to user mode path.
		 */
		if (state.exit_rcu) {
			instrumentation_begin();
			/* Tell the tracer that IRET will enable interrupts */
			trace_hardirqs_on_prepare();
			lockdep_hardirqs_on_prepare(CALLER_ADDR0);
			instrumentation_end();
			rcu_irq_exit();
			lockdep_hardirqs_on(CALLER_ADDR0);
			return;
		}

		instrumentation_begin();
		if (IS_ENABLED(CONFIG_PREEMPTION))
			irqentry_exit_cond_resched();
		/* Covers both tracing and lockdep */
		trace_hardirqs_on();
		instrumentation_end();
	} else {
		/*
		 * IRQ flags state is correct already. Just tell RCU if it
		 * was not watching on entry.
		 */
		if (state.exit_rcu)
			rcu_irq_exit();
	}
}
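
/*
 * Illustrative sketch, not part of the generic code: a non-syscall
 * interrupt or exception entry is expected to wrap its handler in
 * irqentry_enter()/irqentry_exit() and keep instrumentable work inside an
 * instrumentation_begin()/end() pair. arch_irq_entry() and
 * handle_my_irq() are placeholder names:
 *
 *	noinstr void arch_irq_entry(struct pt_regs *regs)
 *	{
 *		irqentry_state_t state = irqentry_enter(regs);
 *
 *		instrumentation_begin();
 *		handle_my_irq(regs);
 *		instrumentation_end();
 *
 *		irqentry_exit(regs, state);
 *	}
 *
 * Entries which are architecturally guaranteed to come from user mode can
 * use the irqentry_enter_from_user_mode()/irqentry_exit_to_user_mode()
 * pair above instead and skip the irqentry_state_t bookkeeping.
 */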