blob: dca53515ae3f7318315321065ad2feac7840bb8a [file] [log] [blame]
Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * linux/kernel/signal.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 *
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
12 */
13
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/slab.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040015#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/init.h>
Ingo Molnar589ee622017-02-04 00:16:44 +010017#include <linux/sched/mm.h>
Ingo Molnar8703e8a2017-02-08 18:51:30 +010018#include <linux/sched/user.h>
Ingo Molnarb17b0152017-02-08 18:51:35 +010019#include <linux/sched/debug.h>
Ingo Molnar29930022017-02-08 18:51:36 +010020#include <linux/sched/task.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +010021#include <linux/sched/task_stack.h>
Ingo Molnar32ef5512017-02-05 11:48:36 +010022#include <linux/sched/cputime.h>
Christian Brauner3eb39f42018-11-19 00:51:56 +010023#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/fs.h>
Christian Brauner3eb39f42018-11-19 00:51:56 +010025#include <linux/proc_fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/tty.h>
27#include <linux/binfmts.h>
Alex Kelly179899f2012-10-04 17:15:24 -070028#include <linux/coredump.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/security.h>
30#include <linux/syscalls.h>
31#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070032#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070033#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090034#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070035#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080036#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080037#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080038#include <linux/pid_namespace.h>
39#include <linux/nsproxy.h>
Serge E. Hallyn6b550f92012-01-10 15:11:37 -080040#include <linux/user_namespace.h>
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +053041#include <linux/uprobes.h>
Al Viro90268432012-12-14 14:47:53 -050042#include <linux/compat.h>
Jesper Derehag2b5faa42013-03-19 20:50:05 +000043#include <linux/cn_proc.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070044#include <linux/compiler.h>
Christoph Hellwig31ea70e2017-06-03 21:01:00 +020045#include <linux/posix-timers.h>
Roman Gushchin76f969e2019-04-19 10:03:04 -070046#include <linux/cgroup.h>
Richard Guy Briggsb48345a2019-05-10 12:21:49 -040047#include <linux/audit.h>
Gideon Israel Dsouza52f5684c2014-04-07 15:39:20 -070048
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050049#define CREATE_TRACE_POINTS
50#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080051
Linus Torvalds1da177e2005-04-16 15:20:36 -070052#include <asm/param.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080053#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070054#include <asm/unistd.h>
55#include <asm/siginfo.h>
David Howellsd550bbd2012-03-28 18:30:03 +010056#include <asm/cacheflush.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070057
58/*
59 * SLAB caches for signal bits.
60 */
61
Christoph Lametere18b8902006-12-06 20:33:20 -080062static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090064int print_fatal_signals __read_mostly;
65
/* Return the userspace handler installed for @sig on task @t (signo is 1-based). */
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070070
Christian Braunere4a8b4e2018-08-21 22:00:15 -070071static inline bool sig_handler_ignored(void __user *handler, int sig)
Roland McGrath35de2542008-07-25 19:45:51 -070072{
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070073 /* Is it explicitly or implicitly ignored? */
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070074 return handler == SIG_IGN ||
Christian Braunere4a8b4e2018-08-21 22:00:15 -070075 (handler == SIG_DFL && sig_kernel_ignore(sig));
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070076}
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
/*
 * Would @sig be discarded if sent to @t right now?  @force is set for
 * signals that may not be ignored (e.g. kernel-internal delivery).
 * Caller must hold @t->sighand->siglock so the handler can't change.
 */
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	/*
	 * SIGNAL_UNKILLABLE tasks (init, some kthreads) drop unhandled
	 * signals unless the sender forces a kernel-fatal one through.
	 */
	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}
99
/*
 * Top-level "would this signal be dropped?" test, layered on
 * sig_task_ignored() with the blocked-set and ptrace exceptions.
 */
static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signal unless it
	 * is SIGKILL which can't be reported anyway but can be ignored
	 * by SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}
120
121/*
122 * Re-calculate pending state from the set of locally pending
123 * signals, globally pending signals, and blocked signals.
124 */
Christian Brauner938696a2018-08-21 22:00:27 -0700125static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700126{
127 unsigned long ready;
128 long i;
129
130 switch (_NSIG_WORDS) {
131 default:
132 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
133 ready |= signal->sig[i] &~ blocked->sig[i];
134 break;
135
136 case 4: ready = signal->sig[3] &~ blocked->sig[3];
137 ready |= signal->sig[2] &~ blocked->sig[2];
138 ready |= signal->sig[1] &~ blocked->sig[1];
139 ready |= signal->sig[0] &~ blocked->sig[0];
140 break;
141
142 case 2: ready = signal->sig[1] &~ blocked->sig[1];
143 ready |= signal->sig[0] &~ blocked->sig[0];
144 break;
145
146 case 1: ready = signal->sig[0] &~ blocked->sig[0];
147 }
148 return ready != 0;
149}
150
151#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
152
/*
 * Set TIF_SIGPENDING on @t if any job-control work, pending signal
 * (private or shared), or cgroup freeze demands attention.  Returns
 * true iff the flag was set.  Deliberately never clears the flag.
 */
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}
170
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (!recalc_sigpending_tsk(t))
		return;

	signal_wake_up(t, 0);
}
180
181void recalc_sigpending(void)
182{
Miroslav Benes8df19472021-03-29 15:28:15 +0200183 if (!recalc_sigpending_tsk(current) && !freezing(current))
Roland McGrathb74d0de2007-06-06 03:59:00 -0700184 clear_thread_flag(TIF_SIGPENDING);
185
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186}
Eric W. Biedermanfb50f5a2018-09-13 19:26:35 +0200187EXPORT_SYMBOL(recalc_sigpending);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188
/*
 * Called by a newly forked task: conservatively set TIF_SIGPENDING,
 * then let recalc_sigpending() decide whether it may stay clear.
 */
void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}
199
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200/* Given the mask, find the first available signal that should be serviced. */
201
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800202#define SYNCHRONOUS_MASK \
203 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
Will Drewrya0727e82012-04-12 16:48:00 -0500204 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800205
/*
 * Return the lowest deliverable signal number in @pending that is not
 * in @mask, or 0 if none.  Word 0 is special-cased so synchronous
 * (fault-style) signals win over everything else in that word.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;	/* ffz(~x) == index of lowest set bit */
		return sig;
	}

	/* Remaining words: hand-unrolled for the common _NSIG_WORDS values. */
	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
251
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900252static inline void print_dropped_signal(int sig)
253{
254 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
255
256 if (!print_fatal_signals)
257 return;
258
259 if (!__ratelimit(&ratelimit_state))
260 return;
261
Wang Xiaoqiang747800e2016-05-23 16:23:59 -0700262 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900263 current->comm, current->pid, sig);
264}
265
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	/* A new stop signo replaces, never accumulates with, the old one. */
	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
298
/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking. @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		/* order the clear before the waker's wait-bit test */
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}
319
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	/* A stop that is no longer pending must not be consumed either. */
	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
347
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	/* sample CONSUME before task_clear_jobctl_pending() wipes it */
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}
389
/*
 * Make newly created thread @task join current's in-progress group stop.
 * Note the stop state is read from current (the forking thread), then
 * applied to @task.  No-op unless a stop is counting down or completed.
 */
void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		/* stop still in progress: the new thread must also consume it */
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}
404
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 * - the entry is charged against @t's user's RLIMIT_SIGPENDING unless
 *   @override_rlimit; on failure or over-limit, NULL is returned and the
 *   pending count is rolled back
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		/* undo the charge taken above; drop the ref if we were last */
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
449
/*
 * Release one sigqueue entry: uncharge the owning user and return it to
 * the slab cache.  Preallocated (timer-owned) entries are never freed here.
 */
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
458
Oleg Nesterov6a14c5c2006-03-28 16:11:18 -0800459void flush_sigqueue(struct sigpending *queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700460{
461 struct sigqueue *q;
462
463 sigemptyset(&queue->signal);
464 while (!list_empty(&queue->list)) {
465 q = list_entry(queue->list.next, struct sigqueue , list);
466 list_del_init(&q->list);
467 __sigqueue_free(q);
468 }
469}
470
/*
 * Flush all pending signals for this kthread.
 * Drops both the private and shared pending queues of @t and clears
 * its TIF_SIGPENDING, under @t's siglock.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485
Nicolas Pitrebaa73d92016-11-11 00:10:10 -0500486#ifdef CONFIG_POSIX_TIMERS
/*
 * Remove all SI_TIMER entries from @pending while leaving every other
 * queued signal untouched.  The pending bitmask is rebuilt as
 * (old mask minus freed timer sigs) OR (sigs still represented by a
 * queue entry), so a signal queued both by a timer and by something
 * else stays pending.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			/* non-timer entry: its signal must stay pending */
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
509
/* Flush queued itimer (SI_TIMER) signals from current, private and shared. */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
Nicolas Pitrebaa73d92016-11-11 00:10:10 -0500520#endif
Oleg Nesterovcbaffba2008-05-26 20:55:42 +0400521
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700522void ignore_signals(struct task_struct *t)
523{
524 int i;
525
526 for (i = 0; i < _NSIG; ++i)
527 t->sighand->action[i].sa.sa_handler = SIG_IGN;
528
529 flush_signals(t);
530}
531
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700533 * Flush all handlers for a task.
534 */
535
536void
537flush_signal_handlers(struct task_struct *t, int force_default)
538{
539 int i;
540 struct k_sigaction *ka = &t->sighand->action[0];
541 for (i = _NSIG ; i != 0 ; i--) {
542 if (force_default || ka->sa.sa_handler != SIG_IGN)
543 ka->sa.sa_handler = SIG_DFL;
544 ka->sa.sa_flags = 0;
Andrew Morton522cff12013-03-13 14:59:34 -0700545#ifdef __ARCH_HAS_SA_RESTORER
Kees Cook2ca39522013-03-13 14:59:33 -0700546 ka->sa.sa_restorer = NULL;
547#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 sigemptyset(&ka->sa.sa_mask);
549 ka++;
550 }
551}
552
Christian Brauner67a48a22018-08-21 22:00:34 -0700553bool unhandled_signal(struct task_struct *tsk, int sig)
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200554{
Roland McGrath445a91d2008-07-25 19:45:52 -0700555 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
Serge E. Hallynb460cbc2007-10-18 23:39:52 -0700556 if (is_global_init(tsk))
Christian Brauner67a48a22018-08-21 22:00:34 -0700557 return true;
558
Roland McGrath445a91d2008-07-25 19:45:52 -0700559 if (handler != SIG_IGN && handler != SIG_DFL)
Christian Brauner67a48a22018-08-21 22:00:34 -0700560 return false;
561
Tejun Heoa288eec2011-06-17 16:50:37 +0200562 /* if ptraced, let the tracer determine */
563 return !tsk->ptrace;
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200564}
565
/*
 * Fill @info for the dequeue of @sig from @list and drop one matching
 * queue entry.  If more than one entry carries @sig, the signal stays
 * set in the pending mask (the goto below skips sigdelset()).
 * *@resched_timer is set when the entry belonged to a posix timer that
 * must be re-armed by the caller.
 */
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	/* only one (or zero) entry: the signal is no longer pending */
	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
610
611static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
Eric W. Biedermanae7795b2018-09-25 11:27:20 +0200612 kernel_siginfo_t *info, bool *resched_timer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613{
Roland McGrath27d91e02006-09-29 02:00:31 -0700614 int sig = next_signal(pending, mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615
Oleg Nesterov2e01fab2015-11-06 16:32:19 -0800616 if (sig)
Eric W. Biederman57db7e42017-06-13 04:31:16 -0500617 collect_signal(sig, pending, info, resched_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700618 return sig;
619}
620
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 * Private signals are tried before shared ones; as a side effect this
 * re-arms an expired real-time itimer and a dequeued posix timer.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704
/*
 * dequeue_synchronous_signal - prefer a pending fault-style signal
 * @info: siginfo to fill in with the dequeued signal's payload
 *
 * Scan the current task's private pending queue for an unblocked signal
 * in SYNCHRONOUS_MASK and dequeue the first one found.
 *
 * Returns the signal number on success, or 0 when no unblocked
 * synchronous signal is queued.  NOTE(review): touching ->pending
 * requires ->siglock; callers presumably hold it like dequeue_signal()
 * does — confirm at the call sites.
 */
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 * (Cheap test on the first bitmap word before walking the list.)
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 * If so, the signal must stay marked in the pending bitmap.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	/* Last queued instance of this signal: clear its pending bit. */
	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
746
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	/* Mark the signal pending before any attempt to wake @t. */
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		/* Not sleeping: kick it so it notices TIF_SIGPENDING soon. */
		kick_process(t);
}
771
772/*
773 * Remove signals in mask from the pending set and queue.
774 * Returns 1 if any signals were found.
775 *
776 * All callers must be holding the siglock.
George Anzinger71fabd52006-01-08 01:02:48 -0800777 */
Christian Brauner8f113512018-08-21 22:00:38 -0700778static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
George Anzinger71fabd52006-01-08 01:02:48 -0800779{
780 struct sigqueue *q, *n;
781 sigset_t m;
782
783 sigandsets(&m, mask, &s->signal);
784 if (sigisemptyset(&m))
Christian Brauner8f113512018-08-21 22:00:38 -0700785 return;
George Anzinger71fabd52006-01-08 01:02:48 -0800786
Oleg Nesterov702a5072011-04-27 22:01:27 +0200787 sigandnsets(&s->signal, &s->signal, mask);
George Anzinger71fabd52006-01-08 01:02:48 -0800788 list_for_each_entry_safe(q, n, &s->list, list) {
789 if (sigismember(mask, q->info.si_signo)) {
790 list_del_init(&q->list);
791 __sigqueue_free(q);
792 }
793 }
George Anzinger71fabd52006-01-08 01:02:48 -0800794}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795
Eric W. Biedermanae7795b2018-09-25 11:27:20 +0200796static inline int is_si_special(const struct kernel_siginfo *info)
Oleg Nesterov614c5172009-12-15 16:47:22 -0800797{
Eric W. Biederman4ff4c312018-09-03 10:39:04 +0200798 return info <= SEND_SIG_PRIV;
Oleg Nesterov614c5172009-12-15 16:47:22 -0800799}
800
Eric W. Biedermanae7795b2018-09-25 11:27:20 +0200801static inline bool si_fromuser(const struct kernel_siginfo *info)
Oleg Nesterov614c5172009-12-15 16:47:22 -0800802{
803 return info == SEND_SIG_NOINFO ||
804 (!is_si_special(info) && SI_FROMUSER(info));
805}
806
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807/*
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700808 * called with RCU read lock from check_kill_permission()
809 */
Christian Brauner2a9b9092018-08-21 22:00:11 -0700810static bool kill_ok_by_cred(struct task_struct *t)
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700811{
812 const struct cred *cred = current_cred();
813 const struct cred *tcred = __task_cred(t);
814
Christian Brauner2a9b9092018-08-21 22:00:11 -0700815 return uid_eq(cred->euid, tcred->suid) ||
816 uid_eq(cred->euid, tcred->uid) ||
817 uid_eq(cred->uid, tcred->suid) ||
818 uid_eq(cred->uid, tcred->uid) ||
819 ns_capable(tcred->user_ns, CAP_KILL);
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700820}
821
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 *
 * Returns 0 when @current may send @sig to @t, -EINVAL for an invalid
 * signal number, -EPERM on a credential mismatch, or an error from the
 * audit/security hooks.
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	/* Kernel-generated signals bypass all permission checks. */
	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			/* SIGCONT is allowed within the same session. */
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}
861
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken. If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	/* Only tracees attached via PTRACE_SEIZE support this trap. */
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	/* If the ptracer is in LISTEN, wake the tracee so it can re-trap. */
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
887
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	/* Group already exiting or coredumping: only SIGKILL still matters. */
	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			/* Seized tracees re-trap instead of being woken. */
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	/* Finally apply the normal ignore check (overridable via @force). */
	return !sig_ignored(p, sig, force);
}
962
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700963/*
964 * Test if P wants to take SIG. After we've checked all threads with this,
965 * it's equivalent to finding no threads not blocking SIG. Any threads not
966 * blocking SIG were ruled out because they are not running and already
967 * have pending signals. Such threads will dequeue from the shared queue
968 * as soon as they're available, so putting the signal on the shared queue
969 * will be equivalent to sending it to one such thread.
970 */
Christian Brauneracd14e62018-08-21 22:00:42 -0700971static inline bool wants_signal(int sig, struct task_struct *p)
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700972{
973 if (sigismember(&p->blocked, sig))
Christian Brauneracd14e62018-08-21 22:00:42 -0700974 return false;
975
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700976 if (p->flags & PF_EXITING)
Christian Brauneracd14e62018-08-21 22:00:42 -0700977 return false;
978
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700979 if (sig == SIGKILL)
Christian Brauneracd14e62018-08-21 22:00:42 -0700980 return true;
981
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700982 if (task_is_stopped_or_traced(p))
Christian Brauneracd14e62018-08-21 22:00:42 -0700983 return false;
984
Jens Axboe5c251e92020-10-26 14:32:27 -0600985 return task_curr(p) || !task_sigpending(p);
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700986}
987
/*
 * Pick a thread to handle @sig, which has just been queued on @p, and
 * wake it; fatal non-coredump signals are promoted to an immediate
 * group-wide exit.  Called from __send_signal() with ->siglock held.
 */
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target round-robins across the group so one thread
		 * does not absorb every shared signal.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 * (Coredump signals are left for the dequeue path so the
		 * dump can be taken first.)
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
1063
Christian Braunera19e2c02018-08-21 22:00:46 -07001064static inline bool legacy_queue(struct sigpending *signals, int sig)
Pavel Emelyanovaf7fff92008-04-30 00:52:34 -07001065{
1066 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1067}
1068
/*
 * Queue @sig with @info on @t's private or shared pending list (chosen
 * by @type) and let complete_signal() pick a thread to wake.  @force is
 * forwarded to prepare_signal() to override the usual ignored-signal
 * short-circuit.
 *
 * Returns 0 on success (including silent drops of already-pending
 * legacy signals), or -EAGAIN when an rt-signal queue overflows.
 * Must be called with @t->sighand->siglock held (asserted below).
 */
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	/* GFP_ATOMIC: we are under a spinlock with irqs off. */
	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		/* Fill q->info from the three possible info "values". */
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
1184
/*
 * Does this siginfo layout carry si_pid/si_uid fields that may need
 * translation across pid/user namespaces?  The switch deliberately
 * lists every SIL_* value with no default so that adding a new layout
 * is flagged by the compiler's switch-exhaustiveness warning here.
 */
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}
1208
/*
 * Prepare the namespace-sensitive parts of @info (si_uid translation
 * into the target's user namespace, si_pid squashing when the sender is
 * from an ancestor pid namespace) and decide whether delivery must be
 * forced past an ignored handler, then hand off to __send_signal().
 */
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		/* Translate si_uid into the target's user namespace. */
		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}
1244
/*
 * Log a fatal signal plus a register dump for debugging; enabled via
 * the "print-fatal-signals=" boot parameter (see setup below).
 */
static void print_fatal_signal(int signr)
{
	/* Register state of the task receiving the fatal signal. */
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	/* On native x86-32, also dump up to 16 code bytes at the IP. */
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			/* Stop at the first unreadable user byte. */
			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	/*
	 * NOTE(review): preemption is disabled around show_regs(),
	 * presumably to keep the dump tied to one CPU — confirm.
	 */
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
1268
1269static int __init setup_print_fatal_signals(char *str)
1270{
1271 get_option (&str, &print_fatal_signals);
1272
1273 return 1;
1274}
1275
1276__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277
/*
 * Send @sig to the whole thread group of @p via the shared pending
 * queue (PIDTYPE_TGID).  The caller must hold @p->sighand->siglock;
 * __send_signal() asserts it.
 */
int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}
1283
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02001284int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
Eric W. Biederman40b3b022018-07-21 10:45:15 -05001285 enum pid_type type)
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001286{
1287 unsigned long flags;
1288 int ret = -ESRCH;
1289
1290 if (lock_task_sighand(p, &flags)) {
Eric W. Biedermanb2139842018-07-20 15:49:17 -05001291 ret = send_signal(sig, info, p, type);
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001292 unlock_task_sighand(p, &flags);
1293 }
1294
1295 return ret;
1296}
1297
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		/* Reset to default so the signal cannot be swallowed ... */
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			/* ... and unblock it, re-evaluating pending state. */
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
1339
/*
 * Force @info->si_signo on the current task; see
 * force_sig_info_to_task() for the unblock/un-ignore semantics.
 */
int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}
1344
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345/*
1346 * Nuke all other threads in the group.
1347 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;		/* number of sibling threads visited */

	/* Cancel any in-progress group stop. */
	p->signal->group_stop_count = 0;

	/* Walk every thread in the group except @p itself. */
	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		/* Queue SIGKILL directly on the thread and kick it awake. */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
1368
/*
 * Pin and lock @tsk's sighand.  Returns the locked sighand_struct with
 * the saved irq state in *flags, or NULL if the task has already passed
 * through __exit_signal() and its ->sighand is gone.
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		/* Lost the race: drop the stale lock and retry. */
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
1400
David Howellsc69e8d92008-11-14 10:39:19 +11001401/*
1402 * send signal info to all the members of a group
David Howellsc69e8d92008-11-14 10:39:19 +11001403 */
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02001404int group_send_sig_info(int sig, struct kernel_siginfo *info,
1405 struct task_struct *p, enum pid_type type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001406{
David Howells694f6902010-08-04 16:59:14 +01001407 int ret;
1408
1409 rcu_read_lock();
1410 ret = check_kill_permission(sig, info, p);
1411 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001413 if (!ret && sig)
Eric W. Biederman40b3b022018-07-21 10:45:15 -05001414 ret = do_send_sig_info(sig, info, p, type);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415
1416 return ret;
1417}
1418
1419/*
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001420 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421 * control characters do (^C, ^Z etc)
David Howellsc69e8d92008-11-14 10:39:19 +11001422 * - the caller must hold at least a readlock on tasklist_lock
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	/*
	 * Deliver to every member of the process group.  Success if at
	 * least one delivery worked; otherwise report the last error
	 * (-ESRCH if the group was empty).
	 */
	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
1438
/*
 * Send @sig/@info to the thread group identified by @pid, retrying if
 * the group leader is unhashed underneath us.
 */
int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		/* Done unless the task vanished mid-delivery (-ESRCH). */
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
1460
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02001461static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001462{
1463 int error;
1464 rcu_read_lock();
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001465 error = kill_pid_info(sig, info, find_vpid(pid));
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001466 rcu_read_unlock();
1467 return error;
1468}
1469
Christian Braunerbb17fcc2018-08-21 21:59:55 -07001470static inline bool kill_as_cred_perm(const struct cred *cred,
1471 struct task_struct *target)
Serge Hallynd178bc32011-09-26 10:45:18 -05001472{
1473 const struct cred *pcred = __task_cred(target);
Christian Braunerbb17fcc2018-08-21 21:59:55 -07001474
1475 return uid_eq(cred->euid, pcred->suid) ||
1476 uid_eq(cred->euid, pcred->uid) ||
1477 uid_eq(cred->uid, pcred->suid) ||
1478 uid_eq(cred->uid, pcred->uid);
Serge Hallynd178bc32011-09-26 10:45:18 -05001479}
1480
Eric W. Biederman70f1b0d2019-02-07 19:44:12 -06001481/*
1482 * The usb asyncio usage of siginfo is wrong. The glibc support
1483 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1484 * AKA after the generic fields:
1485 * kernel_pid_t si_pid;
1486 * kernel_uid32_t si_uid;
1487 * sigval_t si_value;
1488 *
1489 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1490 * after the generic fields is:
1491 * void __user *si_addr;
1492 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer. Those low 32bits will be stored at a
 * higher address than they appear in a 32 bit pointer. So userspace
 * will not see the address it was expecting for its completions.
1498 *
1499 * There is nothing in the encoding that can allow
1500 * copy_siginfo_to_user32 to detect this confusion of formats, so
1501 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
1503 * pointer in sival_int, instead of sival_addr of the sigval_t addr
1504 * parameter.
1505 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	/*
	 * Store the sigval over si_pid/si_uid: the USB SI_ASYNCIO layout
	 * places the value where SIL_RT keeps pid/uid (see comment above).
	 */
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	/* Permission check against the *saved* credentials of the opener. */
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			/* Bypass do_send_sig_info: siginfo is pre-built above. */
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
1550/*
1551 * kill_something_info() interprets pid in interesting ways just like kill(2).
1552 *
1553 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1554 * is probably wrong. Should make it like BSD or SYSV.
1555 */
1556
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	/* pid > 0: a single process. */
	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		/* pid == 0: caller's process group; pid < -1: group -pid. */
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		/* pid == -1: everything except init and our own group. */
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		/* No eligible target at all means -ESRCH. */
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1592
1593/*
1594 * These are for backward compatibility with the rest of the kernel source.
1595 */
1596
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02001597int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 /*
1600 * Make sure legacy kernel users don't send in bad values
1601 * (normal paths check this in check_kill_permission).
1602 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001603 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 return -EINVAL;
1605
Eric W. Biederman40b3b022018-07-21 10:45:15 -05001606 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607}
Eric W. Biedermanfb50f5a2018-09-13 19:26:35 +02001608EXPORT_SYMBOL(send_sig_info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609
/*
 * Map the legacy "priv" flag onto the special siginfo cookies:
 * SEND_SIG_PRIV for kernel-originated signals, SEND_SIG_NOINFO otherwise.
 */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1612
/* Convenience wrapper: send @sig to @p with no payload beyond @priv. */
int send_sig(int sig, struct task_struct *p, int priv)
{
	struct kernel_siginfo *si = __si_special(priv);

	return send_sig_info(sig, si, p);
}
EXPORT_SYMBOL(send_sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619
/*
 * Force @sig on the current task with a kernel-generated (SI_KERNEL)
 * siginfo; cannot be blocked or ignored (see force_sig_info_to_task()).
 */
void force_sig(int sig)
{
	struct kernel_siginfo info;

	/* clear_siginfo() zeroes the whole struct, padding included. */
	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
1634/*
1635 * When things go south during signal handling, we
1636 * will force a SIGSEGV. And if the signal that caused
1637 * the problem was already a SIGSEGV, we'll want to
1638 * make sure we don't even try to deliver the signal..
1639 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	/*
	 * If SIGSEGV itself is what failed, reset its handler to SIG_DFL
	 * first so we don't recurse into the broken handler.
	 */
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}
1652
/*
 * Build a fault-style (SIL_FAULT) siginfo and force it on @t.
 * The ___ARCH_SI_* macros expand to extra parameters only on
 * architectures that carry trapno / ia64-specific fields.
 */
int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}
1675
/* Same as force_sig_fault_to_task(), targeting the current task. */
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}
1684
/*
 * Non-forced variant of force_sig_fault_to_task(): the signal is
 * delivered through send_sig_info() and so respects blocking/ignoring.
 */
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}
1707
/*
 * Force SIGBUS with a machine-check (BUS_MCEERR_*) payload on the
 * current task; @lsb encodes the least significant bit of the
 * corrupted address range.
 */
int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	/* Only the two MCE si_codes are valid here. */
	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}
1721
/* Non-forced counterpart of force_sig_mceerr(), targeting @t. */
int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	/* Only the two MCE si_codes are valid here. */
	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);
Eric W. Biederman38246732018-01-18 18:54:31 -06001736
/*
 * Force SIGSEGV/SEGV_BNDERR (bounds violation) on the current task,
 * reporting the faulting address and the violated [lower, upper] bounds.
 */
int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}
Eric W. Biederman38246732018-01-18 18:54:31 -06001750
#ifdef SEGV_PKUERR
/*
 * Force SIGSEGV/SEGV_PKUERR (protection-key violation) on the current
 * task; only built on architectures that define SEGV_PKUERR.
 */
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif
Eric W. Biedermanf8ec6602018-01-18 14:54:54 -06001765
/*
 * Force SIGTRAP/TRAP_PERF on the current task, carrying the
 * perf-event payload (@sig_data, @type) in si_perf_data / si_perf_type.
 */
int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	return force_sig_info(&info);
}
1780
Eric W. Biedermanf71dd7d2018-01-22 14:37:25 -06001781/* For the crazy architectures that include trap information in
1782 * the errno field, instead of an actual errno value.
1783 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	/* Here si_errno carries arch trap info, not a real errno (see above). */
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}
1795
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001796int kill_pgrp(struct pid *pid, int sig, int priv)
1797{
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001798 int ret;
1799
1800 read_lock(&tasklist_lock);
1801 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1802 read_unlock(&tasklist_lock);
1803
1804 return ret;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001805}
1806EXPORT_SYMBOL(kill_pgrp);
1807
/*
 * Send @sig to the thread group identified by @pid; @priv selects the
 * SEND_SIG_PRIV vs SEND_SIG_NOINFO cookie (see __si_special()).
 */
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
1813
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814/*
1815 * These functions support sending signals using preallocated sigqueue
1816 * structures. This is needed "because realtime applications cannot
1817 * afford to lose notifications of asynchronous events, like timer
Randy Dunlap5aba0852011-04-04 14:59:31 -07001818 * expirations or I/O completions". In the case of POSIX Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 * we allocate the sigqueue structure from the timer_create. If this
1820 * allocation fails we are able to report the failure to the application
1821 * with an EAGAIN error.
1822 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823struct sigqueue *sigqueue_alloc(void)
1824{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001825 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001827 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001828 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001829
1830 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831}
1832
/*
 * Release a preallocated sigqueue.  If it is still queued, ownership is
 * handed back to the signal code and it will be freed on dequeue.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
1857
/*
 * Queue a preallocated sigqueue (POSIX timer expiry) on the task or
 * thread group identified by @pid/@type.  Returns 0 on delivery, 1 if
 * the signal is ignored, -1 if the target is gone.
 */
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	/* Queue it on the shared or per-thread pending list per @type. */
	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
1905
Joel Fernandes (Google)b53b0b92019-04-30 12:21:53 -04001906static void do_notify_pidfd(struct task_struct *task)
1907{
1908 struct pid *pid;
1909
Joel Fernandes (Google)1caf7d52019-07-24 12:48:16 -04001910 WARN_ON(task->exit_state == 0);
Joel Fernandes (Google)b53b0b92019-04-30 12:21:53 -04001911 pid = task_pid(task);
1912 wake_up_all(&pid->wait_pidfd);
1913}
1914
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 * Let a parent know about the death of a child.
1917 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
Roland McGrath2b2a1ff2008-07-25 19:45:54 -07001918 *
Oleg Nesterov53c8f9f2011-06-22 23:08:18 +02001919 * Returns true if our parent ignored us and so we've switched to
1920 * self-reaping.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	/* Only a (possibly traced) group leader with no live siblings gets here. */
	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	/* Report CPU time of the whole (now-dead) thread group. */
	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	/* Decode exit_code: 0x80 = core dumped, low 7 bits = killing signal. */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care. POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie. Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
2019
Tejun Heo75b95952011-03-23 10:37:01 +01002020/**
2021 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2022 * @tsk: task reporting the state change
2023 * @for_ptracer: the notification is for ptracer
2024 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2025 *
2026 * Notify @tsk's parent that the stopped/continued state has changed. If
2027 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2028 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2029 *
2030 * CONTEXT:
2031 * Must be called with tasklist_lock at least read locked.
2032 */
2033static void do_notify_parent_cldstop(struct task_struct *tsk,
2034 bool for_ptracer, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035{
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02002036 struct kernel_siginfo info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 unsigned long flags;
Oleg Nesterovbc505a42005-09-06 15:17:32 -07002038 struct task_struct *parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 struct sighand_struct *sighand;
Frederic Weisbeckerbde82852017-01-31 04:09:31 +01002040 u64 utime, stime;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
Tejun Heo75b95952011-03-23 10:37:01 +01002042 if (for_ptracer) {
Oleg Nesterovbc505a42005-09-06 15:17:32 -07002043 parent = tsk->parent;
Tejun Heo75b95952011-03-23 10:37:01 +01002044 } else {
Oleg Nesterovbc505a42005-09-06 15:17:32 -07002045 tsk = tsk->group_leader;
2046 parent = tsk->real_parent;
2047 }
2048
Eric W. Biedermanfaf1f222018-01-05 17:27:42 -06002049 clear_siginfo(&info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 info.si_signo = SIGCHLD;
2051 info.si_errno = 0;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002052 /*
Randy Dunlap5aba0852011-04-04 14:59:31 -07002053 * see comment in do_notify_parent() about the following 4 lines
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002054 */
2055 rcu_read_lock();
Eric W. Biederman17cf22c2010-03-02 14:51:53 -08002056 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
Eric W. Biederman54ba47e2012-03-13 16:04:35 -07002057 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002058 rcu_read_unlock();
2059
Frederic Weisbeckerbde82852017-01-31 04:09:31 +01002060 task_cputime(tsk, &utime, &stime);
2061 info.si_utime = nsec_to_clock_t(utime);
2062 info.si_stime = nsec_to_clock_t(stime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063
2064 info.si_code = why;
2065 switch (why) {
2066 case CLD_CONTINUED:
2067 info.si_status = SIGCONT;
2068 break;
2069 case CLD_STOPPED:
2070 info.si_status = tsk->signal->group_exit_code & 0x7f;
2071 break;
2072 case CLD_TRAPPED:
2073 info.si_status = tsk->exit_code & 0x7f;
2074 break;
2075 default:
2076 BUG();
2077 }
2078
2079 sighand = parent->sighand;
2080 spin_lock_irqsave(&sighand->siglock, flags);
2081 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2082 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2083 __group_send_sig_info(SIGCHLD, &info, parent);
2084 /*
2085 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2086 */
2087 __wake_up_parent(tsk, parent);
2088 spin_unlock_irqrestore(&sighand->siglock, flags);
2089}
2090
Christian Brauner6527de92018-08-21 21:59:59 -07002091static inline bool may_ptrace_stop(void)
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07002092{
Tejun Heod21142e2011-06-17 16:50:34 +02002093 if (!likely(current->ptrace))
Christian Brauner6527de92018-08-21 21:59:59 -07002094 return false;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07002095 /*
2096 * Are we in the middle of do_coredump?
2097 * If so and our tracer is also part of the coredump stopping
2098 * is a deadlock situation, and pointless because our tracer
2099 * is dead so don't allow us to stop.
2100 * If SIGKILL was already sent before the caller unlocked
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07002101 * ->siglock we must see ->core_state != NULL. Otherwise it
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07002102 * is safe to enter schedule().
Oleg Nesterov9899d112013-01-21 20:48:00 +01002103 *
2104 * This is almost outdated, a task with the pending SIGKILL can't
2105 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2106 * after SIGKILL was already dequeued.
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07002107 */
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07002108 if (unlikely(current->mm->core_state) &&
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07002109 unlikely(current->mm == current->parent->mm))
Christian Brauner6527de92018-08-21 21:59:59 -07002110 return false;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07002111
Christian Brauner6527de92018-08-21 21:59:59 -07002112 return true;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07002113}
2114
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115/*
Randy Dunlap5aba0852011-04-04 14:59:31 -07002116 * Return non-zero if there is a SIGKILL that should be waking us up.
Roland McGrath1a669c22008-02-06 01:37:37 -08002117 * Called with the siglock held.
2118 */
Christian Braunerf99e9d82018-08-21 22:00:50 -07002119static bool sigkill_pending(struct task_struct *tsk)
Roland McGrath1a669c22008-02-06 01:37:37 -08002120{
Christian Braunerf99e9d82018-08-21 22:00:50 -07002121 return sigismember(&tsk->pending.signal, SIGKILL) ||
2122 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
Roland McGrath1a669c22008-02-06 01:37:37 -08002123}
2124
/*
 * Stop the current task for the tracer and report @exit_code/@why.
 *
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
2272
Tejun Heo3544d722011-06-14 11:20:15 +02002273static void ptrace_do_notify(int signr, int exit_code, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274{
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02002275 kernel_siginfo_t info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
Eric W. Biedermanfaf1f222018-01-05 17:27:42 -06002277 clear_siginfo(&info);
Tejun Heo3544d722011-06-14 11:20:15 +02002278 info.si_signo = signr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 info.si_code = exit_code;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002280 info.si_pid = task_pid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08002281 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282
2283 /* Let the debugger run. */
Tejun Heo3544d722011-06-14 11:20:15 +02002284 ptrace_stop(exit_code, why, 1, &info);
2285}
2286
/*
 * Trap to the debugger, reporting @exit_code as a CLD_TRAPPED SIGTRAP.
 * The low byte of @exit_code must be SIGTRAP and no bits above the low
 * 16 may be set (enforced by the BUG_ON); the second byte typically
 * carries a PTRACE_EVENT_* code.  Sleeps in ptrace_stop() until resumed.
 */
void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	/* Flush pending task_work before parking in the trap. */
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}
2297
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		/* Stop signal was re-checked/cancelled, or we're exiting. */
		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		/* Count ourselves into the stop if we still need to stop. */
		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		/* Schedule the stop for every other live sibling thread. */
		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
Tejun Heod79fdd62011-03-23 10:37:00 +01002422
Tejun Heo73ddff22011-06-14 11:20:14 +02002423/**
2424 * do_jobctl_trap - take care of ptrace jobctl traps
2425 *
Tejun Heo3544d722011-06-14 11:20:15 +02002426 * When PT_SEIZED, it's used for both group stop and explicit
2427 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2428 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2429 * the stop signal; otherwise, %SIGTRAP.
2430 *
2431 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2432 * number as exit_code and no siginfo.
Tejun Heo73ddff22011-06-14 11:20:14 +02002433 *
2434 * CONTEXT:
2435 * Must be called with @current->sighand->siglock held, which may be
2436 * released and re-acquired before returning with intervening sleep.
2437 */
2438static void do_jobctl_trap(void)
2439{
Tejun Heo3544d722011-06-14 11:20:15 +02002440 struct signal_struct *signal = current->signal;
Tejun Heo73ddff22011-06-14 11:20:14 +02002441 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
Tejun Heod79fdd62011-03-23 10:37:00 +01002442
Tejun Heo3544d722011-06-14 11:20:15 +02002443 if (current->ptrace & PT_SEIZED) {
2444 if (!signal->group_stop_count &&
2445 !(signal->flags & SIGNAL_STOP_STOPPED))
2446 signr = SIGTRAP;
2447 WARN_ON_ONCE(!signr);
2448 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2449 CLD_STOPPED);
2450 } else {
2451 WARN_ON_ONCE(!signr);
2452 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002453 current->exit_code = 0;
2454 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455}
2456
/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into frozen state, if only the task is not about to quit.
 * In this case it drops JOBCTL_TRAP_FREEZE.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return back.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending fatal signal and no
	 * pending traps.  Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task into sleep.  The ordering matters: the flag must be
	 * cleared before dropping siglock and calling freezable_schedule().
	 */
	__set_current_state(TASK_INTERRUPTIBLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	freezable_schedule();
}
2493
/*
 * Report dequeued signal @signr to the tracer and return the signal the
 * debugger wants delivered (possibly changed via PTRACE_SETSIGINFO), or
 * 0 if the debugger cancelled it or the new signal is blocked (in which
 * case it is requeued).  Called with ->siglock held; ptrace_stop()
 * releases and re-acquires it while we sleep.
 */
static int ptrace_signal(int signr, kernel_siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back. Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed. If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		/* Fake an SI_USER signal as if sent by our real parent. */
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it. */
	if (sigismember(&current->blocked, signr)) {
		send_signal(signr, info, current, PIDTYPE_PID);
		signr = 0;
	}

	return signr;
}
2541
Peter Collingbourne6ac05e82020-11-20 12:33:45 -08002542static void hide_si_addr_tag_bits(struct ksignal *ksig)
2543{
2544 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2545 case SIL_FAULT:
Eric W. Biederman9abcabe2021-04-30 17:29:36 -05002546 case SIL_FAULT_TRAPNO:
Peter Collingbourne6ac05e82020-11-20 12:33:45 -08002547 case SIL_FAULT_MCEERR:
2548 case SIL_FAULT_BNDERR:
2549 case SIL_FAULT_PKUERR:
Marco Elverfb6cc122021-04-08 12:36:00 +02002550 case SIL_PERF_EVENT:
Peter Collingbourne6ac05e82020-11-20 12:33:45 -08002551 ksig->info.si_addr = arch_untagged_si_addr(
2552 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2553 break;
2554 case SIL_KILL:
2555 case SIL_TIMER:
2556 case SIL_POLL:
2557 case SIL_CHLD:
2558 case SIL_RT:
2559 case SIL_SYS:
2560 break;
2561 }
2562}
2563
Christian Brauner20ab7212018-08-21 22:00:54 -07002564bool get_signal(struct ksignal *ksig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565{
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002566 struct sighand_struct *sighand = current->sighand;
2567 struct signal_struct *signal = current->signal;
2568 int signr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569
Jens Axboe35d0b382021-01-05 11:32:43 -07002570 if (unlikely(current->task_works))
2571 task_work_run();
2572
Jens Axboe12db8b62020-10-26 14:32:28 -06002573 /*
2574 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2575 * that the arch handlers don't all have to do it. If we get here
2576 * without TIF_SIGPENDING, just exit after running signal work.
2577 */
Jens Axboe12db8b62020-10-26 14:32:28 -06002578 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2579 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2580 tracehook_notify_signal();
2581 if (!task_sigpending(current))
2582 return false;
2583 }
Jens Axboe12db8b62020-10-26 14:32:28 -06002584
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +05302585 if (unlikely(uprobe_deny_signal()))
Christian Brauner20ab7212018-08-21 22:00:54 -07002586 return false;
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +05302587
Roland McGrath13b1c3d2008-03-03 20:22:05 -08002588 /*
Oleg Nesterov5d8f72b2012-10-26 19:46:06 +02002589 * Do this once, we can't return to user-mode if freezing() == T.
2590 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2591 * thus do not need another check after return.
Roland McGrath13b1c3d2008-03-03 20:22:05 -08002592 */
Rafael J. Wysockifc558a72006-03-23 03:00:05 -08002593 try_to_freeze();
2594
Oleg Nesterov5d8f72b2012-10-26 19:46:06 +02002595relock:
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002596 spin_lock_irq(&sighand->siglock);
Oleg Nesterove91b4812020-06-30 17:32:54 +02002597
Oleg Nesterov021e1ae2008-04-30 00:53:00 -07002598 /*
2599 * Every stopped thread goes here after wakeup. Check to see if
2600 * we should notify the parent, prepare_signal(SIGCONT) encodes
2601 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2602 */
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002603 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
Tejun Heoc672af32011-03-23 10:36:59 +01002604 int why;
2605
2606 if (signal->flags & SIGNAL_CLD_CONTINUED)
2607 why = CLD_CONTINUED;
2608 else
2609 why = CLD_STOPPED;
2610
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002611 signal->flags &= ~SIGNAL_CLD_MASK;
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002612
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002613 spin_unlock_irq(&sighand->siglock);
Oleg Nesterove4420552008-04-30 00:52:44 -07002614
Tejun Heoceb6bd62011-03-23 10:37:01 +01002615 /*
2616 * Notify the parent that we're continuing. This event is
2617 * always per-process and doesn't make whole lot of sense
2618 * for ptracers, who shouldn't consume the state via
2619 * wait(2) either, but, for backward compatibility, notify
2620 * the ptracer of the group leader too unless it's gonna be
2621 * a duplicate.
2622 */
Tejun Heoedf2ed12011-03-23 10:37:00 +01002623 read_lock(&tasklist_lock);
Tejun Heoceb6bd62011-03-23 10:37:01 +01002624 do_notify_parent_cldstop(current, false, why);
2625
Oleg Nesterovbb3696d2011-06-24 17:34:23 +02002626 if (ptrace_reparented(current->group_leader))
2627 do_notify_parent_cldstop(current->group_leader,
2628 true, why);
Tejun Heoedf2ed12011-03-23 10:37:00 +01002629 read_unlock(&tasklist_lock);
Tejun Heoceb6bd62011-03-23 10:37:01 +01002630
Oleg Nesterove4420552008-04-30 00:52:44 -07002631 goto relock;
2632 }
2633
Eric W. Biederman35634ff2019-02-06 18:39:40 -06002634 /* Has this task already been marked for death? */
Eric W. Biedermancf43a752019-02-11 23:27:42 -06002635 if (signal_group_exit(signal)) {
2636 ksig->info.si_signo = signr = SIGKILL;
2637 sigdelset(&current->pending.signal, SIGKILL);
Zhenliang Wei98af37d2019-05-31 22:30:52 -07002638 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2639 &sighand->action[SIGKILL - 1]);
Eric W. Biedermancf43a752019-02-11 23:27:42 -06002640 recalc_sigpending();
Eric W. Biederman35634ff2019-02-06 18:39:40 -06002641 goto fatal;
Eric W. Biedermancf43a752019-02-11 23:27:42 -06002642 }
Eric W. Biederman35634ff2019-02-06 18:39:40 -06002643
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644 for (;;) {
2645 struct k_sigaction *ka;
Tejun Heodd1d6772011-06-02 11:14:00 +02002646
2647 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2648 do_signal_stop(0))
Roland McGrath7bcf6a22008-07-25 19:45:53 -07002649 goto relock;
Oleg Nesterov1be53962009-12-15 16:47:26 -08002650
Roman Gushchin76f969e2019-04-19 10:03:04 -07002651 if (unlikely(current->jobctl &
2652 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2653 if (current->jobctl & JOBCTL_TRAP_MASK) {
2654 do_jobctl_trap();
2655 spin_unlock_irq(&sighand->siglock);
2656 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2657 do_freezer_trap();
2658
2659 goto relock;
2660 }
2661
2662 /*
2663 * If the task is leaving the frozen state, let's update
2664 * cgroup counters and reset the frozen bit.
2665 */
2666 if (unlikely(cgroup_task_frozen(current))) {
Tejun Heo73ddff22011-06-14 11:20:14 +02002667 spin_unlock_irq(&sighand->siglock);
Roman Gushchincb2c4cd2019-04-26 10:59:44 -07002668 cgroup_leave_frozen(false);
Tejun Heo73ddff22011-06-14 11:20:14 +02002669 goto relock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670 }
2671
Eric W. Biederman7146db32019-02-06 17:51:47 -06002672 /*
2673 * Signals generated by the execution of an instruction
2674 * need to be delivered before any other pending signals
2675 * so that the instruction pointer in the signal stack
2676 * frame points to the faulting instruction.
2677 */
2678 signr = dequeue_synchronous_signal(&ksig->info);
2679 if (!signr)
2680 signr = dequeue_signal(current, &current->blocked, &ksig->info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681
Tejun Heodd1d6772011-06-02 11:14:00 +02002682 if (!signr)
2683 break; /* will return 0 */
2684
Oleg Nesterov8a352412011-07-21 17:06:53 +02002685 if (unlikely(current->ptrace) && signr != SIGKILL) {
Richard Weinberger828b1f62013-10-07 15:26:57 +02002686 signr = ptrace_signal(signr, &ksig->info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 if (!signr)
Tejun Heodd1d6772011-06-02 11:14:00 +02002688 continue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 }
2690
Tejun Heodd1d6772011-06-02 11:14:00 +02002691 ka = &sighand->action[signr-1];
2692
Masami Hiramatsuf9d42572009-11-24 16:56:51 -05002693 /* Trace actually delivered signals. */
Richard Weinberger828b1f62013-10-07 15:26:57 +02002694 trace_signal_deliver(signr, &ksig->info, ka);
Masami Hiramatsuf9d42572009-11-24 16:56:51 -05002695
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2697 continue;
2698 if (ka->sa.sa_handler != SIG_DFL) {
2699 /* Run the handler. */
Richard Weinberger828b1f62013-10-07 15:26:57 +02002700 ksig->ka = *ka;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701
2702 if (ka->sa.sa_flags & SA_ONESHOT)
2703 ka->sa.sa_handler = SIG_DFL;
2704
2705 break; /* will return non-zero "signr" value */
2706 }
2707
2708 /*
2709 * Now we are doing the default action for this signal.
2710 */
2711 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2712 continue;
2713
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -08002714 /*
Sukadev Bhattiprolu0fbc26a2007-10-18 23:40:13 -07002715 * Global init gets no signals it doesn't want.
Sukadev Bhattiprolub3bfa0c2009-04-02 16:58:08 -07002716 * Container-init gets no signals it doesn't want from same
2717 * container.
2718 *
2719 * Note that if global/container-init sees a sig_kernel_only()
2720 * signal here, the signal must have been generated internally
2721 * or must have come from an ancestor namespace. In either
2722 * case, the signal cannot be dropped.
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -08002723 */
Oleg Nesterovfae5fa42008-04-30 00:53:03 -07002724 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
Sukadev Bhattiprolub3bfa0c2009-04-02 16:58:08 -07002725 !sig_kernel_only(signr))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 continue;
2727
2728 if (sig_kernel_stop(signr)) {
2729 /*
2730 * The default action is to stop all threads in
2731 * the thread group. The job control signals
2732 * do nothing in an orphaned pgrp, but SIGSTOP
2733 * always works. Note that siglock needs to be
2734 * dropped during the call to is_orphaned_pgrp()
2735 * because of lock ordering with tasklist_lock.
2736 * This allows an intervening SIGCONT to be posted.
2737 * We need to check for that and bail out if necessary.
2738 */
2739 if (signr != SIGSTOP) {
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002740 spin_unlock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741
2742 /* signals can be posted during this window */
2743
Eric W. Biederman3e7cd6c2007-02-12 00:52:58 -08002744 if (is_current_pgrp_orphaned())
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 goto relock;
2746
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002747 spin_lock_irq(&sighand->siglock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748 }
2749
Richard Weinberger828b1f62013-10-07 15:26:57 +02002750 if (likely(do_signal_stop(ksig->info.si_signo))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 /* It released the siglock. */
2752 goto relock;
2753 }
2754
2755 /*
2756 * We didn't actually stop, due to a race
2757 * with SIGCONT or something like that.
2758 */
2759 continue;
2760 }
2761
Eric W. Biederman35634ff2019-02-06 18:39:40 -06002762 fatal:
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002763 spin_unlock_irq(&sighand->siglock);
Roman Gushchinf2b31bb2019-05-08 13:34:20 -07002764 if (unlikely(cgroup_task_frozen(current)))
2765 cgroup_leave_frozen(true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766
2767 /*
2768 * Anything else is fatal, maybe with a core dump.
2769 */
2770 current->flags |= PF_SIGNALED;
Oleg Nesterov2dce81b2008-04-30 00:52:58 -07002771
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772 if (sig_kernel_coredump(signr)) {
Oleg Nesterov2dce81b2008-04-30 00:52:58 -07002773 if (print_fatal_signals)
Richard Weinberger828b1f62013-10-07 15:26:57 +02002774 print_fatal_signal(ksig->info.si_signo);
Jesper Derehag2b5faa42013-03-19 20:50:05 +00002775 proc_coredump_connector(current);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 /*
2777 * If it was able to dump core, this kills all
2778 * other threads in the group and synchronizes with
2779 * their demise. If we lost the race with another
2780 * thread getting here, it set group_exit_code
2781 * first and our do_group_exit call below will use
2782 * that value and ignore the one we pass it.
2783 */
Richard Weinberger828b1f62013-10-07 15:26:57 +02002784 do_coredump(&ksig->info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 }
2786
2787 /*
Jens Axboe10442992021-03-26 08:57:10 -06002788 * PF_IO_WORKER threads will catch and exit on fatal signals
2789 * themselves. They have cleanup that must be performed, so
2790 * we cannot call do_exit() on their behalf.
2791 */
2792 if (current->flags & PF_IO_WORKER)
2793 goto out;
2794
2795 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 * Death signals, no core dump.
2797 */
Richard Weinberger828b1f62013-10-07 15:26:57 +02002798 do_group_exit(ksig->info.si_signo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 /* NOTREACHED */
2800 }
Oleg Nesterovf6b76d42008-04-30 00:52:47 -07002801 spin_unlock_irq(&sighand->siglock);
Jens Axboe10442992021-03-26 08:57:10 -06002802out:
Richard Weinberger828b1f62013-10-07 15:26:57 +02002803 ksig->sig = signr;
Peter Collingbourne6ac05e82020-11-20 12:33:45 -08002804
2805 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2806 hide_si_addr_tag_bits(ksig);
2807
Richard Weinberger828b1f62013-10-07 15:26:57 +02002808 return ksig->sig > 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809}
2810
Matt Fleming5e6292c2012-01-10 15:11:17 -08002811/**
Al Viroefee9842012-04-28 02:04:15 -04002812 * signal_delivered -
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002813 * @ksig: kernel signal struct
Al Viroefee9842012-04-28 02:04:15 -04002814 * @stepping: nonzero if debugger single-step or block-step in use
Matt Fleming5e6292c2012-01-10 15:11:17 -08002815 *
Masanari Iidae2278672014-02-18 22:54:36 +09002816 * This function should be called when a signal has successfully been
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002817 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
Al Viroefee9842012-04-28 02:04:15 -04002818 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002819 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
Matt Fleming5e6292c2012-01-10 15:11:17 -08002820 */
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002821static void signal_delivered(struct ksignal *ksig, int stepping)
Matt Fleming5e6292c2012-01-10 15:11:17 -08002822{
2823 sigset_t blocked;
2824
Al Viroa610d6e2012-05-21 23:42:15 -04002825 /* A signal was successfully delivered, and the
2826 saved sigmask was stored on the signal frame,
2827 and will be restored by sigreturn. So we can
2828 simply clear the restore sigmask flag. */
2829 clear_restore_sigmask();
2830
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002831 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2832 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2833 sigaddset(&blocked, ksig->sig);
Matt Fleming5e6292c2012-01-10 15:11:17 -08002834 set_current_blocked(&blocked);
Richard Weinbergerdf5601f2013-10-07 15:37:19 +02002835 tracehook_signal_handler(stepping);
Matt Fleming5e6292c2012-01-10 15:11:17 -08002836}
2837
Al Viro2ce5da12012-11-07 15:11:25 -05002838void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2839{
2840 if (failed)
Eric W. Biedermancb44c9a2019-05-21 10:03:48 -05002841 force_sigsegv(ksig->sig);
Al Viro2ce5da12012-11-07 15:11:25 -05002842 else
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002843 signal_delivered(ksig, stepping);
Al Viro2ce5da12012-11-07 15:11:25 -05002844}
2845
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002846/*
2847 * It could be that complete_signal() picked us to notify about the
Oleg Nesterovfec99932011-04-27 19:50:21 +02002848 * group-wide signal. Other threads should be notified now to take
2849 * the shared signals in @which since we will not.
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002850 */
Oleg Nesterovf646e222011-04-27 19:18:39 +02002851static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002852{
Oleg Nesterovf646e222011-04-27 19:18:39 +02002853 sigset_t retarget;
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002854 struct task_struct *t;
2855
Oleg Nesterovf646e222011-04-27 19:18:39 +02002856 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2857 if (sigisemptyset(&retarget))
2858 return;
2859
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002860 t = tsk;
2861 while_each_thread(tsk, t) {
Oleg Nesterovfec99932011-04-27 19:50:21 +02002862 if (t->flags & PF_EXITING)
2863 continue;
2864
2865 if (!has_pending_signals(&retarget, &t->blocked))
2866 continue;
2867 /* Remove the signals this thread can handle. */
2868 sigandsets(&retarget, &retarget, &t->blocked);
2869
Jens Axboe5c251e92020-10-26 14:32:27 -06002870 if (!task_sigpending(t))
Oleg Nesterovfec99932011-04-27 19:50:21 +02002871 signal_wake_up(t, 0);
2872
2873 if (sigisemptyset(&retarget))
2874 break;
Oleg Nesterov0edceb7bc2011-04-27 19:17:37 +02002875 }
2876}
2877
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002878void exit_signals(struct task_struct *tsk)
2879{
2880 int group_stop = 0;
Oleg Nesterovf646e222011-04-27 19:18:39 +02002881 sigset_t unblocked;
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002882
Tejun Heo77e4ef92011-12-12 18:12:21 -08002883 /*
2884 * @tsk is about to have PF_EXITING set - lock out users which
2885 * expect stable threadgroup.
2886 */
Ingo Molnar780de9d2017-02-02 11:50:56 +01002887 cgroup_threadgroup_change_begin(tsk);
Tejun Heo77e4ef92011-12-12 18:12:21 -08002888
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002889 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2890 tsk->flags |= PF_EXITING;
Ingo Molnar780de9d2017-02-02 11:50:56 +01002891 cgroup_threadgroup_change_end(tsk);
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002892 return;
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002893 }
2894
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002895 spin_lock_irq(&tsk->sighand->siglock);
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002896 /*
2897 * From now this task is not visible for group-wide signals,
2898 * see wants_signal(), do_signal_stop().
2899 */
2900 tsk->flags |= PF_EXITING;
Tejun Heo77e4ef92011-12-12 18:12:21 -08002901
Ingo Molnar780de9d2017-02-02 11:50:56 +01002902 cgroup_threadgroup_change_end(tsk);
Tejun Heo77e4ef92011-12-12 18:12:21 -08002903
Jens Axboe5c251e92020-10-26 14:32:27 -06002904 if (!task_sigpending(tsk))
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002905 goto out;
2906
Oleg Nesterovf646e222011-04-27 19:18:39 +02002907 unblocked = tsk->blocked;
2908 signotset(&unblocked);
2909 retarget_shared_pending(tsk, &unblocked);
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002910
Tejun Heoa8f072c2011-06-02 11:13:59 +02002911 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
Tejun Heoe5c1902e2011-03-23 10:37:00 +01002912 task_participate_group_stop(tsk))
Tejun Heoedf2ed12011-03-23 10:37:00 +01002913 group_stop = CLD_STOPPED;
Oleg Nesterov5dee1702008-02-08 04:19:13 -08002914out:
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002915 spin_unlock_irq(&tsk->sighand->siglock);
2916
Tejun Heo62bcf9d2011-03-23 10:37:01 +01002917 /*
2918 * If group stop has completed, deliver the notification. This
2919 * should always go to the real parent of the group leader.
2920 */
Roland McGrathae6d2ed2009-09-23 15:56:53 -07002921 if (unlikely(group_stop)) {
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002922 read_lock(&tasklist_lock);
Tejun Heo62bcf9d2011-03-23 10:37:01 +01002923 do_notify_parent_cldstop(tsk, false, group_stop);
Oleg Nesterovd12619b2008-02-08 04:19:12 -08002924 read_unlock(&tasklist_lock);
2925 }
2926}
2927
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928/*
2929 * System call entry points.
2930 */
2931
Randy Dunlap41c57892011-04-04 15:00:26 -07002932/**
2933 * sys_restart_syscall - restart a system call
2934 */
Heiko Carstens754fe8d2009-01-14 14:14:09 +01002935SYSCALL_DEFINE0(restart_syscall)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936{
Andy Lutomirskif56141e2015-02-12 15:01:14 -08002937 struct restart_block *restart = &current->restart_block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 return restart->fn(restart);
2939}
2940
2941long do_no_restart_syscall(struct restart_block *param)
2942{
2943 return -EINTR;
2944}
2945
Oleg Nesterovb1828012011-04-27 21:56:14 +02002946static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2947{
Jens Axboe5c251e92020-10-26 14:32:27 -06002948 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
Oleg Nesterovb1828012011-04-27 21:56:14 +02002949 sigset_t newblocked;
2950 /* A set of now blocked but previously unblocked signals. */
Oleg Nesterov702a5072011-04-27 22:01:27 +02002951 sigandnsets(&newblocked, newset, &current->blocked);
Oleg Nesterovb1828012011-04-27 21:56:14 +02002952 retarget_shared_pending(tsk, &newblocked);
2953 }
2954 tsk->blocked = *newset;
2955 recalc_sigpending();
2956}
2957
Oleg Nesterove6fa16a2011-04-27 20:59:41 +02002958/**
2959 * set_current_blocked - change current->blocked mask
2960 * @newset: new mask
2961 *
2962 * It is wrong to change ->blocked directly, this helper should be used
2963 * to ensure the process can't miss a shared signal we are going to block.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002964 */
Al Viro77097ae2012-04-27 13:58:59 -04002965void set_current_blocked(sigset_t *newset)
2966{
Al Viro77097ae2012-04-27 13:58:59 -04002967 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
Oleg Nesterov0c4a8422013-01-05 19:13:29 +01002968 __set_current_blocked(newset);
Al Viro77097ae2012-04-27 13:58:59 -04002969}
2970
2971void __set_current_blocked(const sigset_t *newset)
Oleg Nesterove6fa16a2011-04-27 20:59:41 +02002972{
2973 struct task_struct *tsk = current;
2974
Waiman Longc7be96a2016-12-14 15:04:10 -08002975 /*
2976 * In case the signal mask hasn't changed, there is nothing we need
2977 * to do. The current->blocked shouldn't be modified by other task.
2978 */
2979 if (sigequalsets(&tsk->blocked, newset))
2980 return;
2981
Oleg Nesterove6fa16a2011-04-27 20:59:41 +02002982 spin_lock_irq(&tsk->sighand->siglock);
Oleg Nesterovb1828012011-04-27 21:56:14 +02002983 __set_task_blocked(tsk, newset);
Oleg Nesterove6fa16a2011-04-27 20:59:41 +02002984 spin_unlock_irq(&tsk->sighand->siglock);
2985}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986
2987/*
2988 * This is also useful for kernel threads that want to temporarily
2989 * (or permanently) block certain signals.
2990 *
2991 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2992 * interface happily blocks "unblockable" signals like SIGKILL
2993 * and friends.
2994 */
2995int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2996{
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002997 struct task_struct *tsk = current;
2998 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02003000 /* Lockless, only current can change ->blocked, never from irq */
Oleg Nesterova26fd332006-03-23 03:00:49 -08003001 if (oldset)
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02003002 *oldset = tsk->blocked;
Oleg Nesterova26fd332006-03-23 03:00:49 -08003003
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004 switch (how) {
3005 case SIG_BLOCK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02003006 sigorsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003007 break;
3008 case SIG_UNBLOCK:
Oleg Nesterov702a5072011-04-27 22:01:27 +02003009 sigandnsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010 break;
3011 case SIG_SETMASK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02003012 newset = *set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013 break;
3014 default:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02003015 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 }
Oleg Nesterova26fd332006-03-23 03:00:49 -08003017
Al Viro77097ae2012-04-27 13:58:59 -04003018 __set_current_blocked(&newset);
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02003019 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003020}
Eric W. Biedermanfb50f5a2018-09-13 19:26:35 +02003021EXPORT_SYMBOL(sigprocmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022
Deepa Dinamanided653c2018-09-19 21:41:04 -07003023/*
3024 * The api helps set app-provided sigmasks.
3025 *
3026 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3027 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
Oleg Nesterovb7724342019-07-16 16:29:53 -07003028 *
3029 * Note that it does set_restore_sigmask() in advance, so it must be always
3030 * paired with restore_saved_sigmask_unless() before return from syscall.
Deepa Dinamanided653c2018-09-19 21:41:04 -07003031 */
Oleg Nesterovb7724342019-07-16 16:29:53 -07003032int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
Deepa Dinamanided653c2018-09-19 21:41:04 -07003033{
Oleg Nesterovb7724342019-07-16 16:29:53 -07003034 sigset_t kmask;
Deepa Dinamanided653c2018-09-19 21:41:04 -07003035
Oleg Nesterovb7724342019-07-16 16:29:53 -07003036 if (!umask)
3037 return 0;
Deepa Dinamanided653c2018-09-19 21:41:04 -07003038 if (sigsetsize != sizeof(sigset_t))
3039 return -EINVAL;
Oleg Nesterovb7724342019-07-16 16:29:53 -07003040 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
Deepa Dinamanided653c2018-09-19 21:41:04 -07003041 return -EFAULT;
3042
Oleg Nesterovb7724342019-07-16 16:29:53 -07003043 set_restore_sigmask();
3044 current->saved_sigmask = current->blocked;
3045 set_current_blocked(&kmask);
Deepa Dinamanided653c2018-09-19 21:41:04 -07003046
3047 return 0;
3048}
Deepa Dinamanided653c2018-09-19 21:41:04 -07003049
#ifdef CONFIG_COMPAT
/* Compat counterpart of set_user_sigmask(): same contract, compat layout. */
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
			    size_t sigsetsize)
{
	sigset_t kmask;

	/* A NULL mask means "leave the signal mask alone". */
	if (!umask)
		return 0;
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (get_compat_sigset(&kmask, umask))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
#endif
3070
Randy Dunlap41c57892011-04-04 15:00:26 -07003071/**
3072 * sys_rt_sigprocmask - change the list of currently blocked signals
3073 * @how: whether to add, remove, or set signals
Randy Dunlapada9c932011-06-14 15:50:11 -07003074 * @nset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07003075 * @oset: previous value of signal mask if non-null
3076 * @sigsetsize: size of sigset_t type
3077 */
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02003078SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
Heiko Carstens17da2bd2009-01-14 14:14:10 +01003079 sigset_t __user *, oset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003080{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003081 sigset_t old_set, new_set;
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02003082 int error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003083
3084 /* XXX: Don't preclude handling different sized sigset_t's. */
3085 if (sigsetsize != sizeof(sigset_t))
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02003086 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003087
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02003088 old_set = current->blocked;
3089
3090 if (nset) {
3091 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3092 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003093 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3094
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02003095 error = sigprocmask(how, &new_set, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003096 if (error)
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02003097 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003098 }
Oleg Nesterovbb7efee2011-04-27 21:18:10 +02003099
3100 if (oset) {
3101 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3102 return -EFAULT;
3103 }
3104
3105 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003106}
3107
#ifdef CONFIG_COMPAT
/* Compat counterpart of sys_rt_sigprocmask() using compat sigset layout. */
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;

		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		/* Userspace may never block SIGKILL or SIGSTOP. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif
Al Viro322a56c2012-12-25 13:32:58 -05003132
Christian Braunerb1d294c2018-08-21 22:00:02 -07003133static void do_sigpending(sigset_t *set)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135 spin_lock_irq(&current->sighand->siglock);
Al Virofe9c1db2012-12-25 14:31:38 -05003136 sigorsets(set, &current->pending.signal,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137 &current->signal->shared_pending.signal);
3138 spin_unlock_irq(&current->sighand->siglock);
3139
3140 /* Outside the lock because only this thread touches it. */
Al Virofe9c1db2012-12-25 14:31:38 -05003141 sigandsets(set, &current->blocked, set);
Randy Dunlap5aba0852011-04-04 14:59:31 -07003142}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003143
Randy Dunlap41c57892011-04-04 15:00:26 -07003144/**
3145 * sys_rt_sigpending - examine a pending signal that has been raised
3146 * while blocked
Randy Dunlap20f22ab2013-03-04 14:32:59 -08003147 * @uset: stores pending signals
Randy Dunlap41c57892011-04-04 15:00:26 -07003148 * @sigsetsize: size of sigset_t type or larger
3149 */
Al Virofe9c1db2012-12-25 14:31:38 -05003150SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151{
Al Virofe9c1db2012-12-25 14:31:38 -05003152 sigset_t set;
Dmitry V. Levin176826a2017-08-22 02:16:43 +03003153
3154 if (sigsetsize > sizeof(*uset))
3155 return -EINVAL;
3156
Christian Braunerb1d294c2018-08-21 22:00:02 -07003157 do_sigpending(&set);
3158
3159 if (copy_to_user(uset, &set, sigsetsize))
3160 return -EFAULT;
3161
3162 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163}
3164
#ifdef CONFIG_COMPAT
/* Compat counterpart of sys_rt_sigpending() using compat sigset layout. */
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	return put_compat_sigset(uset, &set, sigsetsize);
}
#endif
Al Virofe9c1db2012-12-25 14:31:38 -05003179
Eric W. Biederman4ce5f9c2018-09-25 12:59:31 +02003180static const struct {
3181 unsigned char limit, layout;
3182} sig_sicodes[] = {
3183 [SIGILL] = { NSIGILL, SIL_FAULT },
3184 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3185 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3186 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3187 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3188#if defined(SIGEMT)
3189 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3190#endif
3191 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3192 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3193 [SIGSYS] = { NSIGSYS, SIL_SYS },
3194};
3195
Eric W. Biedermanb2a2ab52018-10-10 20:11:25 -05003196static bool known_siginfo_layout(unsigned sig, int si_code)
Eric W. Biederman4ce5f9c2018-09-25 12:59:31 +02003197{
3198 if (si_code == SI_KERNEL)
3199 return true;
3200 else if ((si_code > SI_USER)) {
3201 if (sig_specific_sicodes(sig)) {
3202 if (si_code <= sig_sicodes[sig].limit)
3203 return true;
3204 }
3205 else if (si_code <= NSIGPOLL)
3206 return true;
3207 }
3208 else if (si_code >= SI_DETHREAD)
3209 return true;
3210 else if (si_code == SI_ASYNCNL)
3211 return true;
3212 return false;
3213}
3214
Eric W. Biedermana3670052018-10-10 20:29:44 -05003215enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
Eric W. Biedermancc731522017-07-16 22:36:59 -05003216{
3217 enum siginfo_layout layout = SIL_KILL;
3218 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
Eric W. Biederman4ce5f9c2018-09-25 12:59:31 +02003219 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3220 (si_code <= sig_sicodes[sig].limit)) {
3221 layout = sig_sicodes[sig].layout;
Eric W. Biederman31931c92018-04-24 20:59:47 -05003222 /* Handle the exceptions */
3223 if ((sig == SIGBUS) &&
3224 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3225 layout = SIL_FAULT_MCEERR;
3226 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3227 layout = SIL_FAULT_BNDERR;
3228#ifdef SEGV_PKUERR
3229 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3230 layout = SIL_FAULT_PKUERR;
3231#endif
Marco Elvered8e5082021-04-22 21:18:23 +02003232 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3233 layout = SIL_PERF_EVENT;
Eric W. Biederman9abcabe2021-04-30 17:29:36 -05003234#ifdef __ARCH_SI_TRAPNO
3235 else if (layout == SIL_FAULT)
3236 layout = SIL_FAULT_TRAPNO;
3237#endif
Eric W. Biederman31931c92018-04-24 20:59:47 -05003238 }
Eric W. Biedermancc731522017-07-16 22:36:59 -05003239 else if (si_code <= NSIGPOLL)
3240 layout = SIL_POLL;
3241 } else {
3242 if (si_code == SI_TIMER)
3243 layout = SIL_TIMER;
3244 else if (si_code == SI_SIGIO)
3245 layout = SIL_POLL;
3246 else if (si_code < 0)
3247 layout = SIL_RT;
Eric W. Biedermancc731522017-07-16 22:36:59 -05003248 }
3249 return layout;
3250}
3251
Eric W. Biederman4ce5f9c2018-09-25 12:59:31 +02003252static inline char __user *si_expansion(const siginfo_t __user *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253{
Eric W. Biederman4ce5f9c2018-09-25 12:59:31 +02003254 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3255}
3256
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003257int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003258{
Eric W. Biederman4ce5f9c2018-09-25 12:59:31 +02003259 char __user *expansion = si_expansion(to);
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003260 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003261 return -EFAULT;
Eric W. Biederman4ce5f9c2018-09-25 12:59:31 +02003262 if (clear_user(expansion, SI_EXPANSION_SIZE))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263 return -EFAULT;
Eric W. Biedermanc999b932018-04-14 13:03:25 -05003264 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265}
3266
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003267static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3268 const siginfo_t __user *from)
Eric W. Biederman4cd2e0e2018-04-18 17:30:19 -05003269{
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003270 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
Eric W. Biederman4ce5f9c2018-09-25 12:59:31 +02003271 char __user *expansion = si_expansion(from);
3272 char buf[SI_EXPANSION_SIZE];
3273 int i;
3274 /*
3275 * An unknown si_code might need more than
3276 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3277 * extra bytes are 0. This guarantees copy_siginfo_to_user
3278 * will return this data to userspace exactly.
3279 */
3280 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3281 return -EFAULT;
3282 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3283 if (buf[i] != 0)
3284 return -E2BIG;
3285 }
3286 }
Eric W. Biederman4cd2e0e2018-04-18 17:30:19 -05003287 return 0;
3288}
3289
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003290static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3291 const siginfo_t __user *from)
3292{
3293 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3294 return -EFAULT;
3295 to->si_signo = signo;
3296 return post_copy_siginfo_from_user(to, from);
3297}
3298
3299int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3300{
3301 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3302 return -EFAULT;
3303 return post_copy_siginfo_from_user(to, from);
3304}
3305
Eric W. Biederman212a36a2017-07-31 17:15:31 -05003306#ifdef CONFIG_COMPAT
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003307/**
3308 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3309 * @to: compat siginfo destination
3310 * @from: kernel siginfo source
3311 *
3312 * Note: This function does not work properly for the SIGCHLD on x32, but
3313 * fortunately it doesn't have to. The only valid callers for this function are
3314 * copy_siginfo_to_user32, which is overriden for x32 and the coredump code.
3315 * The latter does not care because SIGCHLD will never cause a coredump.
3316 */
3317void copy_siginfo_to_external32(struct compat_siginfo *to,
3318 const struct kernel_siginfo *from)
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003319{
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003320 memset(to, 0, sizeof(*to));
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003321
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003322 to->si_signo = from->si_signo;
3323 to->si_errno = from->si_errno;
3324 to->si_code = from->si_code;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003325 switch(siginfo_layout(from->si_signo, from->si_code)) {
3326 case SIL_KILL:
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003327 to->si_pid = from->si_pid;
3328 to->si_uid = from->si_uid;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003329 break;
3330 case SIL_TIMER:
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003331 to->si_tid = from->si_tid;
3332 to->si_overrun = from->si_overrun;
3333 to->si_int = from->si_int;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003334 break;
3335 case SIL_POLL:
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003336 to->si_band = from->si_band;
3337 to->si_fd = from->si_fd;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003338 break;
3339 case SIL_FAULT:
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003340 to->si_addr = ptr_to_compat(from->si_addr);
Eric W. Biederman9abcabe2021-04-30 17:29:36 -05003341 break;
3342 case SIL_FAULT_TRAPNO:
3343 to->si_addr = ptr_to_compat(from->si_addr);
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003344 to->si_trapno = from->si_trapno;
Eric W. Biederman31931c92018-04-24 20:59:47 -05003345 break;
3346 case SIL_FAULT_MCEERR:
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003347 to->si_addr = ptr_to_compat(from->si_addr);
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003348 to->si_addr_lsb = from->si_addr_lsb;
Eric W. Biederman31931c92018-04-24 20:59:47 -05003349 break;
3350 case SIL_FAULT_BNDERR:
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003351 to->si_addr = ptr_to_compat(from->si_addr);
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003352 to->si_lower = ptr_to_compat(from->si_lower);
3353 to->si_upper = ptr_to_compat(from->si_upper);
Eric W. Biederman31931c92018-04-24 20:59:47 -05003354 break;
3355 case SIL_FAULT_PKUERR:
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003356 to->si_addr = ptr_to_compat(from->si_addr);
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003357 to->si_pkey = from->si_pkey;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003358 break;
Marco Elverfb6cc122021-04-08 12:36:00 +02003359 case SIL_PERF_EVENT:
3360 to->si_addr = ptr_to_compat(from->si_addr);
Eric W. Biederman0683b532021-05-02 17:28:31 -05003361 to->si_perf_data = from->si_perf_data;
3362 to->si_perf_type = from->si_perf_type;
Marco Elverfb6cc122021-04-08 12:36:00 +02003363 break;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003364 case SIL_CHLD:
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003365 to->si_pid = from->si_pid;
3366 to->si_uid = from->si_uid;
3367 to->si_status = from->si_status;
3368 to->si_utime = from->si_utime;
3369 to->si_stime = from->si_stime;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003370 break;
3371 case SIL_RT:
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003372 to->si_pid = from->si_pid;
3373 to->si_uid = from->si_uid;
3374 to->si_int = from->si_int;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003375 break;
3376 case SIL_SYS:
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003377 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3378 to->si_syscall = from->si_syscall;
3379 to->si_arch = from->si_arch;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003380 break;
3381 }
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003382}
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003383
Christoph Hellwigc3b3f522020-05-05 12:12:53 +02003384int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3385 const struct kernel_siginfo *from)
3386{
3387 struct compat_siginfo new;
3388
3389 copy_siginfo_to_external32(&new, from);
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003390 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3391 return -EFAULT;
Eric W. Biedermanea64d5a2018-01-15 18:03:33 -06003392 return 0;
3393}
3394
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003395static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3396 const struct compat_siginfo *from)
3397{
3398 clear_siginfo(to);
3399 to->si_signo = from->si_signo;
3400 to->si_errno = from->si_errno;
3401 to->si_code = from->si_code;
3402 switch(siginfo_layout(from->si_signo, from->si_code)) {
3403 case SIL_KILL:
3404 to->si_pid = from->si_pid;
3405 to->si_uid = from->si_uid;
3406 break;
3407 case SIL_TIMER:
3408 to->si_tid = from->si_tid;
3409 to->si_overrun = from->si_overrun;
3410 to->si_int = from->si_int;
3411 break;
3412 case SIL_POLL:
3413 to->si_band = from->si_band;
3414 to->si_fd = from->si_fd;
3415 break;
3416 case SIL_FAULT:
3417 to->si_addr = compat_ptr(from->si_addr);
Eric W. Biederman9abcabe2021-04-30 17:29:36 -05003418 break;
3419 case SIL_FAULT_TRAPNO:
3420 to->si_addr = compat_ptr(from->si_addr);
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003421 to->si_trapno = from->si_trapno;
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003422 break;
3423 case SIL_FAULT_MCEERR:
3424 to->si_addr = compat_ptr(from->si_addr);
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003425 to->si_addr_lsb = from->si_addr_lsb;
3426 break;
3427 case SIL_FAULT_BNDERR:
3428 to->si_addr = compat_ptr(from->si_addr);
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003429 to->si_lower = compat_ptr(from->si_lower);
3430 to->si_upper = compat_ptr(from->si_upper);
3431 break;
3432 case SIL_FAULT_PKUERR:
3433 to->si_addr = compat_ptr(from->si_addr);
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003434 to->si_pkey = from->si_pkey;
3435 break;
Marco Elverfb6cc122021-04-08 12:36:00 +02003436 case SIL_PERF_EVENT:
3437 to->si_addr = compat_ptr(from->si_addr);
Eric W. Biederman0683b532021-05-02 17:28:31 -05003438 to->si_perf_data = from->si_perf_data;
3439 to->si_perf_type = from->si_perf_type;
Marco Elverfb6cc122021-04-08 12:36:00 +02003440 break;
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003441 case SIL_CHLD:
3442 to->si_pid = from->si_pid;
3443 to->si_uid = from->si_uid;
3444 to->si_status = from->si_status;
3445#ifdef CONFIG_X86_X32_ABI
3446 if (in_x32_syscall()) {
3447 to->si_utime = from->_sifields._sigchld_x32._utime;
3448 to->si_stime = from->_sifields._sigchld_x32._stime;
3449 } else
3450#endif
3451 {
3452 to->si_utime = from->si_utime;
3453 to->si_stime = from->si_stime;
3454 }
3455 break;
3456 case SIL_RT:
3457 to->si_pid = from->si_pid;
3458 to->si_uid = from->si_uid;
3459 to->si_int = from->si_int;
3460 break;
3461 case SIL_SYS:
3462 to->si_call_addr = compat_ptr(from->si_call_addr);
3463 to->si_syscall = from->si_syscall;
3464 to->si_arch = from->si_arch;
3465 break;
3466 }
3467 return 0;
3468}
3469
3470static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3471 const struct compat_siginfo __user *ufrom)
3472{
3473 struct compat_siginfo from;
3474
3475 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3476 return -EFAULT;
3477
3478 from.si_signo = signo;
3479 return post_copy_siginfo_from_user32(to, &from);
3480}
3481
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003482int copy_siginfo_from_user32(struct kernel_siginfo *to,
Eric W. Biederman212a36a2017-07-31 17:15:31 -05003483 const struct compat_siginfo __user *ufrom)
3484{
3485 struct compat_siginfo from;
3486
3487 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3488 return -EFAULT;
3489
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003490 return post_copy_siginfo_from_user32(to, &from);
Eric W. Biederman212a36a2017-07-31 17:15:31 -05003491}
3492#endif /* CONFIG_COMPAT */
3493
Randy Dunlap41c57892011-04-04 15:00:26 -07003494/**
Oleg Nesterov943df142011-04-27 21:44:14 +02003495 * do_sigtimedwait - wait for queued signals specified in @which
3496 * @which: queued signals to wait for
3497 * @info: if non-null, the signal's siginfo is returned here
3498 * @ts: upper bound on process time suspension
3499 */
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003500static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
Arnd Bergmann49c39f82018-04-18 15:56:13 +02003501 const struct timespec64 *ts)
Oleg Nesterov943df142011-04-27 21:44:14 +02003502{
Thomas Gleixner2456e852016-12-25 11:38:40 +01003503 ktime_t *to = NULL, timeout = KTIME_MAX;
Oleg Nesterov943df142011-04-27 21:44:14 +02003504 struct task_struct *tsk = current;
Oleg Nesterov943df142011-04-27 21:44:14 +02003505 sigset_t mask = *which;
Thomas Gleixner2b1ecc32016-07-04 09:50:25 +00003506 int sig, ret = 0;
Oleg Nesterov943df142011-04-27 21:44:14 +02003507
3508 if (ts) {
Arnd Bergmann49c39f82018-04-18 15:56:13 +02003509 if (!timespec64_valid(ts))
Oleg Nesterov943df142011-04-27 21:44:14 +02003510 return -EINVAL;
Arnd Bergmann49c39f82018-04-18 15:56:13 +02003511 timeout = timespec64_to_ktime(*ts);
Thomas Gleixner2b1ecc32016-07-04 09:50:25 +00003512 to = &timeout;
Oleg Nesterov943df142011-04-27 21:44:14 +02003513 }
3514
3515 /*
3516 * Invert the set of allowed signals to get those we want to block.
3517 */
3518 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3519 signotset(&mask);
3520
3521 spin_lock_irq(&tsk->sighand->siglock);
3522 sig = dequeue_signal(tsk, &mask, info);
Thomas Gleixner2456e852016-12-25 11:38:40 +01003523 if (!sig && timeout) {
Oleg Nesterov943df142011-04-27 21:44:14 +02003524 /*
3525 * None ready, temporarily unblock those we're interested
3526 * while we are sleeping in so that we'll be awakened when
Oleg Nesterovb1828012011-04-27 21:56:14 +02003527 * they arrive. Unblocking is always fine, we can avoid
3528 * set_current_blocked().
Oleg Nesterov943df142011-04-27 21:44:14 +02003529 */
3530 tsk->real_blocked = tsk->blocked;
3531 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3532 recalc_sigpending();
3533 spin_unlock_irq(&tsk->sighand->siglock);
3534
Thomas Gleixner2b1ecc32016-07-04 09:50:25 +00003535 __set_current_state(TASK_INTERRUPTIBLE);
3536 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3537 HRTIMER_MODE_REL);
Oleg Nesterov943df142011-04-27 21:44:14 +02003538 spin_lock_irq(&tsk->sighand->siglock);
Oleg Nesterovb1828012011-04-27 21:56:14 +02003539 __set_task_blocked(tsk, &tsk->real_blocked);
Oleg Nesterov61140412014-06-06 14:36:46 -07003540 sigemptyset(&tsk->real_blocked);
Oleg Nesterovb1828012011-04-27 21:56:14 +02003541 sig = dequeue_signal(tsk, &mask, info);
Oleg Nesterov943df142011-04-27 21:44:14 +02003542 }
3543 spin_unlock_irq(&tsk->sighand->siglock);
3544
3545 if (sig)
3546 return sig;
Thomas Gleixner2b1ecc32016-07-04 09:50:25 +00003547 return ret ? -EINTR : -EAGAIN;
Oleg Nesterov943df142011-04-27 21:44:14 +02003548}
3549
3550/**
Randy Dunlap41c57892011-04-04 15:00:26 -07003551 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3552 * in @uthese
3553 * @uthese: queued signals to wait for
3554 * @uinfo: if non-null, the signal's siginfo is returned here
3555 * @uts: upper bound on process time suspension
3556 * @sigsetsize: size of sigset_t type
3557 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01003558SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
Arnd Bergmann49c39f82018-04-18 15:56:13 +02003559 siginfo_t __user *, uinfo,
3560 const struct __kernel_timespec __user *, uts,
Heiko Carstens17da2bd2009-01-14 14:14:10 +01003561 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003562{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 sigset_t these;
Arnd Bergmann49c39f82018-04-18 15:56:13 +02003564 struct timespec64 ts;
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003565 kernel_siginfo_t info;
Oleg Nesterov943df142011-04-27 21:44:14 +02003566 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567
3568 /* XXX: Don't preclude handling different sized sigset_t's. */
3569 if (sigsetsize != sizeof(sigset_t))
3570 return -EINVAL;
3571
3572 if (copy_from_user(&these, uthese, sizeof(these)))
3573 return -EFAULT;
Randy Dunlap5aba0852011-04-04 14:59:31 -07003574
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575 if (uts) {
Arnd Bergmann49c39f82018-04-18 15:56:13 +02003576 if (get_timespec64(&ts, uts))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003578 }
3579
Oleg Nesterov943df142011-04-27 21:44:14 +02003580 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581
Oleg Nesterov943df142011-04-27 21:44:14 +02003582 if (ret > 0 && uinfo) {
3583 if (copy_siginfo_to_user(uinfo, &info))
3584 ret = -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003585 }
3586
3587 return ret;
3588}
3589
Arnd Bergmanndf8522a2018-04-18 16:15:37 +02003590#ifdef CONFIG_COMPAT_32BIT_TIME
3591SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3592 siginfo_t __user *, uinfo,
3593 const struct old_timespec32 __user *, uts,
3594 size_t, sigsetsize)
3595{
3596 sigset_t these;
3597 struct timespec64 ts;
3598 kernel_siginfo_t info;
3599 int ret;
3600
3601 if (sigsetsize != sizeof(sigset_t))
3602 return -EINVAL;
3603
3604 if (copy_from_user(&these, uthese, sizeof(these)))
3605 return -EFAULT;
3606
3607 if (uts) {
3608 if (get_old_timespec32(&ts, uts))
3609 return -EFAULT;
3610 }
3611
3612 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3613
3614 if (ret > 0 && uinfo) {
3615 if (copy_siginfo_to_user(uinfo, &info))
3616 ret = -EFAULT;
3617 }
3618
3619 return ret;
3620}
3621#endif
3622
Al Viro1b3c8722017-05-31 04:46:17 -04003623#ifdef CONFIG_COMPAT
Arnd Bergmann2367c4b2018-04-18 16:18:35 +02003624COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3625 struct compat_siginfo __user *, uinfo,
3626 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3627{
3628 sigset_t s;
3629 struct timespec64 t;
3630 kernel_siginfo_t info;
3631 long ret;
3632
3633 if (sigsetsize != sizeof(sigset_t))
3634 return -EINVAL;
3635
3636 if (get_compat_sigset(&s, uthese))
3637 return -EFAULT;
3638
3639 if (uts) {
3640 if (get_timespec64(&t, uts))
3641 return -EFAULT;
3642 }
3643
3644 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3645
3646 if (ret > 0 && uinfo) {
3647 if (copy_siginfo_to_user32(uinfo, &info))
3648 ret = -EFAULT;
3649 }
3650
3651 return ret;
3652}
3653
3654#ifdef CONFIG_COMPAT_32BIT_TIME
Arnd Bergmann8dabe722019-01-07 00:33:08 +01003655COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
Al Viro1b3c8722017-05-31 04:46:17 -04003656 struct compat_siginfo __user *, uinfo,
Arnd Bergmann9afc5ee2018-07-13 12:52:28 +02003657 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
Al Viro1b3c8722017-05-31 04:46:17 -04003658{
Al Viro1b3c8722017-05-31 04:46:17 -04003659 sigset_t s;
Arnd Bergmann49c39f82018-04-18 15:56:13 +02003660 struct timespec64 t;
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003661 kernel_siginfo_t info;
Al Viro1b3c8722017-05-31 04:46:17 -04003662 long ret;
3663
3664 if (sigsetsize != sizeof(sigset_t))
3665 return -EINVAL;
3666
Al Viro3968cf62017-09-03 21:45:17 -04003667 if (get_compat_sigset(&s, uthese))
Al Viro1b3c8722017-05-31 04:46:17 -04003668 return -EFAULT;
Al Viro1b3c8722017-05-31 04:46:17 -04003669
3670 if (uts) {
Arnd Bergmann49c39f82018-04-18 15:56:13 +02003671 if (get_old_timespec32(&t, uts))
Al Viro1b3c8722017-05-31 04:46:17 -04003672 return -EFAULT;
3673 }
3674
3675 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3676
3677 if (ret > 0 && uinfo) {
3678 if (copy_siginfo_to_user32(uinfo, &info))
3679 ret = -EFAULT;
3680 }
3681
3682 return ret;
3683}
3684#endif
Arnd Bergmann2367c4b2018-04-18 16:18:35 +02003685#endif
Al Viro1b3c8722017-05-31 04:46:17 -04003686
Christian Brauner3eb39f42018-11-19 00:51:56 +01003687static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3688{
3689 clear_siginfo(info);
3690 info->si_signo = sig;
3691 info->si_errno = 0;
3692 info->si_code = SI_USER;
3693 info->si_pid = task_tgid_vnr(current);
3694 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3695}
3696
Randy Dunlap41c57892011-04-04 15:00:26 -07003697/**
3698 * sys_kill - send a signal to a process
3699 * @pid: the PID of the process
3700 * @sig: signal to be sent
3701 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01003702SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003703{
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003704 struct kernel_siginfo info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003705
Christian Brauner3eb39f42018-11-19 00:51:56 +01003706 prepare_kill_siginfo(sig, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003707
3708 return kill_something_info(sig, &info, pid);
3709}
3710
Christian Brauner3eb39f42018-11-19 00:51:56 +01003711/*
3712 * Verify that the signaler and signalee either are in the same pid namespace
3713 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3714 * namespace.
3715 */
3716static bool access_pidfd_pidns(struct pid *pid)
3717{
3718 struct pid_namespace *active = task_active_pid_ns(current);
3719 struct pid_namespace *p = ns_of_pid(pid);
3720
3721 for (;;) {
3722 if (!p)
3723 return false;
3724 if (p == active)
3725 break;
3726 p = p->parent;
3727 }
3728
3729 return true;
3730}
3731
Jann Hornadc5d872020-12-07 01:02:52 +01003732static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3733 siginfo_t __user *info)
Christian Brauner3eb39f42018-11-19 00:51:56 +01003734{
3735#ifdef CONFIG_COMPAT
3736 /*
3737 * Avoid hooking up compat syscalls and instead handle necessary
3738 * conversions here. Note, this is a stop-gap measure and should not be
3739 * considered a generic solution.
3740 */
3741 if (in_compat_syscall())
3742 return copy_siginfo_from_user32(
3743 kinfo, (struct compat_siginfo __user *)info);
3744#endif
3745 return copy_siginfo_from_user(kinfo, info);
3746}
3747
Christian Brauner2151ad12019-04-17 22:50:25 +02003748static struct pid *pidfd_to_pid(const struct file *file)
3749{
Christian Brauner3695eae2019-07-28 00:22:29 +02003750 struct pid *pid;
3751
3752 pid = pidfd_pid(file);
3753 if (!IS_ERR(pid))
3754 return pid;
Christian Brauner2151ad12019-04-17 22:50:25 +02003755
3756 return tgid_pidfd_to_pid(file);
3757}
3758
Christian Brauner3eb39f42018-11-19 00:51:56 +01003759/**
Christian Braunerc7323272019-06-04 15:18:43 +02003760 * sys_pidfd_send_signal - Signal a process through a pidfd
3761 * @pidfd: file descriptor of the process
3762 * @sig: signal to send
3763 * @info: signal info
3764 * @flags: future flags
Christian Brauner3eb39f42018-11-19 00:51:56 +01003765 *
3766 * The syscall currently only signals via PIDTYPE_PID which covers
3767 * kill(<positive-pid>, <signal>. It does not signal threads or process
3768 * groups.
3769 * In order to extend the syscall to threads and process groups the @flags
3770 * argument should be used. In essence, the @flags argument will determine
3771 * what is signaled and not the file descriptor itself. Put in other words,
3772 * grouping is a property of the flags argument not a property of the file
3773 * descriptor.
3774 *
3775 * Return: 0 on success, negative errno on failure
3776 */
3777SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3778 siginfo_t __user *, info, unsigned int, flags)
3779{
3780 int ret;
3781 struct fd f;
3782 struct pid *pid;
3783 kernel_siginfo_t kinfo;
3784
3785 /* Enforce flags be set to 0 until we add an extension. */
3786 if (flags)
3787 return -EINVAL;
3788
Christian Brauner738a7832019-04-18 12:18:39 +02003789 f = fdget(pidfd);
Christian Brauner3eb39f42018-11-19 00:51:56 +01003790 if (!f.file)
3791 return -EBADF;
3792
3793 /* Is this a pidfd? */
Christian Brauner2151ad12019-04-17 22:50:25 +02003794 pid = pidfd_to_pid(f.file);
Christian Brauner3eb39f42018-11-19 00:51:56 +01003795 if (IS_ERR(pid)) {
3796 ret = PTR_ERR(pid);
3797 goto err;
3798 }
3799
3800 ret = -EINVAL;
3801 if (!access_pidfd_pidns(pid))
3802 goto err;
3803
3804 if (info) {
3805 ret = copy_siginfo_from_user_any(&kinfo, info);
3806 if (unlikely(ret))
3807 goto err;
3808
3809 ret = -EINVAL;
3810 if (unlikely(sig != kinfo.si_signo))
3811 goto err;
3812
Jann Horn556a8882019-03-30 03:12:32 +01003813 /* Only allow sending arbitrary signals to yourself. */
3814 ret = -EPERM;
Christian Brauner3eb39f42018-11-19 00:51:56 +01003815 if ((task_pid(current) != pid) &&
Jann Horn556a8882019-03-30 03:12:32 +01003816 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3817 goto err;
Christian Brauner3eb39f42018-11-19 00:51:56 +01003818 } else {
3819 prepare_kill_siginfo(sig, &kinfo);
3820 }
3821
3822 ret = kill_pid_info(sig, &kinfo, pid);
3823
3824err:
3825 fdput(f);
3826 return ret;
3827}
Christian Brauner3eb39f42018-11-19 00:51:56 +01003828
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003829static int
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003830do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003831{
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003832 struct task_struct *p;
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003833 int error = -ESRCH;
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003834
Oleg Nesterov3547ff32008-04-30 00:52:51 -07003835 rcu_read_lock();
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07003836 p = find_task_by_vpid(pid);
Pavel Emelyanovb4888932007-10-18 23:40:14 -07003837 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003838 error = check_kill_permission(sig, info, p);
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003839 /*
3840 * The null signal is a permissions and process existence
3841 * probe. No signal is actually delivered.
3842 */
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07003843 if (!error && sig) {
Eric W. Biederman40b3b022018-07-21 10:45:15 -05003844 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07003845 /*
3846 * If lock_task_sighand() failed we pretend the task
3847 * dies after receiving the signal. The window is tiny,
3848 * and the signal is private anyway.
3849 */
3850 if (unlikely(error == -ESRCH))
3851 error = 0;
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003852 }
3853 }
Oleg Nesterov3547ff32008-04-30 00:52:51 -07003854 rcu_read_unlock();
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003855
3856 return error;
3857}
3858
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003859static int do_tkill(pid_t tgid, pid_t pid, int sig)
3860{
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003861 struct kernel_siginfo info;
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003862
Eric W. Biederman5f749722018-01-22 14:58:57 -06003863 clear_siginfo(&info);
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003864 info.si_signo = sig;
3865 info.si_errno = 0;
3866 info.si_code = SI_TKILL;
3867 info.si_pid = task_tgid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08003868 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00003869
3870 return do_send_specific(tgid, pid, sig, &info);
3871}
3872
Linus Torvalds1da177e2005-04-16 15:20:36 -07003873/**
3874 * sys_tgkill - send signal to one specific thread
3875 * @tgid: the thread group ID of the thread
3876 * @pid: the PID of the thread
3877 * @sig: signal to be sent
3878 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08003879 * This syscall also checks the @tgid and returns -ESRCH even if the PID
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880 * exists but it's not belonging to the target process anymore. This
3881 * method solves the problem of threads exiting and PIDs getting reused.
3882 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003883SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003884{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885 /* This is only valid for single tasks */
3886 if (pid <= 0 || tgid <= 0)
3887 return -EINVAL;
3888
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003889 return do_tkill(tgid, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890}
3891
Randy Dunlap41c57892011-04-04 15:00:26 -07003892/**
3893 * sys_tkill - send signal to one specific task
3894 * @pid: the PID of the task
3895 * @sig: signal to be sent
3896 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003897 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3898 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003899SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003901 /* This is only valid for single tasks */
3902 if (pid <= 0)
3903 return -EINVAL;
3904
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003905 return do_tkill(0, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906}
3907
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003908static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
Al Viro75907d42012-12-25 15:19:12 -05003909{
3910 /* Not even root can pretend to send signals from the kernel.
3911 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3912 */
Andrey Vagin66dd34a2013-02-27 17:03:12 -08003913 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003914 (task_pid_vnr(current) != pid))
Al Viro75907d42012-12-25 15:19:12 -05003915 return -EPERM;
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003916
Al Viro75907d42012-12-25 15:19:12 -05003917 /* POSIX.1b doesn't mention process groups. */
3918 return kill_proc_info(sig, info, pid);
3919}
3920
Randy Dunlap41c57892011-04-04 15:00:26 -07003921/**
3922 * sys_rt_sigqueueinfo - send signal information to a signal
3923 * @pid: the PID of the thread
3924 * @sig: signal to be sent
3925 * @uinfo: signal info to be sent
3926 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003927SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3928 siginfo_t __user *, uinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003929{
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003930 kernel_siginfo_t info;
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003931 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
Eric W. Biederman4cd2e0e2018-04-18 17:30:19 -05003932 if (unlikely(ret))
3933 return ret;
Al Viro75907d42012-12-25 15:19:12 -05003934 return do_rt_sigqueueinfo(pid, sig, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003935}
3936
Al Viro75907d42012-12-25 15:19:12 -05003937#ifdef CONFIG_COMPAT
Al Viro75907d42012-12-25 15:19:12 -05003938COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3939 compat_pid_t, pid,
3940 int, sig,
3941 struct compat_siginfo __user *, uinfo)
3942{
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003943 kernel_siginfo_t info;
Eric W. Biederman601d5ab2018-10-05 09:02:48 +02003944 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
Al Viro75907d42012-12-25 15:19:12 -05003945 if (unlikely(ret))
3946 return ret;
3947 return do_rt_sigqueueinfo(pid, sig, &info);
3948}
3949#endif
Al Viro75907d42012-12-25 15:19:12 -05003950
Eric W. Biedermanae7795b2018-09-25 11:27:20 +02003951static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003952{
3953 /* This is only valid for single tasks */
3954 if (pid <= 0 || tgid <= 0)
3955 return -EINVAL;
3956
3957 /* Not even root can pretend to send signals from the kernel.
Julien Tinnesda485242011-03-18 15:05:21 -07003958 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3959 */
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003960 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3961 (task_pid_vnr(current) != pid))
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003962 return -EPERM;
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003963
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003964 return do_send_specific(tgid, pid, sig, info);
3965}
3966
/* Native entry point: copy in the siginfo, then queue to one thread. */
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3976
Al Viro9aae8fc2012-12-24 23:12:04 -05003977#ifdef CONFIG_COMPAT
/* Compat entry point for rt_tgsigqueueinfo: decode a compat siginfo first. */
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3990#endif
3991
Oleg Nesterov03417292014-06-06 14:36:53 -07003992/*
Oleg Nesterovb4e74262014-06-06 14:37:00 -07003993 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
Oleg Nesterov03417292014-06-06 14:36:53 -07003994 */
Oleg Nesterovb4e74262014-06-06 14:37:00 -07003995void kernel_sigaction(int sig, __sighandler_t action)
Oleg Nesterov03417292014-06-06 14:36:53 -07003996{
Oleg Nesterovec5955b2014-06-06 14:36:57 -07003997 spin_lock_irq(&current->sighand->siglock);
Oleg Nesterovb4e74262014-06-06 14:37:00 -07003998 current->sighand->action[sig - 1].sa.sa_handler = action;
3999 if (action == SIG_IGN) {
4000 sigset_t mask;
4001
4002 sigemptyset(&mask);
4003 sigaddset(&mask, sig);
4004
4005 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4006 flush_sigqueue_mask(&mask, &current->pending);
4007 recalc_sigpending();
4008 }
Oleg Nesterov03417292014-06-06 14:36:53 -07004009 spin_unlock_irq(&current->sighand->siglock);
4010}
Oleg Nesterovb4e74262014-06-06 14:37:00 -07004011EXPORT_SYMBOL(kernel_sigaction);
Oleg Nesterov03417292014-06-06 14:36:53 -07004012
Dmitry Safonov68463512016-09-05 16:33:08 +03004013void __weak sigaction_compat_abi(struct k_sigaction *act,
4014 struct k_sigaction *oact)
4015{
4016}
4017
/*
 * Core of the sigaction() family: query and/or replace the action for
 * @sig in the current process.
 *
 * @sig:  signal number to operate on
 * @act:  new action, or NULL to only query; sa_flags is masked to the
 *        uapi-visible bits in place
 * @oact: if non-NULL, receives the previous action
 *
 * Returns 0 on success, -EINVAL for an invalid signal or an attempt to
 * change SIGKILL/SIGSTOP.  Runs under the process-wide siglock so the
 * read-modify of the shared action table is atomic for all threads.
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	/* sig_kernel_only(): SIGKILL/SIGSTOP actions may never be changed */
	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/*
	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
	 * e.g. by having an architecture use the bit in their uapi.
	 */
	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);

	/*
	 * Clear unknown flag bits in order to allow userspace to detect missing
	 * support for flag bits and to allow the kernel to use non-uapi bits
	 * internally.
	 */
	if (act)
		act->sa.sa_flags &= UAPI_SA_FLAGS;
	if (oact)
		oact->sa.sa_flags &= UAPI_SA_FLAGS;

	/* Let the architecture adjust either action for its compat ABI */
	sigaction_compat_abi(act, oact);

	if (act) {
		/* SIGKILL and SIGSTOP can never be masked by a handler */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 * "Setting a signal action to SIG_IGN for a signal that is
		 * pending shall cause the pending signal to be discarded,
		 * whether or not it is blocked."
		 *
		 * "Setting a signal action to SIG_DFL for a signal that is
		 * pending and whose default action is to ignore the signal
		 * (for example, SIGCHLD), shall cause the pending signal to
		 * be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* discard from the shared queue and from every thread */
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
4078
/*
 * Common implementation for the sigaltstack() variants.
 *
 * @ss:   new alternate stack settings, or NULL to only query
 * @oss:  if non-NULL, receives the current settings (zeroed first so no
 *        stack garbage can leak through padding)
 * @sp:   caller's current user stack pointer, used to refuse changes
 *        while executing on the alternate stack
 * @min_ss_size: minimum acceptable stack size (MINSIGSTKSZ or the
 *        compat equivalent)
 *
 * Returns 0, -EPERM (currently on the sigstack), -EINVAL (bad flags) or
 * -ENOMEM (stack too small).  Note the query part runs before any
 * validation, so @oss is filled in even when the update is rejected.
 */
static int
do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
		size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		/* report SS_ONSTACK/SS_DISABLE plus any sticky flag bits */
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		/* can't change the altstack while running on it */
		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		/* mode = flags minus the sticky bits (e.g. SS_AUTODISARM) */
		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}
Al Virobcfe8ad2017-05-27 00:29:34 -04004121
Al Viro6bf9adf2012-12-14 14:09:47 -05004122SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4123{
Al Virobcfe8ad2017-05-27 00:29:34 -04004124 stack_t new, old;
4125 int err;
4126 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4127 return -EFAULT;
4128 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
Will Deacon22839862018-09-05 15:34:42 +01004129 current_user_stack_pointer(),
4130 MINSIGSTKSZ);
Al Virobcfe8ad2017-05-27 00:29:34 -04004131 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4132 err = -EFAULT;
4133 return err;
Al Viro6bf9adf2012-12-14 14:09:47 -05004134}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135
/*
 * Restore the alternate-stack state saved in a signal frame; used by
 * arch sigreturn paths.  Only a faulting read of @uss is reported; any
 * failure inside do_sigaltstack() is intentionally ignored.
 */
int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}
4146
/*
 * Save the current task's alternate-stack settings into the user-space
 * stack_t @uss (part of a signal frame being built at @sp).  If the
 * stack was registered with SS_AUTODISARM, it is disarmed now so a
 * nested signal won't reuse it; restore_altstack() re-arms it on return.
 * Returns 0 or the (nonzero) result of a failed __put_user().
 */
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	/* OR the three results: any fault makes err nonzero */
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
4159
Al Viro90268432012-12-14 14:47:53 -05004160#ifdef CONFIG_COMPAT
/*
 * Compat counterpart of sys_sigaltstack(): translate the 32-bit
 * compat_stack_t to/from a native stack_t around do_sigaltstack().
 * Note the old settings are copied out even when uss_ptr was rejected
 * with a positive-or-zero... i.e. only on non-negative return.
 */
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		/* widen the compat fields to the native stack_t */
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			compat_user_stack_pointer(),
			COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr)  {
		compat_stack_t old;
		/* zero first so padding never leaks kernel stack bytes */
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}
4189
/* compat sigaltstack(2): thin wrapper around do_compat_sigaltstack() */
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}
4196
Al Viro90268432012-12-14 14:47:53 -05004197int compat_restore_altstack(const compat_stack_t __user *uss)
4198{
Dominik Brodowski6203deb2018-03-17 17:11:51 +01004199 int err = do_compat_sigaltstack(uss, NULL);
Al Viro90268432012-12-14 14:47:53 -05004200 /* squash all but -EFAULT for now */
4201 return err == -EFAULT ? err : 0;
4202}
Al Viroc40702c2012-11-20 14:24:26 -05004203
/*
 * Compat counterpart of __save_altstack(): store the task's altstack
 * settings into a user compat_stack_t and, on success, disarm an
 * SS_AUTODISARM stack for the duration of the handler.
 */
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	/* OR the three results: any fault makes err nonzero */
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
Al Viro90268432012-12-14 14:47:53 -05004218#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219
4220#ifdef __ARCH_WANT_SYS_SIGPENDING
4221
/**
 * sys_sigpending - examine pending signals
 * @uset: where mask of pending signal is returned
 *
 * Legacy variant: only the low word of the pending set is copied out,
 * as an old_sigset_t.
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	/* compile-time sanity: old_sigset_t must fit in the user slot */
	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}
4240
Al Viro8f136212017-05-31 04:42:07 -04004241#ifdef CONFIG_COMPAT
/* compat sigpending(2): return the low word of the pending set */
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
4250#endif
4251
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252#endif
4253
4254#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 *
 * Legacy single-word variant: only blocked.sig[0] is read or replaced;
 * the rest of the sigset_t is preserved for SIG_SETMASK.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	/* snapshot before any modification so @oset sees the old value */
	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			/* only the low word is replaced */
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		/* filters SIGKILL/SIGSTOP and retargets shared signals */
		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
4303#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4304
Al Viroeaca6ea2012-11-25 23:12:10 -05004305#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal to be sent
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 *
 * Copies the user sigactions in/out around do_sigaction(), which does
 * the validation and the actual update.
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
Al Viro08d32fe2012-12-25 18:38:15 -05004337#ifdef CONFIG_COMPAT
/*
 * compat rt_sigaction(2): field-by-field translation between the compat
 * sigaction layout (compat pointers, compat sigset) and the native
 * k_sigaction, around do_sigaction().  get_user/put_user results are
 * OR-ed together; any fault yields -EFAULT.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
4381#endif
Al Viroeaca6ea2012-11-25 23:12:10 -05004382#endif /* !CONFIG_ODD_RT_SIGACTION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004383
Al Viro495dfbf2012-12-25 19:09:45 -05004384#ifdef CONFIG_OLD_SIGACTION
/*
 * Legacy sigaction(2) for architectures that keep the old struct
 * old_sigaction (single-word sa_mask, sa_restorer).  access_ok() plus
 * __get_user/__put_user copies the fields individually; any fault
 * yields -EFAULT.
 */
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
	        struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* old ABI carries only one mask word */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
4419#endif
4420#ifdef CONFIG_COMPAT_OLD_SIGACTION
/*
 * Compat variant of the legacy sigaction(2): same per-field copy scheme
 * as the native version, but handler/restorer travel as compat_uptr_t
 * and must be converted with compat_ptr()/ptr_to_compat().
 */
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
	        struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		/* old ABI carries only one mask word */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
4460#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461
Fabian Frederickf6187762014-06-04 16:11:12 -07004462#ifdef CONFIG_SGETMASK_SYSCALL
Linus Torvalds1da177e2005-04-16 15:20:36 -07004463
/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 * Returns the low word of the caller's blocked-signal mask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}
4472
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01004473SYSCALL_DEFINE1(ssetmask, int, newmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004474{
Oleg Nesterovc1095c62011-07-27 12:49:44 -07004475 int old = current->blocked.sig[0];
4476 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477
Oleg Nesterov5ba53ff2013-01-05 19:13:13 +01004478 siginitset(&newset, newmask);
Oleg Nesterovc1095c62011-07-27 12:49:44 -07004479 set_current_blocked(&newset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004480
4481 return old;
4482}
Fabian Frederickf6187762014-06-04 16:11:12 -07004483#endif /* CONFIG_SGETMASK_SYSCALL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004484
4485#ifdef __ARCH_WANT_SYS_SIGNAL
4486/*
4487 * For backwards compatibility. Functionality superseded by sigaction.
4488 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01004489SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490{
4491 struct k_sigaction new_sa, old_sa;
4492 int ret;
4493
4494 new_sa.sa.sa_handler = handler;
4495 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
Oleg Nesterovc70d3d702006-02-09 22:41:41 +03004496 sigemptyset(&new_sa.sa.sa_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004497
4498 ret = do_sigaction(sig, &new_sa, &old_sa);
4499
4500 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4501}
4502#endif /* __ARCH_WANT_SYS_SIGNAL */
4503
4504#ifdef __ARCH_WANT_SYS_PAUSE
4505
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01004506SYSCALL_DEFINE0(pause)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507{
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02004508 while (!signal_pending(current)) {
Davidlohr Bueso1df01352015-02-17 13:45:41 -08004509 __set_current_state(TASK_INTERRUPTIBLE);
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02004510 schedule();
4511 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512 return -ERESTARTNOHAND;
4513}
4514
4515#endif
4516
/*
 * Common body of the sigsuspend() family: atomically install @set as
 * the blocked mask and sleep until a signal is pending.  The original
 * mask is stashed in saved_sigmask *before* the switch and restored by
 * the signal-delivery code via set_restore_sigmask().
 */
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	/* tell the delivery path to restore saved_sigmask afterwards */
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
Al Viro68f3f162012-05-21 21:42:32 -04004529
/**
 * sys_rt_sigsuspend - replace the signal mask for a value with the
 *	@unewset value until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
Al Viroad4b65a2012-12-24 21:43:56 -05004548
4549#ifdef CONFIG_COMPAT
/* compat rt_sigsuspend(2): widen the compat sigset, then sigsuspend() */
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
4562#endif
David Woodhouse150256d2006-01-18 17:43:57 -08004563
Al Viro0a0e8cd2012-12-25 16:04:12 -05004564#ifdef CONFIG_OLD_SIGSUSPEND
4565SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4566{
4567 sigset_t blocked;
4568 siginitset(&blocked, mask);
4569 return sigsuspend(&blocked);
4570}
4571#endif
4572#ifdef CONFIG_OLD_SIGSUSPEND3
4573SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4574{
4575 sigset_t blocked;
4576 siginitset(&blocked, mask);
4577 return sigsuspend(&blocked);
4578}
4579#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07004580
/*
 * Default (__weak) arch_vma_name(): no special name for any VMA.
 * Architectures override this to label regions such as the vdso.
 */
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
4585
/*
 * Compile-time layout checks for siginfo: every _sigfault/_kill/... union
 * member must sit at the same offset in the uapi siginfo_t and the
 * kernel_siginfo_t, and the usb-asyncio compat quirk (si_addr aliasing
 * si_pid/si_uid) must hold.  All checks are BUILD_BUG_ON — zero runtime
 * cost; a mismatch breaks the build.
 */
static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}
4657
/*
 * Boot-time init: run the siginfo layout checks and create the slab
 * cache used for queued signals (SLAB_PANIC: boot fails if this fails).
 */
void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
4664
4665#ifdef CONFIG_KGDB_KDB
4666#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 *
 * Only trylock is used: kdb may be running with arbitrary locks held,
 * so blocking on siglock could deadlock the debugger.  A first attempt
 * against a non-running task is refused; repeating the same kill
 * command (kdb_prev_t == t) proceeds at the operator's risk.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	/* remembers the target of the previous kill, across invocations */
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
4703#endif /* CONFIG_KGDB_KDB */