/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 *            Changes to use preallocated sigqueue structures
 *            to allow signals to be sent reliably.
 */
12
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/slab.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040014#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
Alex Kelly179899f2012-10-04 17:15:24 -070020#include <linux/coredump.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/security.h>
22#include <linux/syscalls.h>
23#include <linux/ptrace.h>
Jesper Juhl7ed20e12005-05-01 08:59:14 -070024#include <linux/signal.h>
Davide Libenzifba2afa2007-05-10 22:23:13 -070025#include <linux/signalfd.h>
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090026#include <linux/ratelimit.h>
Roland McGrath35de2542008-07-25 19:45:51 -070027#include <linux/tracehook.h>
Randy.Dunlapc59ede72006-01-11 12:17:46 -080028#include <linux/capability.h>
Nigel Cunningham7dfb7102006-12-06 20:34:23 -080029#include <linux/freezer.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080030#include <linux/pid_namespace.h>
31#include <linux/nsproxy.h>
Serge E. Hallyn6b550f92012-01-10 15:11:37 -080032#include <linux/user_namespace.h>
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +053033#include <linux/uprobes.h>
Al Viro90268432012-12-14 14:47:53 -050034#include <linux/compat.h>
Jesper Derehag2b5faa42013-03-19 20:50:05 +000035#include <linux/cn_proc.h>
Gideon Israel Dsouza52f56842014-04-07 15:39:20 -070036#include <linux/compiler.h>
37
Masami Hiramatsud1eb6502009-11-24 16:56:45 -050038#define CREATE_TRACE_POINTS
39#include <trace/events/signal.h>
Sukadev Bhattiprolu84d73782006-12-08 02:38:01 -080040
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <asm/param.h>
42#include <asm/uaccess.h>
43#include <asm/unistd.h>
44#include <asm/siginfo.h>
David Howellsd550bbd2012-03-28 18:30:03 +010045#include <asm/cacheflush.h>
Al Viroe1396062006-05-25 10:19:47 -040046#include "audit.h" /* audit_signal_info() */
Linus Torvalds1da177e2005-04-16 15:20:36 -070047
48/*
49 * SLAB caches for signal bits.
50 */
51
Christoph Lametere18b8902006-12-06 20:33:20 -080052static struct kmem_cache *sigqueue_cachep;
Linus Torvalds1da177e2005-04-16 15:20:36 -070053
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +090054int print_fatal_signals __read_mostly;
55
Roland McGrath35de2542008-07-25 19:45:51 -070056static void __user *sig_handler(struct task_struct *t, int sig)
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070057{
Roland McGrath35de2542008-07-25 19:45:51 -070058 return t->sighand->action[sig - 1].sa.sa_handler;
59}
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070060
Roland McGrath35de2542008-07-25 19:45:51 -070061static int sig_handler_ignored(void __user *handler, int sig)
62{
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070063 /* Is it explicitly or implicitly ignored? */
Pavel Emelyanov93585ee2008-04-30 00:52:39 -070064 return handler == SIG_IGN ||
65 (handler == SIG_DFL && sig_kernel_ignore(sig));
66}
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
Oleg Nesterovdef8cf72012-03-23 15:02:45 -070068static int sig_task_ignored(struct task_struct *t, int sig, bool force)
Linus Torvalds1da177e2005-04-16 15:20:36 -070069{
Roland McGrath35de2542008-07-25 19:45:51 -070070 void __user *handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
Oleg Nesterovf008faf2009-04-02 16:58:02 -070072 handler = sig_handler(t, sig);
73
Eric W. Biederman1f7d8a22018-07-19 19:47:27 -050074 /* SIGKILL and SIGSTOP may not be sent to the global init */
75 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
76 return true;
77
Oleg Nesterovf008faf2009-04-02 16:58:02 -070078 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
Oleg Nesterov794ac8e2017-11-17 15:30:04 -080079 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
Oleg Nesterovf008faf2009-04-02 16:58:02 -070080 return 1;
81
Eric W. Biedermana3714572019-08-16 12:33:54 -050082 /* Only allow kernel generated signals to this kthread */
83 if (unlikely((t->flags & PF_KTHREAD) &&
84 (handler == SIG_KTHREAD_KERNEL) && !force))
85 return true;
86
Oleg Nesterovf008faf2009-04-02 16:58:02 -070087 return sig_handler_ignored(handler, sig);
88}
89
Oleg Nesterovdef8cf72012-03-23 15:02:45 -070090static int sig_ignored(struct task_struct *t, int sig, bool force)
Oleg Nesterovf008faf2009-04-02 16:58:02 -070091{
Linus Torvalds1da177e2005-04-16 15:20:36 -070092 /*
93 * Blocked signals are never ignored, since the
94 * signal handler may change by the time it is
95 * unblocked.
96 */
Roland McGrath325d22d2007-11-12 15:41:55 -080097 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -070098 return 0;
99
Oleg Nesterov1453b3a2017-11-17 15:30:01 -0800100 /*
101 * Tracers may want to know about even ignored signal unless it
102 * is SIGKILL which can't be reported anyway but can be ignored
103 * by SIGNAL_UNKILLABLE task.
104 */
105 if (t->ptrace && sig != SIGKILL)
Roland McGrath35de2542008-07-25 19:45:51 -0700106 return 0;
107
Oleg Nesterov1453b3a2017-11-17 15:30:01 -0800108 return sig_task_ignored(t, sig, force);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109}
110
111/*
112 * Re-calculate pending state from the set of locally pending
113 * signals, globally pending signals, and blocked signals.
114 */
115static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
116{
117 unsigned long ready;
118 long i;
119
120 switch (_NSIG_WORDS) {
121 default:
122 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
123 ready |= signal->sig[i] &~ blocked->sig[i];
124 break;
125
126 case 4: ready = signal->sig[3] &~ blocked->sig[3];
127 ready |= signal->sig[2] &~ blocked->sig[2];
128 ready |= signal->sig[1] &~ blocked->sig[1];
129 ready |= signal->sig[0] &~ blocked->sig[0];
130 break;
131
132 case 2: ready = signal->sig[1] &~ blocked->sig[1];
133 ready |= signal->sig[0] &~ blocked->sig[0];
134 break;
135
136 case 1: ready = signal->sig[0] &~ blocked->sig[0];
137 }
138 return ready != 0;
139}
140
141#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
142
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700143static int recalc_sigpending_tsk(struct task_struct *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700144{
Tejun Heo3759a0d2011-06-02 11:14:00 +0200145 if ((t->jobctl & JOBCTL_PENDING_MASK) ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146 PENDING(&t->pending, &t->blocked) ||
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700147 PENDING(&t->signal->shared_pending, &t->blocked)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700148 set_tsk_thread_flag(t, TIF_SIGPENDING);
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700149 return 1;
150 }
Roland McGrathb74d0de2007-06-06 03:59:00 -0700151 /*
152 * We must never clear the flag in another thread, or in current
153 * when it's possible the current syscall is returning -ERESTART*.
154 * So we don't clear it here, and only callers who know they should do.
155 */
Roland McGrath7bb44ad2007-05-23 13:57:44 -0700156 return 0;
157}
158
/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
168
169void recalc_sigpending(void)
170{
Tejun Heodd1d6772011-06-02 11:14:00 +0200171 if (!recalc_sigpending_tsk(current) && !freezing(current))
Roland McGrathb74d0de2007-06-06 03:59:00 -0700172 clear_thread_flag(TIF_SIGPENDING);
173
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174}
175
176/* Given the mask, find the first available signal that should be serviced. */
177
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800178#define SYNCHRONOUS_MASK \
179 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
Will Drewrya0727e82012-04-12 16:48:00 -0500180 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800181
Davide Libenzifba2afa2007-05-10 22:23:13 -0700182int next_signal(struct sigpending *pending, sigset_t *mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183{
184 unsigned long i, *s, *m, x;
185 int sig = 0;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900186
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187 s = pending->signal.sig;
188 m = mask->sig;
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800189
190 /*
191 * Handle the first word specially: it contains the
192 * synchronous signals that need to be dequeued first.
193 */
194 x = *s &~ *m;
195 if (x) {
196 if (x & SYNCHRONOUS_MASK)
197 x &= SYNCHRONOUS_MASK;
198 sig = ffz(~x) + 1;
199 return sig;
200 }
201
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 switch (_NSIG_WORDS) {
203 default:
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800204 for (i = 1; i < _NSIG_WORDS; ++i) {
205 x = *++s &~ *++m;
206 if (!x)
207 continue;
208 sig = ffz(~x) + i*_NSIG_BPW + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209 break;
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800210 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211 break;
212
Linus Torvaldsa27341c2010-03-02 08:36:46 -0800213 case 2:
214 x = s[1] &~ m[1];
215 if (!x)
216 break;
217 sig = ffz(~x) + _NSIG_BPW + 1;
218 break;
219
220 case 1:
221 /* Nothing to do */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222 break;
223 }
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900224
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225 return sig;
226}
227
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900228static inline void print_dropped_signal(int sig)
229{
230 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
231
232 if (!print_fatal_signals)
233 return;
234
235 if (!__ratelimit(&ratelimit_state))
236 return;
237
Wang Xiaoqiang747800e2016-05-23 16:23:59 -0700238 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900239 current->comm, current->pid, sig);
240}
241
Tejun Heoe5c19022011-03-23 10:37:00 +0100242/**
Tejun Heo7dd3db52011-06-02 11:14:00 +0200243 * task_set_jobctl_pending - set jobctl pending bits
244 * @task: target task
245 * @mask: pending bits to set
246 *
247 * Clear @mask from @task->jobctl. @mask must be subset of
248 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
249 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
250 * cleared. If @task is already being killed or exiting, this function
251 * becomes noop.
252 *
253 * CONTEXT:
254 * Must be called with @task->sighand->siglock held.
255 *
256 * RETURNS:
257 * %true if @mask is set, %false if made noop because @task was dying.
258 */
Palmer Dabbeltb76808e2015-04-30 21:19:57 -0700259bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
Tejun Heo7dd3db52011-06-02 11:14:00 +0200260{
261 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
262 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
263 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
264
265 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
266 return false;
267
268 if (mask & JOBCTL_STOP_SIGMASK)
269 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
270
271 task->jobctl |= mask;
272 return true;
273}
274
275/**
Tejun Heoa8f072c2011-06-02 11:13:59 +0200276 * task_clear_jobctl_trapping - clear jobctl trapping bit
Tejun Heod79fdd62011-03-23 10:37:00 +0100277 * @task: target task
278 *
Tejun Heoa8f072c2011-06-02 11:13:59 +0200279 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
280 * Clear it and wake up the ptracer. Note that we don't need any further
281 * locking. @task->siglock guarantees that @task->parent points to the
282 * ptracer.
Tejun Heod79fdd62011-03-23 10:37:00 +0100283 *
284 * CONTEXT:
285 * Must be called with @task->sighand->siglock held.
286 */
Tejun Heo73ddff22011-06-14 11:20:14 +0200287void task_clear_jobctl_trapping(struct task_struct *task)
Tejun Heod79fdd62011-03-23 10:37:00 +0100288{
Tejun Heoa8f072c2011-06-02 11:13:59 +0200289 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
290 task->jobctl &= ~JOBCTL_TRAPPING;
Oleg Nesterov650226b2014-06-06 14:36:44 -0700291 smp_mb(); /* advised by wake_up_bit() */
Tejun Heo62c124f2011-06-02 11:14:00 +0200292 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
Tejun Heod79fdd62011-03-23 10:37:00 +0100293 }
294}
295
296/**
Tejun Heo3759a0d2011-06-02 11:14:00 +0200297 * task_clear_jobctl_pending - clear jobctl pending bits
Tejun Heoe5c19022011-03-23 10:37:00 +0100298 * @task: target task
Tejun Heo3759a0d2011-06-02 11:14:00 +0200299 * @mask: pending bits to clear
Tejun Heoe5c19022011-03-23 10:37:00 +0100300 *
Tejun Heo3759a0d2011-06-02 11:14:00 +0200301 * Clear @mask from @task->jobctl. @mask must be subset of
302 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
303 * STOP bits are cleared together.
Tejun Heoe5c19022011-03-23 10:37:00 +0100304 *
Tejun Heo6dfca322011-06-02 11:14:00 +0200305 * If clearing of @mask leaves no stop or trap pending, this function calls
306 * task_clear_jobctl_trapping().
Tejun Heoe5c19022011-03-23 10:37:00 +0100307 *
308 * CONTEXT:
309 * Must be called with @task->sighand->siglock held.
310 */
Palmer Dabbeltb76808e2015-04-30 21:19:57 -0700311void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
Tejun Heoe5c19022011-03-23 10:37:00 +0100312{
Tejun Heo3759a0d2011-06-02 11:14:00 +0200313 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
314
315 if (mask & JOBCTL_STOP_PENDING)
316 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
317
318 task->jobctl &= ~mask;
Tejun Heo6dfca322011-06-02 11:14:00 +0200319
320 if (!(task->jobctl & JOBCTL_PENDING_MASK))
321 task_clear_jobctl_trapping(task);
Tejun Heoe5c19022011-03-23 10:37:00 +0100322}
323
324/**
325 * task_participate_group_stop - participate in a group stop
326 * @task: task participating in a group stop
327 *
Tejun Heoa8f072c2011-06-02 11:13:59 +0200328 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
Tejun Heo39efa3e2011-03-23 10:37:00 +0100329 * Group stop states are cleared and the group stop count is consumed if
Tejun Heoa8f072c2011-06-02 11:13:59 +0200330 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
Tejun Heo39efa3e2011-03-23 10:37:00 +0100331 * stop, the appropriate %SIGNAL_* flags are set.
Tejun Heoe5c19022011-03-23 10:37:00 +0100332 *
333 * CONTEXT:
334 * Must be called with @task->sighand->siglock held.
Tejun Heo244056f2011-03-23 10:37:01 +0100335 *
336 * RETURNS:
337 * %true if group stop completion should be notified to the parent, %false
338 * otherwise.
Tejun Heoe5c19022011-03-23 10:37:00 +0100339 */
340static bool task_participate_group_stop(struct task_struct *task)
341{
342 struct signal_struct *sig = task->signal;
Tejun Heoa8f072c2011-06-02 11:13:59 +0200343 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
Tejun Heoe5c19022011-03-23 10:37:00 +0100344
Tejun Heoa8f072c2011-06-02 11:13:59 +0200345 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
Tejun Heo39efa3e2011-03-23 10:37:00 +0100346
Tejun Heo3759a0d2011-06-02 11:14:00 +0200347 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
Tejun Heoe5c19022011-03-23 10:37:00 +0100348
349 if (!consume)
350 return false;
351
352 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
353 sig->group_stop_count--;
354
Tejun Heo244056f2011-03-23 10:37:01 +0100355 /*
356 * Tell the caller to notify completion iff we are entering into a
357 * fresh group stop. Read comment in do_signal_stop() for details.
358 */
359 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
Jamie Iles916a05b2017-01-10 16:57:54 -0800360 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
Tejun Heoe5c19022011-03-23 10:37:00 +0100361 return true;
362 }
363 return false;
364}
365
David Howellsc69e8d92008-11-14 10:39:19 +1100366/*
367 * allocate a new signal queue record
368 * - this may be called without locks if and only if t == current, otherwise an
Randy Dunlap5aba0852011-04-04 14:59:31 -0700369 * appropriate lock must be held to stop the target task from exiting
David Howellsc69e8d92008-11-14 10:39:19 +1100370 */
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900371static struct sigqueue *
372__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373{
374 struct sigqueue *q = NULL;
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800375 struct user_struct *user;
Linus Torvalds43062592020-02-24 12:47:14 -0800376 int sigpending;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800378 /*
Thomas Gleixner7cf7db82009-12-10 00:53:21 +0000379 * Protect access to @t credentials. This can go away when all
380 * callers hold rcu read lock.
Linus Torvalds43062592020-02-24 12:47:14 -0800381 *
382 * NOTE! A pending signal will hold on to the user refcount,
383 * and we get/put the refcount only when the sigpending count
384 * changes from/to zero.
Linus Torvalds10b1fbd2006-11-04 13:03:00 -0800385 */
Thomas Gleixner7cf7db82009-12-10 00:53:21 +0000386 rcu_read_lock();
Linus Torvalds43062592020-02-24 12:47:14 -0800387 user = __task_cred(t)->user;
388 sigpending = atomic_inc_return(&user->sigpending);
389 if (sigpending == 1)
390 get_uid(user);
Thomas Gleixner7cf7db82009-12-10 00:53:21 +0000391 rcu_read_unlock();
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900392
Linus Torvalds43062592020-02-24 12:47:14 -0800393 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394 q = kmem_cache_alloc(sigqueue_cachep, flags);
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +0900395 } else {
396 print_dropped_signal(sig);
397 }
398
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399 if (unlikely(q == NULL)) {
Linus Torvalds43062592020-02-24 12:47:14 -0800400 if (atomic_dec_and_test(&user->sigpending))
401 free_uid(user);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 } else {
403 INIT_LIST_HEAD(&q->list);
404 q->flags = 0;
David Howellsd84f4f92008-11-14 10:39:23 +1100405 q->user = user;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406 }
David Howellsd84f4f92008-11-14 10:39:23 +1100407
408 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409}
410
Andrew Morton514a01b2006-02-03 03:04:41 -0800411static void __sigqueue_free(struct sigqueue *q)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412{
413 if (q->flags & SIGQUEUE_PREALLOC)
414 return;
Linus Torvalds43062592020-02-24 12:47:14 -0800415 if (atomic_dec_and_test(&q->user->sigpending))
416 free_uid(q->user);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417 kmem_cache_free(sigqueue_cachep, q);
418}
419
Oleg Nesterov6a14c5c2006-03-28 16:11:18 -0800420void flush_sigqueue(struct sigpending *queue)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421{
422 struct sigqueue *q;
423
424 sigemptyset(&queue->signal);
425 while (!list_empty(&queue->list)) {
426 q = list_entry(queue->list.next, struct sigqueue , list);
427 list_del_init(&q->list);
428 __sigqueue_free(q);
429 }
430}
431
432/*
Oleg Nesterov9e7c8f82015-06-04 16:22:16 -0400433 * Flush all pending signals for this kthread.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700434 */
Oleg Nesterovc81addc2006-03-28 16:11:17 -0800435void flush_signals(struct task_struct *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700436{
437 unsigned long flags;
438
439 spin_lock_irqsave(&t->sighand->siglock, flags);
Oleg Nesterov9e7c8f82015-06-04 16:22:16 -0400440 clear_tsk_thread_flag(t, TIF_SIGPENDING);
441 flush_sigqueue(&t->pending);
442 flush_sigqueue(&t->signal->shared_pending);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700443 spin_unlock_irqrestore(&t->sighand->siglock, flags);
444}
445
Oleg Nesterovcbaffba2008-05-26 20:55:42 +0400446static void __flush_itimer_signals(struct sigpending *pending)
447{
448 sigset_t signal, retain;
449 struct sigqueue *q, *n;
450
451 signal = pending->signal;
452 sigemptyset(&retain);
453
454 list_for_each_entry_safe(q, n, &pending->list, list) {
455 int sig = q->info.si_signo;
456
457 if (likely(q->info.si_code != SI_TIMER)) {
458 sigaddset(&retain, sig);
459 } else {
460 sigdelset(&signal, sig);
461 list_del_init(&q->list);
462 __sigqueue_free(q);
463 }
464 }
465
466 sigorsets(&pending->signal, &signal, &retain);
467}
468
469void flush_itimer_signals(void)
470{
471 struct task_struct *tsk = current;
472 unsigned long flags;
473
474 spin_lock_irqsave(&tsk->sighand->siglock, flags);
475 __flush_itimer_signals(&tsk->pending);
476 __flush_itimer_signals(&tsk->signal->shared_pending);
477 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
478}
479
Oleg Nesterov10ab8252007-05-09 02:34:37 -0700480void ignore_signals(struct task_struct *t)
481{
482 int i;
483
484 for (i = 0; i < _NSIG; ++i)
485 t->sighand->action[i].sa.sa_handler = SIG_IGN;
486
487 flush_signals(t);
488}
489
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490/*
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491 * Flush all handlers for a task.
492 */
493
494void
495flush_signal_handlers(struct task_struct *t, int force_default)
496{
497 int i;
498 struct k_sigaction *ka = &t->sighand->action[0];
499 for (i = _NSIG ; i != 0 ; i--) {
500 if (force_default || ka->sa.sa_handler != SIG_IGN)
501 ka->sa.sa_handler = SIG_DFL;
502 ka->sa.sa_flags = 0;
Andrew Morton522cff12013-03-13 14:59:34 -0700503#ifdef __ARCH_HAS_SA_RESTORER
Kees Cook2ca39522013-03-13 14:59:33 -0700504 ka->sa.sa_restorer = NULL;
505#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 sigemptyset(&ka->sa.sa_mask);
507 ka++;
508 }
509}
510
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200511int unhandled_signal(struct task_struct *tsk, int sig)
512{
Roland McGrath445a91d2008-07-25 19:45:52 -0700513 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
Serge E. Hallynb460cbc2007-10-18 23:39:52 -0700514 if (is_global_init(tsk))
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200515 return 1;
Roland McGrath445a91d2008-07-25 19:45:52 -0700516 if (handler != SIG_IGN && handler != SIG_DFL)
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200517 return 0;
Tejun Heoa288eec2011-06-17 16:50:37 +0200518 /* if ptraced, let the tracer determine */
519 return !tsk->ptrace;
Masoud Asgharifard Sharbianiabd4f752007-07-22 11:12:28 +0200520}
521
Eric W. Biedermanf719f202017-06-13 04:31:16 -0500522static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
523 bool *resched_timer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700524{
525 struct sigqueue *q, *first = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526
Linus Torvalds1da177e2005-04-16 15:20:36 -0700527 /*
528 * Collect the siginfo appropriate to this signal. Check if
529 * there is another siginfo for the same signal.
530 */
531 list_for_each_entry(q, &list->list, list) {
532 if (q->info.si_signo == sig) {
Oleg Nesterovd4434202008-07-25 01:47:28 -0700533 if (first)
534 goto still_pending;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700535 first = q;
536 }
537 }
Oleg Nesterovd4434202008-07-25 01:47:28 -0700538
539 sigdelset(&list->signal, sig);
540
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541 if (first) {
Oleg Nesterovd4434202008-07-25 01:47:28 -0700542still_pending:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700543 list_del_init(&first->list);
544 copy_siginfo(info, &first->info);
Eric W. Biedermanf719f202017-06-13 04:31:16 -0500545
546 *resched_timer =
547 (first->flags & SIGQUEUE_PREALLOC) &&
548 (info->si_code == SI_TIMER) &&
549 (info->si_sys_private);
550
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 __sigqueue_free(first);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700552 } else {
Randy Dunlap5aba0852011-04-04 14:59:31 -0700553 /*
554 * Ok, it wasn't in the queue. This must be
555 * a fast-pathed signal or we must have been
556 * out of queue space. So zero out the info.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558 info->si_signo = sig;
559 info->si_errno = 0;
Oleg Nesterov7486e5d2009-12-15 16:47:24 -0800560 info->si_code = SI_USER;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561 info->si_pid = 0;
562 info->si_uid = 0;
563 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564}
565
566static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
Eric W. Biedermanf719f202017-06-13 04:31:16 -0500567 siginfo_t *info, bool *resched_timer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568{
Roland McGrath27d91e02006-09-29 02:00:31 -0700569 int sig = next_signal(pending, mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570
Oleg Nesterov2e01fab2015-11-06 16:32:19 -0800571 if (sig)
Eric W. Biedermanf719f202017-06-13 04:31:16 -0500572 collect_signal(sig, pending, info, resched_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 return sig;
574}
575
576/*
Randy Dunlap5aba0852011-04-04 14:59:31 -0700577 * Dequeue a signal and return the element to the caller, which is
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578 * expected to free it.
579 *
580 * All callers have to hold the siglock.
581 */
582int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
583{
Eric W. Biedermanf719f202017-06-13 04:31:16 -0500584 bool resched_timer = false;
Pavel Emelyanovc5363d02008-04-30 00:52:40 -0700585 int signr;
Benjamin Herrenschmidtcaec4e82007-06-12 08:16:18 +1000586
587 /* We only dequeue private signals from ourselves, we don't let
588 * signalfd steal them
589 */
Eric W. Biedermanf719f202017-06-13 04:31:16 -0500590 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800591 if (!signr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 signr = __dequeue_signal(&tsk->signal->shared_pending,
Eric W. Biedermanf719f202017-06-13 04:31:16 -0500593 mask, info, &resched_timer);
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800594 /*
595 * itimer signal ?
596 *
597 * itimers are process shared and we restart periodic
598 * itimers in the signal delivery path to prevent DoS
599 * attacks in the high resolution timer case. This is
Randy Dunlap5aba0852011-04-04 14:59:31 -0700600 * compliant with the old way of self-restarting
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800601 * itimers, as the SIGALRM is a legacy signal and only
602 * queued once. Changing the restart behaviour to
603 * restart the timer in the signal dequeue path is
604 * reducing the timer noise on heavy loaded !highres
605 * systems too.
606 */
607 if (unlikely(signr == SIGALRM)) {
608 struct hrtimer *tmr = &tsk->signal->real_timer;
609
610 if (!hrtimer_is_queued(tmr) &&
611 tsk->signal->it_real_incr.tv64 != 0) {
612 hrtimer_forward(tmr, tmr->base->get_time(),
613 tsk->signal->it_real_incr);
614 hrtimer_restart(tmr);
615 }
616 }
617 }
Pavel Emelyanovc5363d02008-04-30 00:52:40 -0700618
Davide Libenzib8fceee2007-09-20 12:40:16 -0700619 recalc_sigpending();
Pavel Emelyanovc5363d02008-04-30 00:52:40 -0700620 if (!signr)
621 return 0;
622
623 if (unlikely(sig_kernel_stop(signr))) {
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800624 /*
625 * Set a marker that we have dequeued a stop signal. Our
626 * caller might release the siglock and then the pending
627 * stop signal it is about to process is no longer in the
628 * pending bitmasks, but must still be cleared by a SIGCONT
629 * (and overruled by a SIGKILL). So those cases clear this
630 * shared flag after we've set it. Note that this flag may
631 * remain set after the signal we return is ignored or
632 * handled. That doesn't matter because its only purpose
633 * is to alert stop-signal processing code when another
634 * processor has come along and cleared the flag.
635 */
Tejun Heoa8f072c2011-06-02 11:13:59 +0200636 current->jobctl |= JOBCTL_STOP_DEQUEUED;
Thomas Gleixner8bfd9a72007-02-16 01:28:12 -0800637 }
Eric W. Biedermanf719f202017-06-13 04:31:16 -0500638 if (resched_timer) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639 /*
640 * Release the siglock to ensure proper locking order
641 * of timer locks outside of siglocks. Note, we leave
642 * irqs disabled here, since the posix-timers code is
643 * about to disable them again anyway.
644 */
645 spin_unlock(&tsk->sighand->siglock);
646 do_schedule_next_timer(info);
647 spin_lock(&tsk->sighand->siglock);
648 }
649 return signr;
650}
651
652/*
653 * Tell a process that it has a new active signal..
654 *
655 * NOTE! we rely on the previous spin_lock to
656 * lock interrupts for us! We can only be called with
657 * "siglock" held, and the local interrupt must
658 * have been disabled when that got acquired!
659 *
660 * No need to set need_resched since signal event passing
661 * goes through ->blocked
662 */
Oleg Nesterov910ffdb2013-01-21 20:47:41 +0100663void signal_wake_up_state(struct task_struct *t, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 set_tsk_thread_flag(t, TIF_SIGPENDING);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700666 /*
Oleg Nesterov910ffdb2013-01-21 20:47:41 +0100667 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
Matthew Wilcoxf021a3c2007-12-06 11:13:16 -0500668 * case. We don't check t->state here because there is a race with it
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669 * executing another processor and just now entering stopped state.
670 * By using wake_up_state, we ensure the process will wake up and
671 * handle its death signal.
672 */
Oleg Nesterov910ffdb2013-01-21 20:47:41 +0100673 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674 kick_process(t);
675}
676
677/*
678 * Remove signals in mask from the pending set and queue.
679 * Returns 1 if any signals were found.
680 *
681 * All callers must be holding the siglock.
George Anzinger71fabd52006-01-08 01:02:48 -0800682 */
Oleg Nesterovc09c1442014-06-06 14:36:50 -0700683static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
George Anzinger71fabd52006-01-08 01:02:48 -0800684{
685 struct sigqueue *q, *n;
686 sigset_t m;
687
688 sigandsets(&m, mask, &s->signal);
689 if (sigisemptyset(&m))
690 return 0;
691
Oleg Nesterov702a5072011-04-27 22:01:27 +0200692 sigandnsets(&s->signal, &s->signal, mask);
George Anzinger71fabd52006-01-08 01:02:48 -0800693 list_for_each_entry_safe(q, n, &s->list, list) {
694 if (sigismember(mask, q->info.si_signo)) {
695 list_del_init(&q->list);
696 __sigqueue_free(q);
697 }
698 }
699 return 1;
700}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701
Oleg Nesterov614c5172009-12-15 16:47:22 -0800702static inline int is_si_special(const struct siginfo *info)
703{
704 return info <= SEND_SIG_FORCED;
705}
706
707static inline bool si_fromuser(const struct siginfo *info)
708{
709 return info == SEND_SIG_NOINFO ||
710 (!is_si_special(info) && SI_FROMUSER(info));
711}
712
/*
 * Dequeue a pending synchronous (trap-style) signal from the current
 * task's private queue, ahead of any asynchronous signals.
 *
 * On success copies the siginfo into @info, removes the sigqueue entry,
 * and returns the signal number; returns 0 when no unblocked synchronous
 * signal is pending.  Runs under the caller's siglock (it manipulates
 * current->pending directly).
 */
static int dequeue_synchronous_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 * (Cheap word-0 test: all SYNCHRONOUS_MASK signals live in sig[0].)
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 * If so, the signal number must stay set in the pending bitmap.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	/* Last instance of this signal: clear its pending bit. */
	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}
754
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755/*
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700756 * called with RCU read lock from check_kill_permission()
757 */
758static int kill_ok_by_cred(struct task_struct *t)
759{
760 const struct cred *cred = current_cred();
761 const struct cred *tcred = __task_cred(t);
762
Eric W. Biederman5af66202012-03-03 20:21:47 -0800763 if (uid_eq(cred->euid, tcred->suid) ||
764 uid_eq(cred->euid, tcred->uid) ||
765 uid_eq(cred->uid, tcred->suid) ||
766 uid_eq(cred->uid, tcred->uid))
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700767 return 1;
768
Eric W. Biedermanc4a4d602011-11-16 23:15:31 -0800769 if (ns_capable(tcred->user_ns, CAP_KILL))
Serge E. Hallyn39fd3392011-03-23 16:43:19 -0700770 return 1;
771
772 return 0;
773}
774
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 *
 * Returns 0 if @current may send @sig to @t, -EINVAL for an invalid
 * signal number, -EPERM on a credential mismatch, or whatever the
 * audit/security hooks report.
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	/* Kernel-originated signals bypass the permission checks. */
	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 * SIGCONT within the same session is always allowed.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	/* Finally let the LSM veto the signal. */
	return security_task_kill(t, info, sig, 0);
}
813
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken. If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event. If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	/* Only valid for PTRACE_SEIZE'd tracees; see kernel-doc above. */
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	/* Force a wake-up only when the ptracer is in PTRACE_LISTEN. */
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
839
/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling. This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 *
 * Called with p->sighand->siglock held (it walks and edits the per-thread
 * and shared pending queues).
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		/* Group is dumping core: only SIGKILL gets through. */
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal. Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			/* SEIZE'd tracees re-trap instead of simply waking. */
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	/* Deliver unless the signal is ignored (or forced past that check). */
	return !sig_ignored(p, sig, force);
}
914
Oleg Nesterov71f11dc2008-04-30 00:52:53 -0700915/*
916 * Test if P wants to take SIG. After we've checked all threads with this,
917 * it's equivalent to finding no threads not blocking SIG. Any threads not
918 * blocking SIG were ruled out because they are not running and already
919 * have pending signals. Such threads will dequeue from the shared queue
920 * as soon as they're available, so putting the signal on the shared queue
921 * will be equivalent to sending it to one such thread.
922 */
923static inline int wants_signal(int sig, struct task_struct *p)
924{
925 if (sigismember(&p->blocked, sig))
926 return 0;
927 if (p->flags & PF_EXITING)
928 return 0;
929 if (sig == SIGKILL)
930 return 1;
931 if (task_is_stopped_or_traced(p))
932 return 0;
933 return task_curr(p) || !signal_pending(p);
934}
935
/*
 * Second half of signal generation: the signal is already queued (see
 * __send_signal()); pick a thread to wake so it gets dequeued, and for
 * fatal non-coredump signals start an immediate whole-group exit.
 * Called with p->sighand->siglock held.
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 * curr_target rotates so waking duty is spread around.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
1011
Pavel Emelyanovaf7fff92008-04-30 00:52:34 -07001012static inline int legacy_queue(struct sigpending *signals, int sig)
1013{
1014 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1015}
1016
#ifdef CONFIG_USER_NS
/*
 * Translate info->si_uid into the receiving task's user namespace, so the
 * target sees a uid meaningful in its own namespace.  Kernel-generated
 * siginfo and same-namespace senders need no fixup.
 */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
/* Without user namespaces there is only one uid space: nothing to fix up. */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
1037
/*
 * Core signal queueing.  Queues @sig on @t's private pending list, or on
 * the thread group's shared list when @group is set, then calls
 * complete_signal() to pick a thread to wake.  Fabricates siginfo for the
 * magic SEND_SIG_NOINFO/SEND_SIG_PRIV values; @from_ancestor_ns zeroes
 * si_pid since the sender's pid is meaningless in @t's pid namespace.
 *
 * Returns 0 on success (including silent info loss) or -EAGAIN when an
 * rt-signal queue overflows.  Caller must hold t->sighand->siglock.
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;	/* result feeds the signal_generate tracepoint */

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV) || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			/* Looks like a user-sent kill(): synthesize SI_USER info. */
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			/* Kernel-internal send: SI_KERNEL, no pid/uid. */
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
1141
/*
 * Wrapper around __send_signal() that computes @from_ancestor_ns: non-zero
 * for a user-originated signal when current has no pid in @t's active pid
 * namespace (i.e. the sender lives outside/above @t's namespace).
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
1154
/*
 * Log diagnostics for a fatal signal: the signal number, on i386 (but not
 * UML) the instruction bytes at the faulting ip, and a register dump.
 */
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			/* Stop at the first unreadable byte. */
			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	/* Keep the register dump on one CPU's output. */
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
1178
/* Parse the "print-fatal-signals=" boot parameter into print_fatal_signals. */
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187
/*
 * Send @sig to the whole thread group of @p (shared pending queue).
 * Caller must hold p->sighand->siglock; see __send_signal().
 */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}
1193
/*
 * Send @sig to the single thread @t (private pending queue).
 * Caller must hold t->sighand->siglock; see __send_signal().
 */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
1199
/*
 * Locked wrapper around send_signal(): takes @p's sighand lock itself.
 * @group selects group-wide vs single-thread delivery.  Returns -ESRCH
 * when the sighand is gone (task already released), else send_signal()'s
 * result.
 */
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
1213
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		/* Reset to default so the forced signal actually fires. */
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/* SIG_DFL delivery must not be suppressed by SIGNAL_UNKILLABLE. */
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
1250
/*
 * Nuke all other threads in the group.
 *
 * Queues SIGKILL on every sibling of @p and wakes it; skips threads that
 * have already reached exit_state.  Returns the number of sibling threads
 * visited (dead or alive), excluding @p itself.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
1274
/*
 * Safely take @tsk->sighand->siglock even though the sighand may be freed
 * concurrently (task exit / de_thread).  Returns the locked sighand with
 * interrupts disabled and saved in *@flags, or NULL if the task has no
 * sighand anymore (caller must not unlock in that case).
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_DESTROY_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		/* Raced with a sighand change: drop everything and retry. */
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
1316
/*
 * send signal info to all the members of a group
 *
 * Checks permissions first (under RCU); a zero @sig performs only the
 * permission/existence check, matching kill(2) semantics.
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
1333
/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 *
 * Returns 0 if delivery to at least one group member succeeded, otherwise
 * the last error (-ESRCH when the group is empty).
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
1353
/*
 * Send @sig to the process identified by @pid.  Retries when the task is
 * unhashed between lookup and delivery (group_send_sig_info() returned
 * -ESRCH for a task we did find); see the comment in the loop.
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again. If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}
1375
/*
 * kill_pid_info() by numeric pid: resolves @pid in the caller's pid
 * namespace under RCU, then delivers.
 */
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
1384
Serge Hallynd178bc32011-09-26 10:45:18 -05001385static int kill_as_cred_perm(const struct cred *cred,
1386 struct task_struct *target)
1387{
1388 const struct cred *pcred = __task_cred(target);
Eric W. Biederman5af66202012-03-03 20:21:47 -08001389 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1390 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
Serge Hallynd178bc32011-09-26 10:45:18 -05001391 return 0;
1392 return 1;
1393}
1394
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
/*
 * Permission is judged against the supplied @cred and @secid instead of
 * the caller's credentials.  A zero @sig only performs the checks.
 */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			/* from_ancestor_ns == 0: caller vouches for the pid ns. */
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

/*
 * pid > 0   - signal the single process with that (virtual) pid
 * pid == 0  - signal every process in the caller's process group
 * pid == -1 - signal every process except those with vpid <= 1 and the
 *             caller's own thread group
 * pid < -1  - signal every process in process group -pid
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		/* pid == 0 means the caller's own process group */
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			/* skip vpid <= 1 and our own thread group */
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				/* -EPERM on one target doesn't fail the call */
				if (err != -EPERM)
					retval = err;
			}
		}
		/* nothing eligible to signal at all -> ESRCH */
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1478
1479/*
1480 * These are for backward compatibility with the rest of the kernel source.
1481 */
1482
Randy Dunlap5aba0852011-04-04 14:59:31 -07001483int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 /*
1486 * Make sure legacy kernel users don't send in bad values
1487 * (normal paths check this in check_kill_permission).
1488 */
Jesper Juhl7ed20e12005-05-01 08:59:14 -07001489 if (!valid_signal(sig))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490 return -EINVAL;
1491
Oleg Nesterov4a30deb2009-09-23 15:57:00 -07001492 return do_send_sig_info(sig, info, p, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493}
1494
/*
 * Map the legacy "priv" flag onto the special siginfo cookies:
 * non-zero selects SEND_SIG_PRIV, zero selects SEND_SIG_NOINFO.
 */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1497
/* Send @sig to @p; @priv selects the SEND_SIG_PRIV siginfo cookie. */
int send_sig(int sig, struct task_struct *p, int priv)
{
	struct siginfo *si = __si_special(priv);

	return send_sig_info(sig, si, p);
}
1503
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504void
1505force_sig(int sig, struct task_struct *p)
1506{
Oleg Nesterovb67a1b92005-10-30 15:03:44 -08001507 force_sig_info(sig, SEND_SIG_PRIV, p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508}
1509
1510/*
1511 * When things go south during signal handling, we
1512 * will force a SIGSEGV. And if the signal that caused
1513 * the problem was already a SIGSEGV, we'll want to
1514 * make sure we don't even try to deliver the signal..
1515 */
1516int
1517force_sigsegv(int sig, struct task_struct *p)
1518{
1519 if (sig == SIGSEGV) {
1520 unsigned long flags;
1521 spin_lock_irqsave(&p->sighand->siglock, flags);
1522 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1523 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1524 }
1525 force_sig(SIGSEGV, p);
1526 return 0;
1527}
1528
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001529int kill_pgrp(struct pid *pid, int sig, int priv)
1530{
Pavel Emelyanov146a5052008-02-08 04:19:22 -08001531 int ret;
1532
1533 read_lock(&tasklist_lock);
1534 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1535 read_unlock(&tasklist_lock);
1536
1537 return ret;
Eric W. Biedermanc4b92fc2006-10-02 02:17:10 -07001538}
1539EXPORT_SYMBOL(kill_pgrp);
1540
/* Send @sig to the task identified by @pid. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	struct siginfo *si = __si_special(priv);

	return kill_pid_info(sig, si, pid);
}
EXPORT_SYMBOL(kill_pid);
1546
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547/*
1548 * These functions support sending signals using preallocated sigqueue
1549 * structures. This is needed "because realtime applications cannot
1550 * afford to lose notifications of asynchronous events, like timer
Randy Dunlap5aba0852011-04-04 14:59:31 -07001551 * expirations or I/O completions". In the case of POSIX Timers
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 * we allocate the sigqueue structure from the timer_create. If this
1553 * allocation fails we are able to report the failure to the application
1554 * with an EAGAIN error.
1555 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556struct sigqueue *sigqueue_alloc(void)
1557{
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001558 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001560 if (q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001561 q->flags |= SIGQUEUE_PREALLOC;
Naohiro Ooiwaf84d49b2009-11-09 00:46:42 +09001562
1563 return q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564}
1565
/*
 * Release a preallocated sigqueue.  The PREALLOC flag is cleared under
 * ->siglock; if the entry is still on a pending list it is left for the
 * dequeuer to free, otherwise it is freed here.
 */
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
1590
/*
 * Queue the preallocated sigqueue @q on @t: onto the shared (group-wide)
 * pending list when @group, otherwise onto @t's private pending list.
 *
 * Returns 0 if the signal was queued, 1 if prepare_signal() refused it
 * (the signal is ignored), and -1 if @t's sighand could not be locked
 * (the task is going away).
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queue just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	/* fresh enqueue: start counting overruns from zero */
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	/* trace the outcome before dropping the sighand lock */
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1634
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * @tsk: the exiting task; @sig: the notification signal, normally SIGCHLD.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	WARN_ON_ONCE(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));

	/* only a (traced task or) sole surviving group leader may be here */
	WARN_ON_ONCE(!tsk->ptrace &&
		     (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	/* pid/uid are translated into the parent's namespaces */
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	/* decode tsk->exit_code into si_code/si_status */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
1731
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		/* group stops are always reported by the group leader */
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	/* si_status carries the stopping signal or exit_code as applicable */
	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1801
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001802static inline int may_ptrace_stop(void)
1803{
Tejun Heod21142e2011-06-17 16:50:34 +02001804 if (!likely(current->ptrace))
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001805 return 0;
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001806 /*
1807 * Are we in the middle of do_coredump?
1808 * If so and our tracer is also part of the coredump stopping
1809 * is a deadlock situation, and pointless because our tracer
1810 * is dead so don't allow us to stop.
1811 * If SIGKILL was already sent before the caller unlocked
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001812 * ->siglock we must see ->core_state != NULL. Otherwise it
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001813 * is safe to enter schedule().
Oleg Nesterov9899d112013-01-21 20:48:00 +01001814 *
1815 * This is almost outdated, a task with the pending SIGKILL can't
1816 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1817 * after SIGKILL was already dequeued.
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001818 */
Oleg Nesterov999d9fc2008-07-25 01:47:41 -07001819 if (unlikely(current->mm->core_state) &&
Oleg Nesterovd5f70c02006-06-26 00:26:07 -07001820 unlikely(current->mm == current->parent->mm))
1821 return 0;
1822
1823 return 1;
1824}
1825
/*
 * ptrace_stop - park the current task in TASK_TRACED for its tracer
 * @exit_code:  value stored in current->exit_code for the tracer
 * @why:        CLD_* reason passed to do_notify_parent_cldstop()
 * @clear_code: if the tracer vanished, reset current->exit_code to 0
 * @info:       siginfo published via current->last_siginfo while stopped
 *
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 * schedule() will not sleep if there is a pending signal that
	 * can awaken the task.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delievered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}
1955
Tejun Heo3544d722011-06-14 11:20:15 +02001956static void ptrace_do_notify(int signr, int exit_code, int why)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957{
1958 siginfo_t info;
1959
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 memset(&info, 0, sizeof info);
Tejun Heo3544d722011-06-14 11:20:15 +02001961 info.si_signo = signr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 info.si_code = exit_code;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07001963 info.si_pid = task_pid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08001964 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
1966 /* Let the debugger run. */
Tejun Heo3544d722011-06-14 11:20:15 +02001967 ptrace_stop(exit_code, why, 1, &info);
1968}
1969
1970void ptrace_notify(int exit_code)
1971{
1972 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
Oleg Nesterovf784e8a2012-08-26 21:12:17 +02001973 if (unlikely(current->task_works))
1974 task_work_run();
Tejun Heo3544d722011-06-14 11:20:15 +02001975
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 spin_lock_irq(&current->sighand->siglock);
Tejun Heo3544d722011-06-14 11:20:15 +02001977 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 spin_unlock_irq(&current->sighand->siglock);
1979}
1980
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * places afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		/* count ourselves in if our pending flags could be set */
		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		/* not ptraced: stop right here and notify the real parent */
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}
Tejun Heod79fdd62011-03-23 10:37:00 +01002104
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	/* Stop signal recorded in jobctl by do_signal_stop()/prepare_signal(). */
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		/*
		 * No group stop pending or in effect: this is an explicit
		 * SEIZE/INTERRUPT trap, reported as SIGTRAP.
		 */
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		/* PTRACE_EVENT_STOP in the high byte, stop signal in the low. */
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		/* Legacy (non-SEIZED) group stop trap: no siginfo. */
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
2138
/*
 * Report a dequeued signal to the tracer and let it decide what happens.
 * Returns the (possibly changed) signal number to deliver, or 0 if the
 * debugger cancelled it or it is now blocked and was requeued.
 * Called with @current->sighand->siglock held; ptrace_stop() sleeps with
 * the lock dropped and re-acquires it before returning.
 */
static int ptrace_signal(int signr, siginfo_t *info)
{
	ptrace_signal_deliver();
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig? */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		/* Synthesize SI_USER info as if sent by the tracer. */
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		/* RCU protects the ->parent dereference. */
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}
2186
/**
 * get_signal - dequeue the next signal to be delivered to current
 * @ksig: filled with the signal number, siginfo and sigaction on success
 *
 * Handles job-control state (pending group stops, jobctl traps), parent
 * notification after a stop/continue, ptrace interception, and the default
 * actions of unhandled signals (ignore, stop, coredump, group exit).
 *
 * Returns nonzero when @ksig describes a signal whose user handler must be
 * run; returns 0 when there is nothing to deliver.  Does not return at all
 * for fatal default actions (do_group_exit()).
 *
 * Takes and releases current->sighand->siglock internally and may sleep
 * (group stop, ptrace stop, freezer).
 */
int get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	/* Flush any pending task_work before looking at signals. */
	if (unlikely(current->task_works))
		task_work_run();

	/* A uprobe may defer signal delivery while single-stepping. */
	if (unlikely(uprobe_deny_signal()))
		return 0;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	/* Has this task already been marked for death? */
	if (signal_group_exit(signal)) {
		ksig->info.si_signo = signr = SIGKILL;
		sigdelset(&current->pending.signal, SIGKILL);
		trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
				&sighand->action[SIGKILL - 1]);
		recalc_sigpending();
		/* Skip straight to the fatal path below. */
		goto fatal;
	}

	for (;;) {
		struct k_sigaction *ka;

		/* Participate in a pending group stop before dequeueing. */
		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		/* Ptrace STOP/INTERRUPT traps scheduled via jobctl. */
		if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
			do_jobctl_trap();
			spin_unlock_irq(&sighand->siglock);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked, &ksig->info);

		if (!signr)
			break; /* will return 0 */

		/* Let the tracer intercept everything except SIGKILL. */
		if (unlikely(current->ptrace) && signr != SIGKILL) {
			signr = ptrace_signal(signr, &ksig->info);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			/* SA_ONESHOT: reset to default before delivery. */
			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(ksig->info.si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(ksig->info.si_signo);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(ksig->info.si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;
	return ksig->sig > 0;
}
2391
/**
 * signal_delivered - commit bookkeeping after a signal frame was set up
 * @ksig: kernel signal struct
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags).  Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	/* Block sa_mask plus, unless SA_NODEFER, the delivered signal. */
	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	tracehook_signal_handler(stepping);
}
2418
Al Viro2ce5da12012-11-07 15:11:25 -05002419void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2420{
2421 if (failed)
2422 force_sigsegv(ksig->sig, current);
2423 else
Richard Weinberger10b1c7a2014-07-13 13:36:04 +02002424 signal_delivered(ksig, stepping);
Al Viro2ce5da12012-11-07 15:11:25 -05002425}
2426
/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal. Other threads should be notified now to take
 * the shared signals in @which since we will not.
 *
 * Called with the siglock held (the callers in this file hold it);
 * while_each_thread() walks the thread group under that protection.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	/* Only care about signals that are both shared-pending and in @which. */
	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		/* Exiting threads no longer take group-wide signals. */
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);

		/* Stop early once every signal has been retargeted. */
		if (sigisemptyset(&retarget))
			break;
	}
}
2458
/*
 * Called on the exit path to set PF_EXITING and detach @tsk from
 * group-wide signal delivery, retargeting any shared pending signals
 * it had unblocked and completing a group stop it was participating in.
 */
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	threadgroup_change_begin(tsk);

	/* Fast path: sole thread or whole group already exiting. */
	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	/* Hand off shared pending signals we had unblocked to siblings. */
	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
2508
/*
 * Export the core signal primitives to modules; dequeue_signal() is
 * GPL-only, the rest are plain exports.
 */
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516
2517/*
2518 * System call entry points.
2519 */
2520
/**
 * sys_restart_syscall - restart a system call
 *
 * Re-invokes the restart callback recorded in current->restart_block;
 * the interrupted syscall is responsible for having set ->fn.
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}
2529
/* Default restart_block callback: never restart, just fail with -EINTR. */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2534
/*
 * Install @newset as @tsk->blocked, first retargeting shared pending
 * signals that are about to become blocked to other threads.
 * Caller must hold @tsk->sighand->siglock; @tsk is current here
 * (note the &current->blocked use below).
 */
static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (signal_pending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}
2546
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask; SIGKILL and SIGSTOP are silently stripped from it
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	/* SIGKILL/SIGSTOP can never be blocked. */
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}
2559
/*
 * Like set_current_blocked() but trusts the caller not to include
 * SIGKILL/SIGSTOP in @newset.  Takes the siglock itself.
 */
void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do. The current->blocked shouldn't be modified by other task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575
2576/*
2577 * This is also useful for kernel threads that want to temporarily
2578 * (or permanently) block certain signals.
2579 *
2580 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2581 * interface happily blocks "unblockable" signals like SIGKILL
2582 * and friends.
2583 */
2584int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2585{
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002586 struct task_struct *tsk = current;
2587 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002589 /* Lockless, only current can change ->blocked, never from irq */
Oleg Nesterova26fd332006-03-23 03:00:49 -08002590 if (oldset)
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002591 *oldset = tsk->blocked;
Oleg Nesterova26fd332006-03-23 03:00:49 -08002592
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 switch (how) {
2594 case SIG_BLOCK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002595 sigorsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 break;
2597 case SIG_UNBLOCK:
Oleg Nesterov702a5072011-04-27 22:01:27 +02002598 sigandnsets(&newset, &tsk->blocked, set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 break;
2600 case SIG_SETMASK:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002601 newset = *set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602 break;
2603 default:
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002604 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 }
Oleg Nesterova26fd332006-03-23 03:00:49 -08002606
Al Viro77097ae2012-04-27 13:58:59 -04002607 __set_current_blocked(&newset);
Oleg Nesterov73ef4ae2011-04-27 19:54:20 +02002608 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609}
2610
/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: new set of blocked signals, or NULL to leave the mask unchanged
 * @oset: previous value of signal mask if non-null
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	/* Snapshot before any change so @oset reflects the prior mask. */
	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		/* Userspace can never block SIGKILL/SIGSTOP. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
2647
#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_sigprocmask.  On little-endian the compat
 * and native sigset layouts match, so we can just forward to the native
 * syscall; on big-endian the words must be swizzled via
 * sigset_from_compat()/sigset_to_compat().
 */
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		/* Userspace can never block SIGKILL/SIGSTOP. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
#endif
Al Viro322a56c2012-12-25 13:32:58 -05002686
/*
 * Collect into *@set the signals that are both pending (private or
 * shared) and currently blocked for this thread.  @set must point at a
 * sigset_t; the void * lets the native and compat callers share this
 * helper.  Returns 0 or -EINVAL if @sigsetsize is too large.
 */
static int do_sigpending(void *set, unsigned long sigsetsize)
{
	if (sigsetsize > sizeof(sigset_t))
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701
/**
 * sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 * @uset: stores pending signals
 * @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	/* do_sigpending() guarantees sigsetsize <= sizeof(set) on success. */
	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}
2716
#ifdef CONFIG_COMPAT
/*
 * Compat entry point for rt_sigpending.  As with rt_sigprocmask, only
 * big-endian needs an explicit sigset_to_compat() conversion; little-
 * endian forwards to the native syscall.
 */
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err) {
		compat_sigset_t set32;
		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
#endif
Al Virofe9c1db2012-12-25 14:31:38 -05002737
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

/*
 * Copy a siginfo_t to user space.  Only the generic three-int header
 * plus the union member selected by si_code is copied, so that kernel
 * stack padding in the unused union arms is never leaked to user space.
 * Returns 0 on success or -EFAULT.
 */
int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	/*
	 * A negative si_code marks a siginfo that originated in user
	 * space, so the whole structure is caller-provided data and can
	 * be copied out verbatim.
	 */
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_signo == SIGBUS &&
		    (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
#ifdef SEGV_BNDERR
		/* Bounds violation: also report the violated bounds. */
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
			err |= __put_user(from->si_lower, &to->si_lower);
			err |= __put_user(from->si_upper, &to->si_upper);
		}
#endif
#ifdef SEGV_PKUERR
		/* Protection-key fault: report the offending key. */
		if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
			err |= __put_user(from->si_pkey, &to->si_pkey);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
2829
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 *
 * Returns the dequeued signal number on success, -EINTR if woken by an
 * unrelated signal, -EAGAIN when the timeout expires with nothing
 * queued, or -EINVAL for a malformed timespec.
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
		    const struct timespec *ts)
{
	/* NULL timeout pointer means "wait forever" (KTIME_MAX). */
	ktime_t *to = NULL, timeout = { .tv64 = KTIME_MAX };
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	/* timeout.tv64 == 0 means a pure poll: never sleep. */
	if (!sig && timeout.tv64) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * while we are sleeping in so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		/*
		 * Restore the original blocked mask, then try once more to
		 * dequeue a now-pending signal before reporting the result.
		 */
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	/* ret != 0 here means the hrtimer sleep was interrupted early. */
	return ret ? -EINTR : -EAGAIN;
}
2885
/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 * @uthese: queued signals to wait for
 * @uinfo: if non-null, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/* Timeout is optional; validity is checked in do_sigtimedwait(). */
	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	/* A positive return value is the dequeued signal number. */
	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
2924
Randy Dunlap41c57892011-04-04 15:00:26 -07002925/**
2926 * sys_kill - send a signal to a process
2927 * @pid: the PID of the process
2928 * @sig: signal to be sent
2929 */
Heiko Carstens17da2bd2009-01-14 14:14:10 +01002930SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931{
2932 struct siginfo info;
2933
2934 info.si_signo = sig;
2935 info.si_errno = 0;
2936 info.si_code = SI_USER;
Pavel Emelyanovb4888932007-10-18 23:40:14 -07002937 info.si_pid = task_tgid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08002938 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939
2940 return kill_something_info(sig, &info, pid);
2941}
2942
/*
 * Send @sig to the single task @pid, optionally verifying that it
 * belongs to thread group @tgid (skipped when @tgid <= 0).  The task
 * lookup is done under RCU.  Returns 0 on success, -ESRCH if no
 * matching task exists, or a permission error from
 * check_kill_permission().
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
2972
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00002973static int do_tkill(pid_t tgid, pid_t pid, int sig)
2974{
Emese Revfyb9e146d2013-04-17 15:58:36 -07002975 struct siginfo info = {};
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00002976
2977 info.si_signo = sig;
2978 info.si_errno = 0;
2979 info.si_code = SI_TKILL;
2980 info.si_pid = task_tgid_vnr(current);
Eric W. Biederman078de5f2012-02-08 07:00:08 -08002981 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
Thomas Gleixner30b4ae82009-04-04 21:01:01 +00002982
2983 return do_send_specific(tgid, pid, sig, &info);
2984}
2985
Linus Torvalds1da177e2005-04-16 15:20:36 -07002986/**
2987 * sys_tgkill - send signal to one specific thread
2988 * @tgid: the thread group ID of the thread
2989 * @pid: the PID of the thread
2990 * @sig: signal to be sent
2991 *
Robert P. J. Day72fd4a32007-02-10 01:45:59 -08002992 * This syscall also checks the @tgid and returns -ESRCH even if the PID
Linus Torvalds1da177e2005-04-16 15:20:36 -07002993 * exists but it's not belonging to the target process anymore. This
2994 * method solves the problem of threads exiting and PIDs getting reused.
2995 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01002996SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002997{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002998 /* This is only valid for single tasks */
2999 if (pid <= 0 || tgid <= 0)
3000 return -EINVAL;
3001
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003002 return do_tkill(tgid, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003003}
3004
Randy Dunlap41c57892011-04-04 15:00:26 -07003005/**
3006 * sys_tkill - send signal to one specific task
3007 * @pid: the PID of the task
3008 * @sig: signal to be sent
3009 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3011 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003012SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003014 /* This is only valid for single tasks */
3015 if (pid <= 0)
3016 return -EINVAL;
3017
Vadim Lobanov6dd69f12005-10-30 15:02:18 -08003018 return do_tkill(0, pid, sig);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003019}
3020
Al Viro75907d42012-12-25 15:19:12 -05003021static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3022{
3023 /* Not even root can pretend to send signals from the kernel.
3024 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3025 */
Andrey Vagin66dd34a2013-02-27 17:03:12 -08003026 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003027 (task_pid_vnr(current) != pid))
Al Viro75907d42012-12-25 15:19:12 -05003028 return -EPERM;
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003029
Al Viro75907d42012-12-25 15:19:12 -05003030 info->si_signo = sig;
3031
3032 /* POSIX.1b doesn't mention process groups. */
3033 return kill_proc_info(sig, info, pid);
3034}
3035
Randy Dunlap41c57892011-04-04 15:00:26 -07003036/**
3037 * sys_rt_sigqueueinfo - send signal information to a signal
3038 * @pid: the PID of the thread
3039 * @sig: signal to be sent
3040 * @uinfo: signal info to be sent
3041 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003042SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3043 siginfo_t __user *, uinfo)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003044{
3045 siginfo_t info;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3047 return -EFAULT;
Al Viro75907d42012-12-25 15:19:12 -05003048 return do_rt_sigqueueinfo(pid, sig, &info);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049}
3050
#ifdef CONFIG_COMPAT
/* 32-bit entry point: translate the compat siginfo, then share the
 * native worker. */
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	/* Zero-init so fields the translation leaves unset can't leak
	 * kernel stack bytes later. */
	siginfo_t info = {};
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif
Al Viro75907d42012-12-25 15:19:12 -05003064
Al Viro9aae8fc2012-12-24 23:12:04 -05003065static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003066{
3067 /* This is only valid for single tasks */
3068 if (pid <= 0 || tgid <= 0)
3069 return -EINVAL;
3070
3071 /* Not even root can pretend to send signals from the kernel.
Julien Tinnesda485242011-03-18 15:05:21 -07003072 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3073 */
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003074 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3075 (task_pid_vnr(current) != pid))
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003076 return -EPERM;
Vladimir Davydov69828dc2015-04-16 12:47:35 -07003077
Thomas Gleixner62ab4502009-04-04 21:01:06 +00003078 info->si_signo = sig;
3079
3080 return do_send_specific(tgid, pid, sig, info);
3081}
3082
3083SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3084 siginfo_t __user *, uinfo)
3085{
3086 siginfo_t info;
3087
3088 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3089 return -EFAULT;
3090
3091 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3092}
3093
#ifdef CONFIG_COMPAT
/* 32-bit entry point: translate the compat siginfo, then share the
 * native worker. */
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	/* Zero-init so fields the translation leaves unset can't leak
	 * kernel stack bytes later. */
	siginfo_t info = {};

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
3108
/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		/*
		 * Setting the handler to SIG_IGN discards any already
		 * pending instances of the signal, both shared and
		 * private, while the siglock is held.
		 */
		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);
Oleg Nesterov03417292014-06-06 14:36:53 -07003129
/*
 * Weak default that does nothing; being __weak it can be overridden
 * elsewhere (presumably by arch code with compat-ABI adjustments —
 * confirm against the overriding definition).
 */
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}
3134
/*
 * Install a new disposition for @sig and/or fetch the old one, under
 * the sighand siglock.  @act and @oact may each be NULL.  Returns 0 or
 * -EINVAL for an invalid signal number or an attempt to change a
 * kernel-only signal.
 */
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	/* Give arch-specific compat fixups a chance to adjust both. */
	sigaction_compat_abi(act, oact);

	if (act) {
		/* SIGKILL and SIGSTOP can never be masked. */
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 * "Setting a signal action to SIG_IGN for a signal that is
		 * pending shall cause the pending signal to be discarded,
		 * whether or not it is blocked."
		 *
		 * "Setting a signal action to SIG_DFL for a signal that is
		 * pending and whose default action is to ignore the signal
		 * (for example, SIGCHLD), shall cause the pending signal to
		 * be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			/* Drop pending instances from the shared queue and
			 * from every thread's private queue. */
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
3179
/*
 * Common worker for sigaltstack(): optionally install the alternate
 * signal stack described by @uss and/or report the previous one via
 * @uoss.  @sp is the current user stack pointer (changing the stack
 * while running on it is refused with -EPERM); @min_ss_size is the
 * smallest acceptable stack size (MINSIGSTKSZ or the compat variant).
 */
static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp,
		size_t min_ss_size)
{
	stack_t oss;
	int error;

	/* Snapshot the current settings before anything is modified. */
	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp) |
		(current->sas_ss_flags & SS_FLAG_BITS);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		unsigned ss_flags;
		int ss_mode;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		/* Strip modifier bits (e.g. SS_AUTODISARM) to get the mode. */
		ss_mode = ss_flags & ~SS_FLAG_BITS;
		error = -EINVAL;
		if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0)
			goto out;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
		current->sas_ss_flags = ss_flags;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
/* sigaltstack(2): native entry point, enforcing MINSIGSTKSZ. */
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	return do_sigaltstack(uss, uoss, current_user_stack_pointer(),
			      MINSIGSTKSZ);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003248
Al Viro5c495742012-11-18 15:29:16 -05003249int restore_altstack(const stack_t __user *uss)
3250{
Will Deacon1e7066a2018-09-05 15:34:42 +01003251 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer(),
3252 MINSIGSTKSZ);
Al Viro5c495742012-11-18 15:29:16 -05003253 /* squash all but EFAULT for now */
3254 return err == -EFAULT ? err : 0;
3255}
3256
/*
 * Save the current alternate-stack settings into the user sigframe
 * slot @uss; if SS_AUTODISARM is set, reset the in-kernel settings so
 * the stack is disarmed while the handler runs.  @sp is unused here.
 */
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	/* The three stores are OR-combined: any fault yields nonzero. */
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
3269
#ifdef CONFIG_COMPAT
/*
 * Compat sigaltstack: widen the 32-bit stack_t, call the native worker
 * under KERNEL_DS (the worker takes __user pointers, but here the
 * buffers live on the kernel stack), then narrow the result back.
 */
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	set_fs(seg);
	/* Copy the old settings out field by field in compat layout. */
	if (ret >= 0 && uoss_ptr)  {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}
3305
3306int compat_restore_altstack(const compat_stack_t __user *uss)
3307{
3308 int err = compat_sys_sigaltstack(uss, NULL);
3309 /* squash all but -EFAULT for now */
3310 return err == -EFAULT ? err : 0;
3311}
Al Viroc40702c2012-11-20 14:24:26 -05003312
/*
 * Compat counterpart of __save_altstack(): store the current
 * alternate-stack settings in 32-bit layout, then honour
 * SS_AUTODISARM by resetting the kernel-side state.
 */
int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	/* The three stores are OR-combined: any fault yields nonzero. */
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328
#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @set: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	/*
	 * Reuse the rt variant with the legacy sigset size; it copies
	 * exactly @sigsetsize bytes back to user space.
	 */
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}

#endif
3341
3342#ifdef __ARCH_WANT_SYS_SIGPROCMASK
Randy Dunlap41c57892011-04-04 15:00:26 -07003343/**
3344 * sys_sigprocmask - examine and change blocked signals
3345 * @how: whether to add, remove, or set signals
Oleg Nesterovb013c392011-04-28 11:36:20 +02003346 * @nset: signals to add or remove (if non-null)
Randy Dunlap41c57892011-04-04 15:00:26 -07003347 * @oset: previous value of signal mask if non-null
3348 *
Randy Dunlap5aba0852011-04-04 14:59:31 -07003349 * Some platforms have their own version with special arguments;
3350 * others support only sys_rt_sigprocmask.
3351 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352
Oleg Nesterovb013c392011-04-28 11:36:20 +02003353SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
Heiko Carstensb290ebe2009-01-14 14:14:06 +01003354 old_sigset_t __user *, oset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356 old_sigset_t old_set, new_set;
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003357 sigset_t new_blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003358
Oleg Nesterovb013c392011-04-28 11:36:20 +02003359 old_set = current->blocked.sig[0];
3360
3361 if (nset) {
3362 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3363 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003365 new_blocked = current->blocked;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003366
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367 switch (how) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 case SIG_BLOCK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003369 sigaddsetmask(&new_blocked, new_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 break;
3371 case SIG_UNBLOCK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003372 sigdelsetmask(&new_blocked, new_set);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003373 break;
3374 case SIG_SETMASK:
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003375 new_blocked.sig[0] = new_set;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 break;
Oleg Nesterov2e4f7c72011-05-09 13:48:56 +02003377 default:
3378 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003379 }
3380
Oleg Nesterov0c4a8422013-01-05 19:13:29 +01003381 set_current_blocked(&new_blocked);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003382 }
Oleg Nesterovb013c392011-04-28 11:36:20 +02003383
3384 if (oset) {
3385 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3386 return -EFAULT;
3387 }
3388
3389 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003390}
3391#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3392
Al Viroeaca6ea2012-11-25 23:12:10 -05003393#ifndef CONFIG_ODD_RT_SIGACTION
Randy Dunlap41c57892011-04-04 15:00:26 -07003394/**
3395 * sys_rt_sigaction - alter an action taken by a process
3396 * @sig: signal to be sent
Randy Dunlapf9fa0bc2011-04-08 10:53:46 -07003397 * @act: new sigaction
3398 * @oact: used to save the previous sigaction
Randy Dunlap41c57892011-04-04 15:00:26 -07003399 * @sigsetsize: size of sigset_t type
3400 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003401SYSCALL_DEFINE4(rt_sigaction, int, sig,
3402 const struct sigaction __user *, act,
3403 struct sigaction __user *, oact,
3404 size_t, sigsetsize)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003405{
3406 struct k_sigaction new_sa, old_sa;
3407 int ret = -EINVAL;
3408
3409 /* XXX: Don't preclude handling different sized sigset_t's. */
3410 if (sigsetsize != sizeof(sigset_t))
3411 goto out;
3412
3413 if (act) {
3414 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3415 return -EFAULT;
3416 }
3417
3418 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3419
3420 if (!ret && oact) {
3421 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3422 return -EFAULT;
3423 }
3424out:
3425 return ret;
3426}
#ifdef CONFIG_COMPAT
/*
 * Compat rt_sigaction: translate the 32-bit sigaction (handler and
 * restorer pointers, mask words) to the native k_sigaction, call the
 * common worker, and translate the old action back.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		/* ret accumulates user-access failures via |=. */
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
Al Viroeaca6ea2012-11-25 23:12:10 -05003474#endif /* !CONFIG_ODD_RT_SIGACTION */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003475
Al Viro495dfbf2012-12-25 19:09:45 -05003476#ifdef CONFIG_OLD_SIGACTION
/*
 * Old-style sigaction syscall: same job as rt_sigaction but with the
 * legacy struct old_sigaction layout, whose sa_mask is a single word
 * (old_sigset_t) rather than a full sigset_t.
 */
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/*
		 * One access_ok() check up front, then unchecked
		 * __get_user() for each field; any failure => -EFAULT.
		 */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Expand the single-word mask into a full sigset. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Only the first word of the old mask fits the legacy ABI. */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
3511#endif
3512#ifdef CONFIG_COMPAT_OLD_SIGACTION
/*
 * Compat variant of the old-style sigaction syscall: legacy
 * struct compat_old_sigaction with compat pointers and a one-word
 * (compat_old_sigset_t) mask.
 */
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		/*
		 * One access_ok() check, then unchecked __get_user() per
		 * field; any failure => -EFAULT.
		 */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Convert compat pointers and widen the one-word mask. */
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Only the first word of the old mask fits the legacy ABI. */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
3552#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003553
Fabian Frederickf6187762014-06-04 16:11:12 -07003554#ifdef CONFIG_SGETMASK_SYSCALL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555
3556/*
3557 * For backwards compatibility. Functionality superseded by sigprocmask.
3558 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003559SYSCALL_DEFINE0(sgetmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560{
3561 /* SMP safe */
3562 return current->blocked.sig[0];
3563}
3564
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003565SYSCALL_DEFINE1(ssetmask, int, newmask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003566{
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003567 int old = current->blocked.sig[0];
3568 sigset_t newset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003569
Oleg Nesterov5ba53ff2013-01-05 19:13:13 +01003570 siginitset(&newset, newmask);
Oleg Nesterovc1095c62011-07-27 12:49:44 -07003571 set_current_blocked(&newset);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003572
3573 return old;
3574}
Fabian Frederickf6187762014-06-04 16:11:12 -07003575#endif /* CONFIG_SGETMASK_SYSCALL */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003576
3577#ifdef __ARCH_WANT_SYS_SIGNAL
3578/*
3579 * For backwards compatibility. Functionality superseded by sigaction.
3580 */
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003581SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003582{
3583 struct k_sigaction new_sa, old_sa;
3584 int ret;
3585
3586 new_sa.sa.sa_handler = handler;
3587 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
Oleg Nesterovc70d3d702006-02-09 22:41:41 +03003588 sigemptyset(&new_sa.sa.sa_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003589
3590 ret = do_sigaction(sig, &new_sa, &old_sa);
3591
3592 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3593}
3594#endif /* __ARCH_WANT_SYS_SIGNAL */
3595
3596#ifdef __ARCH_WANT_SYS_PAUSE
3597
Heiko Carstensa5f8fa92009-01-14 14:14:11 +01003598SYSCALL_DEFINE0(pause)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599{
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003600 while (!signal_pending(current)) {
Davidlohr Bueso1df01352015-02-17 13:45:41 -08003601 __set_current_state(TASK_INTERRUPTIBLE);
Oleg Nesterovd92fcf02011-05-25 19:22:27 +02003602 schedule();
3603 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003604 return -ERESTARTNOHAND;
3605}
3606
3607#endif
3608
/*
 * Common helper for the sigsuspend family: temporarily replace the
 * caller's blocked mask with *set and sleep until a signal is pending.
 */
static int sigsuspend(sigset_t *set)
{
	/*
	 * Stash the old mask first; set_restore_sigmask() below arranges
	 * for it to be restored from ->saved_sigmask later (after the
	 * pending signal has been dealt with).
	 */
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	/* Sleep interruptibly until a signal becomes pending. */
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
Al Viro68f3f162012-05-21 21:42:32 -04003621
Randy Dunlap41c57892011-04-04 15:00:26 -07003622/**
3623 * sys_rt_sigsuspend - replace the signal mask for a value with the
3624 * @unewset value until a signal is received
3625 * @unewset: new signal mask value
3626 * @sigsetsize: size of sigset_t type
3627 */
Heiko Carstensd4e82042009-01-14 14:14:34 +01003628SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
David Woodhouse150256d2006-01-18 17:43:57 -08003629{
3630 sigset_t newset;
3631
3632 /* XXX: Don't preclude handling different sized sigset_t's. */
3633 if (sigsetsize != sizeof(sigset_t))
3634 return -EINVAL;
3635
3636 if (copy_from_user(&newset, unewset, sizeof(newset)))
3637 return -EFAULT;
Al Viro68f3f162012-05-21 21:42:32 -04003638 return sigsuspend(&newset);
David Woodhouse150256d2006-01-18 17:43:57 -08003639}
Al Viroad4b65a2012-12-24 21:43:56 -05003640
3641#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	/*
	 * Big-endian: the 32-bit and native sigset word layouts differ,
	 * so the compat set must be explicitly converted.
	 */
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* on little-endian bitmaps don't care about granularity */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
3661#endif
David Woodhouse150256d2006-01-18 17:43:57 -08003662
Al Viro0a0e8cd2012-12-25 16:04:12 -05003663#ifdef CONFIG_OLD_SIGSUSPEND
3664SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3665{
3666 sigset_t blocked;
3667 siginitset(&blocked, mask);
3668 return sigsuspend(&blocked);
3669}
3670#endif
3671#ifdef CONFIG_OLD_SIGSUSPEND3
3672SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3673{
3674 sigset_t blocked;
3675 siginitset(&blocked, mask);
3676 return sigsuspend(&blocked);
3677}
3678#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07003679
/*
 * Weak default for architectures that do not provide their own
 * arch_vma_name(): report no special name for any VMA.
 * NOTE(review): lives in signal.c presumably for historical reasons;
 * it has no signal-specific behavior.
 */
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
3684
/* Boot-time setup: sanity-check the siginfo layout and create the
 * sigqueue slab cache (SLAB_PANIC: failure to create it is fatal). */
void __init signals_init(void)
{
	/* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
	BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
		!= offsetof(struct siginfo, _sifields._pad));

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
Jason Wessel67fc4e02010-05-20 21:04:21 -05003693
3694#ifdef CONFIG_KGDB_KDB
3695#include <linux/kdb.h>
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	/* Last target task, to detect a repeated kill of the same task. */
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	/*
	 * Probe the siglock: if someone else holds it, sending a signal
	 * from the kdb context could deadlock, so refuse.
	 * NOTE(review): the lock is dropped again immediately below, so
	 * this is only a best-effort probe, not a guarantee — the lock
	 * can be retaken by another CPU before send_sig_info() runs.
	 */
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	/*
	 * A non-running target risks deadlock on the run queue locks;
	 * warn on the first attempt and only proceed if the user repeats
	 * the kill command for the same task (new_t == 0).
	 */
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
3732#endif /* CONFIG_KGDB_KDB */