/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig,
		int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}
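
/*
 * For example, a SIGCHLD sent to a task that keeps the default SIG_DFL
 * disposition and is not being traced falls through all three checks
 * above and sig_ignored() returns true, so the signal is never queued.
 * The same signal becomes deliverable again as soon as the task blocks
 * it, installs a handler, or acquires a tracer interested in it.
 */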

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
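
/*
 * The switch above is just an unrolled version of something like the
 * sketch below, which may be easier to read:
 *
 *	ready = 0;
 *	for (i = 0; i < _NSIG_WORDS; i++)
 *		ready |= signal->sig[i] & ~blocked->sig[i];
 *	return ready != 0;
 *
 * i.e. "is any signal raised that is not also blocked?".
 */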

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->group_stop & GROUP_STOP_PENDING) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * clear it themselves.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
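
/*
 * Callers are expected to hold ->sighand->siglock while they change the
 * pending or blocked sets and then call recalc_sigpending(); a minimal
 * sketch of the usual pattern (assuming the lock is not already held):
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	sigaddset(&current->blocked, SIGUSR1);
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */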

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
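
/*
 * For example, if both SIGINT (2) and SIGSEGV (11) are pending and
 * unblocked, the SYNCHRONOUS_MASK filtering above makes next_signal()
 * report SIGSEGV first even though SIGINT has the lower number;
 * synchronous faults are serviced before asynchronous signals.
 */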

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}
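
/*
 * The DEFINE_RATELIMIT_STATE(..., 5 * HZ, 10) above allows at most ten
 * of these messages per five-second window, so a task that exhausts its
 * RLIMIT_SIGPENDING quota with sigqueue() cannot also flood the kernel
 * log.  The message is gated by the same print_fatal_signals knob that
 * is wired up via __setup() further down in this file.
 */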

/**
 * task_clear_group_stop_trapping - clear group stop trapping bit
 * @task: target task
 *
 * If GROUP_STOP_TRAPPING is set, a ptracer is waiting for us.  Clear it
 * and wake up the ptracer.  Note that we don't need any further locking.
 * @task->siglock guarantees that @task->parent points to the ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void task_clear_group_stop_trapping(struct task_struct *task)
{
	if (unlikely(task->group_stop & GROUP_STOP_TRAPPING)) {
		task->group_stop &= ~GROUP_STOP_TRAPPING;
		__wake_up_sync(&task->parent->signal->wait_chldexit,
			       TASK_UNINTERRUPTIBLE, 1);
	}
}

/**
 * task_clear_group_stop_pending - clear pending group stop
 * @task: target task
 *
 * Clear group stop states for @task.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_group_stop_pending(struct task_struct *task)
{
	task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has GROUP_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->group_stop & GROUP_STOP_CONSUME;

	WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));

	task_clear_group_stop_pending(task);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
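
/*
 * Roughly, each stopping thread calls task_participate_group_stop() as it
 * parks itself in do_signal_stop() (or in the ptrace stop path); only the
 * thread whose decrement takes group_stop_count to zero gets %true back
 * and therefore reports CLD_STOPPED to the parent once, rather than every
 * thread notifying separately.
 */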

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
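
/*
 * The main caller is __send_signal() below, which allocates with
 * GFP_ATOMIC because it runs under ->siglock with interrupts disabled
 * and derives override_rlimit from the siginfo it was handed; the
 * preallocated-sigqueue path used by POSIX timers goes through the same
 * helper, but with GFP_KERNEL and no rlimit override.
 */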

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}

/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
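
/*
 * A minimal sketch of how a driver might use this pair, with a purely
 * hypothetical notifier that refuses delivery while a device operation
 * is in flight (returning 0 blocks the signal, non-zero lets it through):
 *
 *	static int my_notifier(void *priv)
 *	{
 *		struct my_dev *dev = priv;	// hypothetical driver state
 *		return !dev->busy;
 *	}
 *
 *	block_all_signals(my_notifier, dev, &mask);
 *	... critical section ...
 *	unblock_all_signals();
 */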

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves; we don't let
	 * signalfd steal them.
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
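
/*
 * The canonical caller is the signal-delivery loop in
 * get_signal_to_deliver(), which, roughly, does:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, info);
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * and then decides between running a handler, taking the default
 * action, or ignoring the signal.
 */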

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred, *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	cred = current_cred();
	tcred = __task_cred(t);
	if (!same_thread_group(current, t) &&
	    (cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid  ^ tcred->suid) &&
	    (cred->uid  ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
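
/*
 * The xor chain above is a compact way of writing the classic permission
 * rule: the sender may signal the target if the sender's euid or uid
 * matches the target's uid or saved uid (or the sender has CAP_KILL, or
 * both tasks share a thread group).  For example, a shell running as
 * uid 1000 may kill() any process whose uid or suid is 1000, but gets
 * -EPERM for somebody else's process, with SIGCONT to a member of its
 * own session as the one carve-out.
 */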

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_group_stop_pending(t);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			wake_up_state(t, __TASK_STOPPED);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_group_stop_pending(t);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
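
/*
 * This is what makes legacy (non-realtime) signals coalesce: if two
 * SIGUSR1s arrive before the target dequeues the first, the second is
 * silently dropped in __send_signal() below, whereas two SIGRTMIN+1
 * deliveries each get their own sigqueue entry and are both seen by
 * the receiver.
 */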

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
| 1045 | |
Ingo Molnar | 45807a1 | 2007-07-15 23:40:10 -0700 | [diff] [blame] | 1046 | static void print_fatal_signal(struct pt_regs *regs, int signr) |
| 1047 | { |
| 1048 | printk("%s/%d: potentially unexpected fatal signal %d.\n", |
Pavel Emelyanov | ba25f9d | 2007-10-18 23:40:40 -0700 | [diff] [blame] | 1049 | current->comm, task_pid_nr(current), signr); |
Ingo Molnar | 45807a1 | 2007-07-15 23:40:10 -0700 | [diff] [blame] | 1050 | |
Al Viro | ca5cd87 | 2007-10-29 04:31:16 +0000 | [diff] [blame] | 1051 | #if defined(__i386__) && !defined(__arch_um__) |
H. Peter Anvin | 65ea5b0 | 2008-01-30 13:30:56 +0100 | [diff] [blame] | 1052 | printk("code at %08lx: ", regs->ip); |
Ingo Molnar | 45807a1 | 2007-07-15 23:40:10 -0700 | [diff] [blame] | 1053 | { |
| 1054 | int i; |
| 1055 | for (i = 0; i < 16; i++) { |
| 1056 | unsigned char insn; |
| 1057 | |
Andi Kleen | b45c6e7 | 2010-01-08 14:42:52 -0800 | [diff] [blame] | 1058 | if (get_user(insn, (unsigned char *)(regs->ip + i))) |
| 1059 | break; |
Ingo Molnar | 45807a1 | 2007-07-15 23:40:10 -0700 | [diff] [blame] | 1060 | printk("%02x ", insn); |
| 1061 | } |
| 1062 | } |
| 1063 | #endif |
| 1064 | printk("\n"); |
Ed Swierk | 3a9f84d | 2009-01-26 15:33:31 -0800 | [diff] [blame] | 1065 | preempt_disable(); |
Ingo Molnar | 45807a1 | 2007-07-15 23:40:10 -0700 | [diff] [blame] | 1066 | show_regs(regs); |
Ed Swierk | 3a9f84d | 2009-01-26 15:33:31 -0800 | [diff] [blame] | 1067 | preempt_enable(); |
Ingo Molnar | 45807a1 | 2007-07-15 23:40:10 -0700 | [diff] [blame] | 1068 | } |
| 1069 | |
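/*
 * print_fatal_signal() above is enabled with "print-fatal-signals=1" on the
 * kernel command line (see the __setup() below) and is used when a fatal
 * signal is about to kill a task, dumping the faulting code bytes and the
 * registers for post-mortem debugging.
 */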
| 1070 | static int __init setup_print_fatal_signals(char *str) |
| 1071 | { |
| 1072 | get_option (&str, &print_fatal_signals); |
| 1073 | |
| 1074 | return 1; |
| 1075 | } |
| 1076 | |
| 1077 | __setup("print-fatal-signals=", setup_print_fatal_signals); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | |
Pavel Emelyanov | 4cd4b6d | 2008-04-30 00:52:55 -0700 | [diff] [blame] | 1079 | int |
| 1080 | __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
| 1081 | { |
| 1082 | return send_signal(sig, info, p, 1); |
| 1083 | } |
| 1084 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1085 | static int |
| 1086 | specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) |
| 1087 | { |
Pavel Emelyanov | 4cd4b6d | 2008-04-30 00:52:55 -0700 | [diff] [blame] | 1088 | return send_signal(sig, info, t, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1089 | } |
| 1090 | |
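/*
 * do_send_sig_info() below takes the target's sighand lock and queues the
 * signal: @group selects the thread group's shared_pending queue (group-wide
 * delivery) when true, or @p's private pending queue (thread-directed
 * delivery) when false. It returns -ESRCH if the task has no sighand left,
 * i.e. it is already being released.
 */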
Oleg Nesterov | 4a30deb | 2009-09-23 15:57:00 -0700 | [diff] [blame] | 1091 | int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, |
| 1092 | bool group) |
| 1093 | { |
| 1094 | unsigned long flags; |
| 1095 | int ret = -ESRCH; |
| 1096 | |
| 1097 | if (lock_task_sighand(p, &flags)) { |
| 1098 | ret = send_signal(sig, info, p, group); |
| 1099 | unlock_task_sighand(p, &flags); |
| 1100 | } |
| 1101 | |
| 1102 | return ret; |
| 1103 | } |
| 1104 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1105 | /* |
| 1106 | * Force a signal that the process can't ignore: if necessary |
| 1107 | * we unblock the signal and change any SIG_IGN to SIG_DFL. |
Linus Torvalds | ae74c3b | 2006-08-02 20:17:49 -0700 | [diff] [blame] | 1108 | * |
| 1109 | * Note: If we unblock the signal, we always reset it to SIG_DFL, |
| 1110 | * since we do not want to have a signal handler that was blocked |
| 1111 | * be invoked when user space had explicitly blocked it. |
| 1112 | * |
Oleg Nesterov | 80fe728 | 2008-04-30 00:53:05 -0700 | [diff] [blame] | 1113 |  * We don't want to have recursive SIGSEGVs etc., for example; |
| 1114 | * that is why we also clear SIGNAL_UNKILLABLE. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1115 | */ |
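/*
 * Typical caller (illustrative sketch only): an architecture fault handler
 * reporting a synchronous fault back to the current task, roughly:
 *
 *	info.si_signo = SIGSEGV;
 *	info.si_code  = SEGV_MAPERR;
 *	info.si_addr  = (void __user *)address;	   (the faulting address)
 *	force_sig_info(SIGSEGV, &info, current);
 *
 * 'address' here is hypothetical - whatever the fault handler computed.
 */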
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | int |
| 1117 | force_sig_info(int sig, struct siginfo *info, struct task_struct *t) |
| 1118 | { |
| 1119 | unsigned long int flags; |
Linus Torvalds | ae74c3b | 2006-08-02 20:17:49 -0700 | [diff] [blame] | 1120 | int ret, blocked, ignored; |
| 1121 | struct k_sigaction *action; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1122 | |
| 1123 | spin_lock_irqsave(&t->sighand->siglock, flags); |
Linus Torvalds | ae74c3b | 2006-08-02 20:17:49 -0700 | [diff] [blame] | 1124 | action = &t->sighand->action[sig-1]; |
| 1125 | ignored = action->sa.sa_handler == SIG_IGN; |
| 1126 | blocked = sigismember(&t->blocked, sig); |
| 1127 | if (blocked || ignored) { |
| 1128 | action->sa.sa_handler = SIG_DFL; |
| 1129 | if (blocked) { |
| 1130 | sigdelset(&t->blocked, sig); |
Roland McGrath | 7bb44ad | 2007-05-23 13:57:44 -0700 | [diff] [blame] | 1131 | recalc_sigpending_and_wake(t); |
Linus Torvalds | ae74c3b | 2006-08-02 20:17:49 -0700 | [diff] [blame] | 1132 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1133 | } |
Oleg Nesterov | 80fe728 | 2008-04-30 00:53:05 -0700 | [diff] [blame] | 1134 | if (action->sa.sa_handler == SIG_DFL) |
| 1135 | t->signal->flags &= ~SIGNAL_UNKILLABLE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1136 | ret = specific_send_sig_info(sig, info, t); |
| 1137 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
| 1138 | |
| 1139 | return ret; |
| 1140 | } |
| 1141 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | /* |
| 1143 | * Nuke all other threads in the group. |
| 1144 | */ |
Oleg Nesterov | 09faef1 | 2010-05-26 14:43:11 -0700 | [diff] [blame] | 1145 | int zap_other_threads(struct task_struct *p) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 | { |
Oleg Nesterov | 09faef1 | 2010-05-26 14:43:11 -0700 | [diff] [blame] | 1147 | struct task_struct *t = p; |
| 1148 | int count = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1149 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1150 | p->signal->group_stop_count = 0; |
| 1151 | |
Oleg Nesterov | 09faef1 | 2010-05-26 14:43:11 -0700 | [diff] [blame] | 1152 | while_each_thread(p, t) { |
Tejun Heo | 39efa3e | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1153 | task_clear_group_stop_pending(t); |
Oleg Nesterov | 09faef1 | 2010-05-26 14:43:11 -0700 | [diff] [blame] | 1154 | count++; |
| 1155 | |
| 1156 | /* Don't bother with already dead threads */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1157 | if (t->exit_state) |
| 1158 | continue; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 | sigaddset(&t->pending.signal, SIGKILL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1160 | signal_wake_up(t, 1); |
| 1161 | } |
Oleg Nesterov | 09faef1 | 2010-05-26 14:43:11 -0700 | [diff] [blame] | 1162 | |
| 1163 | return count; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1164 | } |
| 1165 | |
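/*
 * __lock_task_sighand() below implements the retry-under-RCU pattern:
 * ->sighand may be detached and freed while we wait for the lock, so the
 * pointer is re-read inside rcu_read_lock() and the lock is only kept once
 * the sighand we locked is still the one @tsk points at. Returns NULL if
 * the task has already lost its sighand (it is being released).
 */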
Namhyung Kim | b8ed374 | 2010-10-27 15:34:06 -0700 | [diff] [blame] | 1166 | struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, |
| 1167 | unsigned long *flags) |
Oleg Nesterov | f63ee72 | 2006-03-28 16:11:13 -0800 | [diff] [blame] | 1168 | { |
| 1169 | struct sighand_struct *sighand; |
| 1170 | |
Oleg Nesterov | 1406f2d | 2008-04-30 00:52:37 -0700 | [diff] [blame] | 1171 | rcu_read_lock(); |
Oleg Nesterov | f63ee72 | 2006-03-28 16:11:13 -0800 | [diff] [blame] | 1172 | for (;;) { |
| 1173 | sighand = rcu_dereference(tsk->sighand); |
| 1174 | if (unlikely(sighand == NULL)) |
| 1175 | break; |
| 1176 | |
| 1177 | spin_lock_irqsave(&sighand->siglock, *flags); |
| 1178 | if (likely(sighand == tsk->sighand)) |
| 1179 | break; |
| 1180 | spin_unlock_irqrestore(&sighand->siglock, *flags); |
| 1181 | } |
Oleg Nesterov | 1406f2d | 2008-04-30 00:52:37 -0700 | [diff] [blame] | 1182 | rcu_read_unlock(); |
Oleg Nesterov | f63ee72 | 2006-03-28 16:11:13 -0800 | [diff] [blame] | 1183 | |
| 1184 | return sighand; |
| 1185 | } |
| 1186 | |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1187 | /* |
| 1188 | * send signal info to all the members of a group |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1189 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1190 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
| 1191 | { |
David Howells | 694f690 | 2010-08-04 16:59:14 +0100 | [diff] [blame] | 1192 | int ret; |
| 1193 | |
| 1194 | rcu_read_lock(); |
| 1195 | ret = check_kill_permission(sig, info, p); |
| 1196 | rcu_read_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | |
Oleg Nesterov | 4a30deb | 2009-09-23 15:57:00 -0700 | [diff] [blame] | 1198 | if (!ret && sig) |
| 1199 | ret = do_send_sig_info(sig, info, p, true); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | |
| 1201 | return ret; |
| 1202 | } |
| 1203 | |
| 1204 | /* |
Pavel Emelyanov | 146a505 | 2008-02-08 04:19:22 -0800 | [diff] [blame] | 1205 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | * control characters do (^C, ^Z etc) |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1207 | * - the caller must hold at least a readlock on tasklist_lock |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1208 | */ |
Eric W. Biederman | c4b92fc | 2006-10-02 02:17:10 -0700 | [diff] [blame] | 1209 | int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1210 | { |
| 1211 | struct task_struct *p = NULL; |
| 1212 | int retval, success; |
| 1213 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1214 | success = 0; |
| 1215 | retval = -ESRCH; |
Eric W. Biederman | c4b92fc | 2006-10-02 02:17:10 -0700 | [diff] [blame] | 1216 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1217 | int err = group_send_sig_info(sig, info, p); |
| 1218 | success |= !err; |
| 1219 | retval = err; |
Eric W. Biederman | c4b92fc | 2006-10-02 02:17:10 -0700 | [diff] [blame] | 1220 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1221 | return success ? 0 : retval; |
| 1222 | } |
| 1223 | |
Eric W. Biederman | c4b92fc | 2006-10-02 02:17:10 -0700 | [diff] [blame] | 1224 | int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1225 | { |
Oleg Nesterov | d36174b | 2008-02-08 04:19:18 -0800 | [diff] [blame] | 1226 | int error = -ESRCH; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1227 | struct task_struct *p; |
| 1228 | |
Ingo Molnar | e56d090 | 2006-01-08 01:01:37 -0800 | [diff] [blame] | 1229 | rcu_read_lock(); |
Oleg Nesterov | d36174b | 2008-02-08 04:19:18 -0800 | [diff] [blame] | 1230 | retry: |
Eric W. Biederman | c4b92fc | 2006-10-02 02:17:10 -0700 | [diff] [blame] | 1231 | p = pid_task(pid, PIDTYPE_PID); |
Oleg Nesterov | d36174b | 2008-02-08 04:19:18 -0800 | [diff] [blame] | 1232 | if (p) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1233 | error = group_send_sig_info(sig, info, p); |
Oleg Nesterov | d36174b | 2008-02-08 04:19:18 -0800 | [diff] [blame] | 1234 | if (unlikely(error == -ESRCH)) |
| 1235 | /* |
| 1236 | * The task was unhashed in between, try again. |
| 1237 | 		 * If it is dead, pid_task() will return NULL; |
| 1238 | * if we race with de_thread() it will find the |
| 1239 | * new leader. |
| 1240 | */ |
| 1241 | goto retry; |
| 1242 | } |
Ingo Molnar | e56d090 | 2006-01-08 01:01:37 -0800 | [diff] [blame] | 1243 | rcu_read_unlock(); |
Oleg Nesterov | 6ca25b5 | 2008-04-30 00:52:45 -0700 | [diff] [blame] | 1244 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1245 | return error; |
| 1246 | } |
| 1247 | |
Matthew Wilcox | c3de4b3 | 2007-02-09 08:11:47 -0700 | [diff] [blame] | 1248 | int |
| 1249 | kill_proc_info(int sig, struct siginfo *info, pid_t pid) |
Eric W. Biederman | c4b92fc | 2006-10-02 02:17:10 -0700 | [diff] [blame] | 1250 | { |
| 1251 | int error; |
| 1252 | rcu_read_lock(); |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1253 | error = kill_pid_info(sig, info, find_vpid(pid)); |
Eric W. Biederman | c4b92fc | 2006-10-02 02:17:10 -0700 | [diff] [blame] | 1254 | rcu_read_unlock(); |
| 1255 | return error; |
| 1256 | } |
| 1257 | |
Eric W. Biederman | 2425c08 | 2006-10-02 02:17:28 -0700 | [diff] [blame] | 1258 | /* like kill_pid_info(), but doesn't use uid/euid of "current" */ |
| 1259 | int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, |
David Quigley | 8f95dc5 | 2006-06-30 01:55:47 -0700 | [diff] [blame] | 1260 | uid_t uid, uid_t euid, u32 secid) |
Harald Welte | 4611383 | 2005-10-10 19:44:29 +0200 | [diff] [blame] | 1261 | { |
| 1262 | int ret = -EINVAL; |
| 1263 | struct task_struct *p; |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1264 | const struct cred *pcred; |
Thomas Gleixner | 14d8c9f | 2009-12-10 00:53:17 +0000 | [diff] [blame] | 1265 | unsigned long flags; |
Harald Welte | 4611383 | 2005-10-10 19:44:29 +0200 | [diff] [blame] | 1266 | |
| 1267 | if (!valid_signal(sig)) |
| 1268 | return ret; |
| 1269 | |
Thomas Gleixner | 14d8c9f | 2009-12-10 00:53:17 +0000 | [diff] [blame] | 1270 | rcu_read_lock(); |
Eric W. Biederman | 2425c08 | 2006-10-02 02:17:28 -0700 | [diff] [blame] | 1271 | p = pid_task(pid, PIDTYPE_PID); |
Harald Welte | 4611383 | 2005-10-10 19:44:29 +0200 | [diff] [blame] | 1272 | if (!p) { |
| 1273 | ret = -ESRCH; |
| 1274 | goto out_unlock; |
| 1275 | } |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1276 | pcred = __task_cred(p); |
Oleg Nesterov | 614c517 | 2009-12-15 16:47:22 -0800 | [diff] [blame] | 1277 | if (si_fromuser(info) && |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1278 | euid != pcred->suid && euid != pcred->uid && |
| 1279 | uid != pcred->suid && uid != pcred->uid) { |
Harald Welte | 4611383 | 2005-10-10 19:44:29 +0200 | [diff] [blame] | 1280 | ret = -EPERM; |
| 1281 | goto out_unlock; |
| 1282 | } |
David Quigley | 8f95dc5 | 2006-06-30 01:55:47 -0700 | [diff] [blame] | 1283 | ret = security_task_kill(p, info, sig, secid); |
| 1284 | if (ret) |
| 1285 | goto out_unlock; |
Thomas Gleixner | 14d8c9f | 2009-12-10 00:53:17 +0000 | [diff] [blame] | 1286 | |
| 1287 | if (sig) { |
| 1288 | if (lock_task_sighand(p, &flags)) { |
| 1289 | ret = __send_signal(sig, info, p, 1, 0); |
| 1290 | unlock_task_sighand(p, &flags); |
| 1291 | } else |
| 1292 | ret = -ESRCH; |
Harald Welte | 4611383 | 2005-10-10 19:44:29 +0200 | [diff] [blame] | 1293 | } |
| 1294 | out_unlock: |
Thomas Gleixner | 14d8c9f | 2009-12-10 00:53:17 +0000 | [diff] [blame] | 1295 | rcu_read_unlock(); |
Harald Welte | 4611383 | 2005-10-10 19:44:29 +0200 | [diff] [blame] | 1296 | return ret; |
| 1297 | } |
Eric W. Biederman | 2425c08 | 2006-10-02 02:17:28 -0700 | [diff] [blame] | 1298 | EXPORT_SYMBOL_GPL(kill_pid_info_as_uid); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | |
| 1300 | /* |
| 1301 | * kill_something_info() interprets pid in interesting ways just like kill(2). |
| 1302 | * |
| 1303 | * POSIX specifies that kill(-1,sig) is unspecified, but what we have |
| 1304 | * is probably wrong. Should make it like BSD or SYSV. |
| 1305 | */ |
| 1306 | |
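/*
 * The pid argument follows kill(2):
 *	pid >  0   signal the single process with that (namespace-local) pid
 *	pid ==  0  signal every process in the caller's process group
 *	pid == -1  signal every process the caller may signal, except the
 *		   caller's own thread group and pid 1
 *	pid < -1   signal every process in the process group -pid
 */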
Gustavo Fernando Padovan | bc64efd | 2008-07-25 01:47:33 -0700 | [diff] [blame] | 1307 | static int kill_something_info(int sig, struct siginfo *info, pid_t pid) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | { |
Eric W. Biederman | 8d42db18 | 2007-02-12 00:52:55 -0800 | [diff] [blame] | 1309 | int ret; |
Pavel Emelyanov | d5df763 | 2008-02-08 04:19:22 -0800 | [diff] [blame] | 1310 | |
| 1311 | if (pid > 0) { |
| 1312 | rcu_read_lock(); |
| 1313 | ret = kill_pid_info(sig, info, find_vpid(pid)); |
| 1314 | rcu_read_unlock(); |
| 1315 | return ret; |
| 1316 | } |
| 1317 | |
| 1318 | read_lock(&tasklist_lock); |
| 1319 | if (pid != -1) { |
| 1320 | ret = __kill_pgrp_info(sig, info, |
| 1321 | pid ? find_vpid(-pid) : task_pgrp(current)); |
| 1322 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1323 | int retval = 0, count = 0; |
| 1324 | struct task_struct * p; |
| 1325 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1326 | for_each_process(p) { |
Sukadev Bhattiprolu | d25141a | 2008-10-29 14:01:11 -0700 | [diff] [blame] | 1327 | if (task_pid_vnr(p) > 1 && |
| 1328 | !same_thread_group(p, current)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 | int err = group_send_sig_info(sig, info, p); |
| 1330 | ++count; |
| 1331 | if (err != -EPERM) |
| 1332 | retval = err; |
| 1333 | } |
| 1334 | } |
Eric W. Biederman | 8d42db18 | 2007-02-12 00:52:55 -0800 | [diff] [blame] | 1335 | ret = count ? retval : -ESRCH; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 | } |
Pavel Emelyanov | d5df763 | 2008-02-08 04:19:22 -0800 | [diff] [blame] | 1337 | read_unlock(&tasklist_lock); |
| 1338 | |
Eric W. Biederman | 8d42db18 | 2007-02-12 00:52:55 -0800 | [diff] [blame] | 1339 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1340 | } |
| 1341 | |
| 1342 | /* |
| 1343 | * These are for backward compatibility with the rest of the kernel source. |
| 1344 | */ |
| 1345 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1346 | int |
| 1347 | send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
| 1348 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1349 | /* |
| 1350 | * Make sure legacy kernel users don't send in bad values |
| 1351 | * (normal paths check this in check_kill_permission). |
| 1352 | */ |
Jesper Juhl | 7ed20e1 | 2005-05-01 08:59:14 -0700 | [diff] [blame] | 1353 | if (!valid_signal(sig)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1354 | return -EINVAL; |
| 1355 | |
Oleg Nesterov | 4a30deb | 2009-09-23 15:57:00 -0700 | [diff] [blame] | 1356 | return do_send_sig_info(sig, info, p, false); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1357 | } |
| 1358 | |
Oleg Nesterov | b67a1b9 | 2005-10-30 15:03:44 -0800 | [diff] [blame] | 1359 | #define __si_special(priv) \ |
| 1360 | ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) |
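/*
 * priv != 0 maps to SEND_SIG_PRIV: the signal is reported as coming from the
 * kernel (si_code SI_KERNEL, si_pid/si_uid 0). priv == 0 maps to
 * SEND_SIG_NOINFO: the siginfo is filled in as if the current task had sent
 * it with kill() (si_code SI_USER). See the switch in __send_signal().
 */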
| 1361 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1362 | int |
| 1363 | send_sig(int sig, struct task_struct *p, int priv) |
| 1364 | { |
Oleg Nesterov | b67a1b9 | 2005-10-30 15:03:44 -0800 | [diff] [blame] | 1365 | return send_sig_info(sig, __si_special(priv), p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1366 | } |
| 1367 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1368 | void |
| 1369 | force_sig(int sig, struct task_struct *p) |
| 1370 | { |
Oleg Nesterov | b67a1b9 | 2005-10-30 15:03:44 -0800 | [diff] [blame] | 1371 | force_sig_info(sig, SEND_SIG_PRIV, p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1372 | } |
| 1373 | |
| 1374 | /* |
| 1375 | * When things go south during signal handling, we |
| 1376 | * will force a SIGSEGV. And if the signal that caused |
| 1377 | * the problem was already a SIGSEGV, we'll want to |
| 1378 |  * make sure we don't even try to deliver the signal. |
| 1379 | */ |
| 1380 | int |
| 1381 | force_sigsegv(int sig, struct task_struct *p) |
| 1382 | { |
| 1383 | if (sig == SIGSEGV) { |
| 1384 | unsigned long flags; |
| 1385 | spin_lock_irqsave(&p->sighand->siglock, flags); |
| 1386 | p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; |
| 1387 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
| 1388 | } |
| 1389 | force_sig(SIGSEGV, p); |
| 1390 | return 0; |
| 1391 | } |
| 1392 | |
Eric W. Biederman | c4b92fc | 2006-10-02 02:17:10 -0700 | [diff] [blame] | 1393 | int kill_pgrp(struct pid *pid, int sig, int priv) |
| 1394 | { |
Pavel Emelyanov | 146a505 | 2008-02-08 04:19:22 -0800 | [diff] [blame] | 1395 | int ret; |
| 1396 | |
| 1397 | read_lock(&tasklist_lock); |
| 1398 | ret = __kill_pgrp_info(sig, __si_special(priv), pid); |
| 1399 | read_unlock(&tasklist_lock); |
| 1400 | |
| 1401 | return ret; |
Eric W. Biederman | c4b92fc | 2006-10-02 02:17:10 -0700 | [diff] [blame] | 1402 | } |
| 1403 | EXPORT_SYMBOL(kill_pgrp); |
| 1404 | |
| 1405 | int kill_pid(struct pid *pid, int sig, int priv) |
| 1406 | { |
| 1407 | return kill_pid_info(sig, __si_special(priv), pid); |
| 1408 | } |
| 1409 | EXPORT_SYMBOL(kill_pid); |
| 1410 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1411 | /* |
| 1412 | * These functions support sending signals using preallocated sigqueue |
| 1413 | * structures. This is needed "because realtime applications cannot |
| 1414 | * afford to lose notifications of asynchronous events, like timer |
Naohiro Ooiwa | f84d49b | 2009-11-09 00:46:42 +0900 | [diff] [blame] | 1415 |  * expirations or I/O completions". In the case of POSIX timers |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1416 |  * we allocate the sigqueue structure at timer_create() time. If this |
| 1417 | * allocation fails we are able to report the failure to the application |
| 1418 | * with an EAGAIN error. |
| 1419 | */ |
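/*
 * Rough lifetime of a preallocated entry as used by POSIX timers (sketch):
 *
 *	q = sigqueue_alloc();                  at timer_create() time
 *	send_sigqueue(q, tsk, group);          on each timer expiration
 *	sigqueue_free(q);                      when the timer is deleted
 *
 * The same entry is reused across expirations; if it is still queued when
 * the timer fires again, send_sigqueue() only bumps si_overrun.
 */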
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1420 | struct sigqueue *sigqueue_alloc(void) |
| 1421 | { |
Naohiro Ooiwa | f84d49b | 2009-11-09 00:46:42 +0900 | [diff] [blame] | 1422 | struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1423 | |
Naohiro Ooiwa | f84d49b | 2009-11-09 00:46:42 +0900 | [diff] [blame] | 1424 | if (q) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1425 | q->flags |= SIGQUEUE_PREALLOC; |
Naohiro Ooiwa | f84d49b | 2009-11-09 00:46:42 +0900 | [diff] [blame] | 1426 | |
| 1427 | return q; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1428 | } |
| 1429 | |
| 1430 | void sigqueue_free(struct sigqueue *q) |
| 1431 | { |
| 1432 | unsigned long flags; |
Oleg Nesterov | 60187d2 | 2007-08-30 23:56:35 -0700 | [diff] [blame] | 1433 | spinlock_t *lock = ¤t->sighand->siglock; |
| 1434 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1435 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
| 1436 | /* |
Oleg Nesterov | c8e85b4f | 2008-05-26 20:55:42 +0400 | [diff] [blame] | 1437 | * We must hold ->siglock while testing q->list |
| 1438 | * to serialize with collect_signal() or with |
Oleg Nesterov | da7978b | 2008-05-23 13:04:41 -0700 | [diff] [blame] | 1439 | * __exit_signal()->flush_sigqueue(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1440 | */ |
Oleg Nesterov | 60187d2 | 2007-08-30 23:56:35 -0700 | [diff] [blame] | 1441 | spin_lock_irqsave(lock, flags); |
Oleg Nesterov | c8e85b4f | 2008-05-26 20:55:42 +0400 | [diff] [blame] | 1442 | q->flags &= ~SIGQUEUE_PREALLOC; |
| 1443 | /* |
| 1444 | * If it is queued it will be freed when dequeued, |
| 1445 | * like the "regular" sigqueue. |
| 1446 | */ |
Oleg Nesterov | 60187d2 | 2007-08-30 23:56:35 -0700 | [diff] [blame] | 1447 | if (!list_empty(&q->list)) |
Oleg Nesterov | c8e85b4f | 2008-05-26 20:55:42 +0400 | [diff] [blame] | 1448 | q = NULL; |
Oleg Nesterov | 60187d2 | 2007-08-30 23:56:35 -0700 | [diff] [blame] | 1449 | spin_unlock_irqrestore(lock, flags); |
| 1450 | |
Oleg Nesterov | c8e85b4f | 2008-05-26 20:55:42 +0400 | [diff] [blame] | 1451 | if (q) |
| 1452 | __sigqueue_free(q); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1453 | } |
| 1454 | |
Oleg Nesterov | ac5c215 | 2008-04-30 00:52:57 -0700 | [diff] [blame] | 1455 | int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) |
Pavel Emelyanov | 9e3bd6c | 2008-04-30 00:52:41 -0700 | [diff] [blame] | 1456 | { |
Oleg Nesterov | e62e665 | 2008-04-30 00:52:56 -0700 | [diff] [blame] | 1457 | int sig = q->info.si_signo; |
Oleg Nesterov | 2ca3515 | 2008-04-30 00:52:54 -0700 | [diff] [blame] | 1458 | struct sigpending *pending; |
Oleg Nesterov | e62e665 | 2008-04-30 00:52:56 -0700 | [diff] [blame] | 1459 | unsigned long flags; |
| 1460 | int ret; |
Oleg Nesterov | 2ca3515 | 2008-04-30 00:52:54 -0700 | [diff] [blame] | 1461 | |
Pavel Emelyanov | 4cd4b6d | 2008-04-30 00:52:55 -0700 | [diff] [blame] | 1462 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
Oleg Nesterov | e62e665 | 2008-04-30 00:52:56 -0700 | [diff] [blame] | 1463 | |
| 1464 | ret = -1; |
| 1465 | if (!likely(lock_task_sighand(t, &flags))) |
| 1466 | goto ret; |
| 1467 | |
Oleg Nesterov | 7e695a5 | 2008-04-30 00:52:59 -0700 | [diff] [blame] | 1468 | ret = 1; /* the signal is ignored */ |
Sukadev Bhattiprolu | 921cf9f | 2009-04-02 16:58:05 -0700 | [diff] [blame] | 1469 | if (!prepare_signal(sig, t, 0)) |
Oleg Nesterov | e62e665 | 2008-04-30 00:52:56 -0700 | [diff] [blame] | 1470 | goto out; |
| 1471 | |
| 1472 | ret = 0; |
Pavel Emelyanov | 9e3bd6c | 2008-04-30 00:52:41 -0700 | [diff] [blame] | 1473 | if (unlikely(!list_empty(&q->list))) { |
| 1474 | /* |
| 1475 | 		 * If an SI_TIMER entry is already queued, just increment |
| 1476 | * the overrun count. |
| 1477 | */ |
Pavel Emelyanov | 9e3bd6c | 2008-04-30 00:52:41 -0700 | [diff] [blame] | 1478 | BUG_ON(q->info.si_code != SI_TIMER); |
| 1479 | q->info.si_overrun++; |
Oleg Nesterov | e62e665 | 2008-04-30 00:52:56 -0700 | [diff] [blame] | 1480 | goto out; |
Pavel Emelyanov | 9e3bd6c | 2008-04-30 00:52:41 -0700 | [diff] [blame] | 1481 | } |
Oleg Nesterov | ba66129 | 2008-07-23 20:52:05 +0400 | [diff] [blame] | 1482 | q->info.si_overrun = 0; |
Pavel Emelyanov | 9e3bd6c | 2008-04-30 00:52:41 -0700 | [diff] [blame] | 1483 | |
Pavel Emelyanov | 9e3bd6c | 2008-04-30 00:52:41 -0700 | [diff] [blame] | 1484 | signalfd_notify(t, sig); |
Oleg Nesterov | 2ca3515 | 2008-04-30 00:52:54 -0700 | [diff] [blame] | 1485 | pending = group ? &t->signal->shared_pending : &t->pending; |
Pavel Emelyanov | 9e3bd6c | 2008-04-30 00:52:41 -0700 | [diff] [blame] | 1486 | list_add_tail(&q->list, &pending->list); |
| 1487 | sigaddset(&pending->signal, sig); |
Pavel Emelyanov | 4cd4b6d | 2008-04-30 00:52:55 -0700 | [diff] [blame] | 1488 | complete_signal(sig, t, group); |
Oleg Nesterov | e62e665 | 2008-04-30 00:52:56 -0700 | [diff] [blame] | 1489 | out: |
| 1490 | unlock_task_sighand(t, &flags); |
| 1491 | ret: |
| 1492 | return ret; |
Pavel Emelyanov | 9e3bd6c | 2008-04-30 00:52:41 -0700 | [diff] [blame] | 1493 | } |
| 1494 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1495 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1496 | * Let a parent know about the death of a child. |
| 1497 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. |
Roland McGrath | 2b2a1ff | 2008-07-25 19:45:54 -0700 | [diff] [blame] | 1498 | * |
| 1499 | * Returns -1 if our parent ignored us and so we've switched to |
| 1500 | * self-reaping, or else @sig. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1501 | */ |
Roland McGrath | 2b2a1ff | 2008-07-25 19:45:54 -0700 | [diff] [blame] | 1502 | int do_notify_parent(struct task_struct *tsk, int sig) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1503 | { |
| 1504 | struct siginfo info; |
| 1505 | unsigned long flags; |
| 1506 | struct sighand_struct *psig; |
Roland McGrath | 1b04624 | 2008-08-19 20:37:07 -0700 | [diff] [blame] | 1507 | int ret = sig; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1508 | |
| 1509 | BUG_ON(sig == -1); |
| 1510 | |
| 1511 | /* do_notify_parent_cldstop should have been called instead. */ |
Matthew Wilcox | e1abb39 | 2007-12-06 11:07:35 -0500 | [diff] [blame] | 1512 | BUG_ON(task_is_stopped_or_traced(tsk)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 | |
Oleg Nesterov | 5cb1144 | 2009-06-17 16:27:30 -0700 | [diff] [blame] | 1514 | BUG_ON(!task_ptrace(tsk) && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1515 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); |
| 1516 | |
| 1517 | info.si_signo = sig; |
| 1518 | info.si_errno = 0; |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1519 | /* |
| 1520 | * we are under tasklist_lock here so our parent is tied to |
| 1521 | * us and cannot exit and release its namespace. |
| 1522 | * |
| 1523 | 	 * the only thing it can do is switch its nsproxy with sys_unshare(), |
| 1524 | 	 * but unsharing pid namespaces is not allowed, so we'll always |
| 1525 | 	 * see the relevant namespace |
| 1526 | 	 * |
| 1527 | 	 * write_lock() currently calls preempt_disable() which is the |
| 1528 | 	 * same as rcu_read_lock(), but according to Oleg it is not |
| 1529 | 	 * correct to rely on this |
| 1530 | */ |
| 1531 | rcu_read_lock(); |
| 1532 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1533 | info.si_uid = __task_cred(tsk)->uid; |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1534 | rcu_read_unlock(); |
| 1535 | |
Peter Zijlstra | 32bd671 | 2009-02-05 12:24:15 +0100 | [diff] [blame] | 1536 | info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, |
| 1537 | tsk->signal->utime)); |
| 1538 | info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, |
| 1539 | tsk->signal->stime)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1540 | |
| 1541 | info.si_status = tsk->exit_code & 0x7f; |
| 1542 | if (tsk->exit_code & 0x80) |
| 1543 | info.si_code = CLD_DUMPED; |
| 1544 | else if (tsk->exit_code & 0x7f) |
| 1545 | info.si_code = CLD_KILLED; |
| 1546 | else { |
| 1547 | info.si_code = CLD_EXITED; |
| 1548 | info.si_status = tsk->exit_code >> 8; |
| 1549 | } |
| 1550 | |
| 1551 | psig = tsk->parent->sighand; |
| 1552 | spin_lock_irqsave(&psig->siglock, flags); |
Oleg Nesterov | 5cb1144 | 2009-06-17 16:27:30 -0700 | [diff] [blame] | 1553 | if (!task_ptrace(tsk) && sig == SIGCHLD && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1554 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || |
| 1555 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { |
| 1556 | /* |
| 1557 | * We are exiting and our parent doesn't care. POSIX.1 |
| 1558 | * defines special semantics for setting SIGCHLD to SIG_IGN |
| 1559 | * or setting the SA_NOCLDWAIT flag: we should be reaped |
| 1560 | * automatically and not left for our parent's wait4 call. |
| 1561 | * Rather than having the parent do it as a magic kind of |
| 1562 | * signal handler, we just set this to tell do_exit that we |
| 1563 | * can be cleaned up without becoming a zombie. Note that |
| 1564 | * we still call __wake_up_parent in this case, because a |
| 1565 | * blocked sys_wait4 might now return -ECHILD. |
| 1566 | * |
| 1567 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT |
| 1568 | * is implementation-defined: we do (if you don't want |
| 1569 | * it, just use SIG_IGN instead). |
| 1570 | */ |
Roland McGrath | 1b04624 | 2008-08-19 20:37:07 -0700 | [diff] [blame] | 1571 | ret = tsk->exit_signal = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1572 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) |
Roland McGrath | 2b2a1ff | 2008-07-25 19:45:54 -0700 | [diff] [blame] | 1573 | sig = -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1574 | } |
Jesper Juhl | 7ed20e1 | 2005-05-01 08:59:14 -0700 | [diff] [blame] | 1575 | if (valid_signal(sig) && sig > 0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1576 | __group_send_sig_info(sig, &info, tsk->parent); |
| 1577 | __wake_up_parent(tsk, tsk->parent); |
| 1578 | spin_unlock_irqrestore(&psig->siglock, flags); |
Roland McGrath | 2b2a1ff | 2008-07-25 19:45:54 -0700 | [diff] [blame] | 1579 | |
Roland McGrath | 1b04624 | 2008-08-19 20:37:07 -0700 | [diff] [blame] | 1580 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1581 | } |
| 1582 | |
Tejun Heo | 75b9595 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1583 | /** |
| 1584 | * do_notify_parent_cldstop - notify parent of stopped/continued state change |
| 1585 | * @tsk: task reporting the state change |
| 1586 | * @for_ptracer: the notification is for ptracer |
| 1587 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report |
| 1588 | * |
| 1589 | * Notify @tsk's parent that the stopped/continued state has changed. If |
| 1590 |  * @for_ptracer is %false, @tsk's group leader notifies its real parent. |
| 1591 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. |
| 1592 | * |
| 1593 | * CONTEXT: |
| 1594 | * Must be called with tasklist_lock at least read locked. |
| 1595 | */ |
| 1596 | static void do_notify_parent_cldstop(struct task_struct *tsk, |
| 1597 | bool for_ptracer, int why) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1598 | { |
| 1599 | struct siginfo info; |
| 1600 | unsigned long flags; |
Oleg Nesterov | bc505a4 | 2005-09-06 15:17:32 -0700 | [diff] [blame] | 1601 | struct task_struct *parent; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1602 | struct sighand_struct *sighand; |
| 1603 | |
Tejun Heo | 75b9595 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1604 | if (for_ptracer) { |
Oleg Nesterov | bc505a4 | 2005-09-06 15:17:32 -0700 | [diff] [blame] | 1605 | parent = tsk->parent; |
Tejun Heo | 75b9595 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1606 | } else { |
Oleg Nesterov | bc505a4 | 2005-09-06 15:17:32 -0700 | [diff] [blame] | 1607 | tsk = tsk->group_leader; |
| 1608 | parent = tsk->real_parent; |
| 1609 | } |
| 1610 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1611 | info.si_signo = SIGCHLD; |
| 1612 | info.si_errno = 0; |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1613 | /* |
| 1614 | 	 * see comment in do_notify_parent() about the following 3 lines |
| 1615 | */ |
| 1616 | rcu_read_lock(); |
Oleg Nesterov | d926566 | 2009-06-17 16:27:35 -0700 | [diff] [blame] | 1617 | info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1618 | info.si_uid = __task_cred(tsk)->uid; |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1619 | rcu_read_unlock(); |
| 1620 | |
Michael Kerrisk | d8878ba | 2008-07-25 01:47:32 -0700 | [diff] [blame] | 1621 | info.si_utime = cputime_to_clock_t(tsk->utime); |
| 1622 | info.si_stime = cputime_to_clock_t(tsk->stime); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1623 | |
| 1624 | info.si_code = why; |
| 1625 | switch (why) { |
| 1626 | case CLD_CONTINUED: |
| 1627 | info.si_status = SIGCONT; |
| 1628 | break; |
| 1629 | case CLD_STOPPED: |
| 1630 | info.si_status = tsk->signal->group_exit_code & 0x7f; |
| 1631 | break; |
| 1632 | case CLD_TRAPPED: |
| 1633 | info.si_status = tsk->exit_code & 0x7f; |
| 1634 | break; |
| 1635 | default: |
| 1636 | BUG(); |
| 1637 | } |
| 1638 | |
| 1639 | sighand = parent->sighand; |
| 1640 | spin_lock_irqsave(&sighand->siglock, flags); |
| 1641 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && |
| 1642 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) |
| 1643 | __group_send_sig_info(SIGCHLD, &info, parent); |
| 1644 | /* |
| 1645 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. |
| 1646 | */ |
| 1647 | __wake_up_parent(tsk, parent); |
| 1648 | spin_unlock_irqrestore(&sighand->siglock, flags); |
| 1649 | } |
| 1650 | |
Oleg Nesterov | d5f70c0 | 2006-06-26 00:26:07 -0700 | [diff] [blame] | 1651 | static inline int may_ptrace_stop(void) |
| 1652 | { |
Oleg Nesterov | 5cb1144 | 2009-06-17 16:27:30 -0700 | [diff] [blame] | 1653 | if (!likely(task_ptrace(current))) |
Oleg Nesterov | d5f70c0 | 2006-06-26 00:26:07 -0700 | [diff] [blame] | 1654 | return 0; |
Oleg Nesterov | d5f70c0 | 2006-06-26 00:26:07 -0700 | [diff] [blame] | 1655 | /* |
| 1656 | * Are we in the middle of do_coredump? |
| 1657 | 	 * If so and our tracer is also part of the coredump, stopping |
| 1658 | 	 * is a deadlock situation, and pointless because our tracer |
| 1659 | 	 * is dead, so don't allow us to stop. |
| 1660 | * If SIGKILL was already sent before the caller unlocked |
Oleg Nesterov | 999d9fc | 2008-07-25 01:47:41 -0700 | [diff] [blame] | 1661 | * ->siglock we must see ->core_state != NULL. Otherwise it |
Oleg Nesterov | d5f70c0 | 2006-06-26 00:26:07 -0700 | [diff] [blame] | 1662 | * is safe to enter schedule(). |
| 1663 | */ |
Oleg Nesterov | 999d9fc | 2008-07-25 01:47:41 -0700 | [diff] [blame] | 1664 | if (unlikely(current->mm->core_state) && |
Oleg Nesterov | d5f70c0 | 2006-06-26 00:26:07 -0700 | [diff] [blame] | 1665 | unlikely(current->mm == current->parent->mm)) |
| 1666 | return 0; |
| 1667 | |
| 1668 | return 1; |
| 1669 | } |
| 1670 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1671 | /* |
Roland McGrath | 1a669c2 | 2008-02-06 01:37:37 -0800 | [diff] [blame] | 1672 | * Return nonzero if there is a SIGKILL that should be waking us up. |
| 1673 | * Called with the siglock held. |
| 1674 | */ |
| 1675 | static int sigkill_pending(struct task_struct *tsk) |
| 1676 | { |
Oleg Nesterov | 3d749b9 | 2008-07-25 01:47:37 -0700 | [diff] [blame] | 1677 | return sigismember(&tsk->pending.signal, SIGKILL) || |
| 1678 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); |
Roland McGrath | 1a669c2 | 2008-02-06 01:37:37 -0800 | [diff] [blame] | 1679 | } |
| 1680 | |
| 1681 | /* |
Tejun Heo | ceb6bd6 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1682 | * Test whether the target task of the usual cldstop notification - the |
| 1683 | * real_parent of @child - is in the same group as the ptracer. |
| 1684 | */ |
| 1685 | static bool real_parent_is_ptracer(struct task_struct *child) |
| 1686 | { |
| 1687 | return same_thread_group(child->parent, child->real_parent); |
| 1688 | } |
| 1689 | |
| 1690 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1691 | * This must be called with current->sighand->siglock held. |
| 1692 | * |
| 1693 | * This should be the path for all ptrace stops. |
| 1694 | * We always set current->last_siginfo while stopped here. |
| 1695 | * That makes it a way to test a stopped process for |
| 1696 | * being ptrace-stopped vs being job-control-stopped. |
| 1697 | * |
Oleg Nesterov | 20686a3 | 2008-02-08 04:19:03 -0800 | [diff] [blame] | 1698 | * If we actually decide not to stop at all because the tracer |
| 1699 | * is gone, we keep current->exit_code unless clear_code. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1700 | */ |
Tejun Heo | fe1bc6a | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1701 | static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) |
Namhyung Kim | b840115 | 2010-10-27 15:34:07 -0700 | [diff] [blame] | 1702 | __releases(¤t->sighand->siglock) |
| 1703 | __acquires(¤t->sighand->siglock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1704 | { |
Tejun Heo | ceb6bd6 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1705 | bool gstop_done = false; |
| 1706 | |
Roland McGrath | 1a669c2 | 2008-02-06 01:37:37 -0800 | [diff] [blame] | 1707 | if (arch_ptrace_stop_needed(exit_code, info)) { |
| 1708 | /* |
| 1709 | * The arch code has something special to do before a |
| 1710 | * ptrace stop. This is allowed to block, e.g. for faults |
| 1711 | * on user stack pages. We can't keep the siglock while |
| 1712 | * calling arch_ptrace_stop, so we must release it now. |
| 1713 | * To preserve proper semantics, we must do this before |
| 1714 | * any signal bookkeeping like checking group_stop_count. |
| 1715 | * Meanwhile, a SIGKILL could come in before we retake the |
| 1716 | * siglock. That must prevent us from sleeping in TASK_TRACED. |
| 1717 | * So after regaining the lock, we must check for SIGKILL. |
| 1718 | */ |
| 1719 | spin_unlock_irq(¤t->sighand->siglock); |
| 1720 | arch_ptrace_stop(exit_code, info); |
| 1721 | spin_lock_irq(¤t->sighand->siglock); |
Oleg Nesterov | 3d749b9 | 2008-07-25 01:47:37 -0700 | [diff] [blame] | 1722 | if (sigkill_pending(current)) |
| 1723 | return; |
Roland McGrath | 1a669c2 | 2008-02-06 01:37:37 -0800 | [diff] [blame] | 1724 | } |
| 1725 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1726 | /* |
Tejun Heo | 0ae8ce1 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1727 | * If @why is CLD_STOPPED, we're trapping to participate in a group |
| 1728 | 	 * stop. Do the bookkeeping. Note that if SIGCONT was delivered |
| 1729 | * while siglock was released for the arch hook, PENDING could be |
| 1730 | * clear now. We act as if SIGCONT is received after TASK_TRACED |
| 1731 | * is entered - ignore it. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1732 | */ |
Tejun Heo | 0ae8ce1 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1733 | if (why == CLD_STOPPED && (current->group_stop & GROUP_STOP_PENDING)) |
Tejun Heo | ceb6bd6 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1734 | gstop_done = task_participate_group_stop(current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1735 | |
| 1736 | current->last_siginfo = info; |
| 1737 | current->exit_code = exit_code; |
| 1738 | |
Tejun Heo | d79fdd6 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1739 | /* |
| 1740 | * TRACED should be visible before TRAPPING is cleared; otherwise, |
| 1741 | * the tracer might fail do_wait(). |
| 1742 | */ |
| 1743 | set_current_state(TASK_TRACED); |
| 1744 | |
| 1745 | /* |
| 1746 | 	 * We're committing to trapping. Clearing GROUP_STOP_TRAPPING and the |
| 1747 | 	 * transition to TASK_TRACED should be atomic with respect to |
| 1748 | 	 * siglock. This should be done after the arch hook as siglock is |
| 1749 | * released and regrabbed across it. |
| 1750 | */ |
| 1751 | task_clear_group_stop_trapping(current); |
| 1752 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1753 | spin_unlock_irq(¤t->sighand->siglock); |
| 1754 | read_lock(&tasklist_lock); |
Oleg Nesterov | 3d749b9 | 2008-07-25 01:47:37 -0700 | [diff] [blame] | 1755 | if (may_ptrace_stop()) { |
Tejun Heo | ceb6bd6 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1756 | /* |
| 1757 | * Notify parents of the stop. |
| 1758 | * |
| 1759 | * While ptraced, there are two parents - the ptracer and |
| 1760 | * the real_parent of the group_leader. The ptracer should |
| 1761 | * know about every stop while the real parent is only |
| 1762 | * interested in the completion of group stop. The states |
| 1763 | * for the two don't interact with each other. Notify |
| 1764 | * separately unless they're gonna be duplicates. |
| 1765 | */ |
| 1766 | do_notify_parent_cldstop(current, true, why); |
| 1767 | if (gstop_done && !real_parent_is_ptracer(current)) |
| 1768 | do_notify_parent_cldstop(current, false, why); |
| 1769 | |
Miklos Szeredi | 53da1d9 | 2009-03-23 16:07:24 +0100 | [diff] [blame] | 1770 | /* |
| 1771 | * Don't want to allow preemption here, because |
| 1772 | * sys_ptrace() needs this task to be inactive. |
| 1773 | * |
| 1774 | * XXX: implement read_unlock_no_resched(). |
| 1775 | */ |
| 1776 | preempt_disable(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1777 | read_unlock(&tasklist_lock); |
Miklos Szeredi | 53da1d9 | 2009-03-23 16:07:24 +0100 | [diff] [blame] | 1778 | preempt_enable_no_resched(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1779 | schedule(); |
| 1780 | } else { |
| 1781 | /* |
| 1782 | * By the time we got the lock, our tracer went away. |
Oleg Nesterov | 6405f7f | 2008-02-08 04:19:00 -0800 | [diff] [blame] | 1783 | * Don't drop the lock yet, another tracer may come. |
Tejun Heo | ceb6bd6 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1784 | * |
| 1785 | * If @gstop_done, the ptracer went away between group stop |
| 1786 | * completion and here. During detach, it would have set |
| 1787 | * GROUP_STOP_PENDING on us and we'll re-enter TASK_STOPPED |
| 1788 | * in do_signal_stop() on return, so notifying the real |
| 1789 | * parent of the group stop completion is enough. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1790 | */ |
Tejun Heo | ceb6bd6 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1791 | if (gstop_done) |
| 1792 | do_notify_parent_cldstop(current, false, why); |
| 1793 | |
Oleg Nesterov | 6405f7f | 2008-02-08 04:19:00 -0800 | [diff] [blame] | 1794 | __set_current_state(TASK_RUNNING); |
Oleg Nesterov | 20686a3 | 2008-02-08 04:19:03 -0800 | [diff] [blame] | 1795 | if (clear_code) |
| 1796 | current->exit_code = 0; |
Oleg Nesterov | 6405f7f | 2008-02-08 04:19:00 -0800 | [diff] [blame] | 1797 | read_unlock(&tasklist_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1798 | } |
| 1799 | |
| 1800 | /* |
Roland McGrath | 13b1c3d | 2008-03-03 20:22:05 -0800 | [diff] [blame] | 1801 | * While in TASK_TRACED, we were considered "frozen enough". |
| 1802 | * Now that we woke up, it's crucial if we're supposed to be |
| 1803 | * frozen that we freeze now before running anything substantial. |
| 1804 | */ |
| 1805 | try_to_freeze(); |
| 1806 | |
| 1807 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1808 | * We are back. Now reacquire the siglock before touching |
| 1809 | * last_siginfo, so that we are sure to have synchronized with |
| 1810 | * any signal-sending on another CPU that wants to examine it. |
| 1811 | */ |
| 1812 | spin_lock_irq(¤t->sighand->siglock); |
| 1813 | current->last_siginfo = NULL; |
| 1814 | |
| 1815 | /* |
| 1816 | * Queued signals ignored us while we were stopped for tracing. |
| 1817 | * So check for any that we should take before resuming user mode. |
Roland McGrath | b74d0de | 2007-06-06 03:59:00 -0700 | [diff] [blame] | 1818 | * This sets TIF_SIGPENDING, but never clears it. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1819 | */ |
Roland McGrath | b74d0de | 2007-06-06 03:59:00 -0700 | [diff] [blame] | 1820 | recalc_sigpending_tsk(current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1821 | } |
| 1822 | |
| 1823 | void ptrace_notify(int exit_code) |
| 1824 | { |
| 1825 | siginfo_t info; |
| 1826 | |
| 1827 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); |
| 1828 | |
| 1829 | memset(&info, 0, sizeof info); |
| 1830 | info.si_signo = SIGTRAP; |
| 1831 | info.si_code = exit_code; |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 1832 | info.si_pid = task_pid_vnr(current); |
David Howells | 76aac0e | 2008-11-14 10:39:12 +1100 | [diff] [blame] | 1833 | info.si_uid = current_uid(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | |
| 1835 | /* Let the debugger run. */ |
| 1836 | spin_lock_irq(¤t->sighand->siglock); |
Tejun Heo | fe1bc6a | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1837 | ptrace_stop(exit_code, CLD_TRAPPED, 1, &info); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1838 | spin_unlock_irq(¤t->sighand->siglock); |
| 1839 | } |
| 1840 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1841 | /* |
| 1842 | * This performs the stopping for SIGSTOP and other stop signals. |
| 1843 | * We have to stop all threads in the thread group. |
| 1844 | * Returns nonzero if we've actually stopped and released the siglock. |
| 1845 | * Returns zero if we didn't stop and still hold the siglock. |
| 1846 | */ |
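/*
 * Two paths below: a task that is not ptraced sets TASK_STOPPED and, if it
 * is the last thread to stop, notifies the real parent with CLD_STOPPED; a
 * ptraced task traps via ptrace_stop() instead. Either way the stop is
 * retried if GROUP_STOP_PENDING is set again before we return.
 */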
Oleg Nesterov | a122b34 | 2006-03-28 16:11:22 -0800 | [diff] [blame] | 1847 | static int do_signal_stop(int signr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1848 | { |
| 1849 | struct signal_struct *sig = current->signal; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1850 | |
Tejun Heo | 39efa3e | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1851 | if (!(current->group_stop & GROUP_STOP_PENDING)) { |
| 1852 | unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME; |
Oleg Nesterov | f558b7e | 2008-02-04 22:27:24 -0800 | [diff] [blame] | 1853 | struct task_struct *t; |
| 1854 | |
Tejun Heo | d79fdd6 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1855 | /* signr will be recorded in task->group_stop for retries */ |
| 1856 | WARN_ON_ONCE(signr & ~GROUP_STOP_SIGMASK); |
| 1857 | |
Oleg Nesterov | 2b201a9 | 2008-07-25 01:47:31 -0700 | [diff] [blame] | 1858 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) || |
Oleg Nesterov | 573cf9a | 2008-04-30 00:52:36 -0700 | [diff] [blame] | 1859 | unlikely(signal_group_exit(sig))) |
Oleg Nesterov | f558b7e | 2008-02-04 22:27:24 -0800 | [diff] [blame] | 1860 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1861 | /* |
Tejun Heo | 408a37d | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1862 | * There is no group stop already in progress. We must |
| 1863 | * initiate one now. |
| 1864 | * |
| 1865 | * While ptraced, a task may be resumed while group stop is |
| 1866 | * still in effect and then receive a stop signal and |
| 1867 | * initiate another group stop. This deviates from the |
| 1868 | * usual behavior as two consecutive stop signals can't |
Oleg Nesterov | 780006eac | 2011-04-01 20:12:16 +0200 | [diff] [blame^] | 1869 | * cause two group stops when !ptraced. That is why we |
| 1870 | * also check !task_is_stopped(t) below. |
Tejun Heo | 408a37d | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1871 | * |
| 1872 | * The condition can be distinguished by testing whether |
| 1873 | * SIGNAL_STOP_STOPPED is already set. Don't generate |
| 1874 | * group_exit_code in such case. |
| 1875 | * |
| 1876 | * This is not necessary for SIGNAL_STOP_CONTINUED because |
| 1877 | * an intervening stop signal is required to cause two |
| 1878 | * continued events regardless of ptrace. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1879 | */ |
Tejun Heo | 408a37d | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1880 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) |
| 1881 | sig->group_exit_code = signr; |
| 1882 | else |
| 1883 | WARN_ON_ONCE(!task_ptrace(current)); |
Oleg Nesterov | a122b34 | 2006-03-28 16:11:22 -0800 | [diff] [blame] | 1884 | |
Tejun Heo | d79fdd6 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1885 | current->group_stop &= ~GROUP_STOP_SIGMASK; |
| 1886 | current->group_stop |= signr | gstop; |
Roland McGrath | ae6d2ed | 2009-09-23 15:56:53 -0700 | [diff] [blame] | 1887 | sig->group_stop_count = 1; |
Tejun Heo | d79fdd6 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1888 | for (t = next_thread(current); t != current; |
| 1889 | t = next_thread(t)) { |
| 1890 | t->group_stop &= ~GROUP_STOP_SIGMASK; |
Oleg Nesterov | a122b34 | 2006-03-28 16:11:22 -0800 | [diff] [blame] | 1891 | /* |
| 1892 | * Setting state to TASK_STOPPED for a group |
| 1893 | * stop is always done with the siglock held, |
| 1894 | * so this check has no races. |
| 1895 | */ |
Tejun Heo | 39efa3e | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1896 | if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) { |
Tejun Heo | d79fdd6 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1897 | t->group_stop |= signr | gstop; |
Roland McGrath | ae6d2ed | 2009-09-23 15:56:53 -0700 | [diff] [blame] | 1898 | sig->group_stop_count++; |
Oleg Nesterov | a122b34 | 2006-03-28 16:11:22 -0800 | [diff] [blame] | 1899 | signal_wake_up(t, 0); |
Tejun Heo | d79fdd6 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1900 | } |
| 1901 | } |
Roland McGrath | ae6d2ed | 2009-09-23 15:56:53 -0700 | [diff] [blame] | 1902 | } |
Tejun Heo | d79fdd6 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1903 | retry: |
Tejun Heo | 5224fa3 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1904 | if (likely(!task_ptrace(current))) { |
| 1905 | int notify = 0; |
| 1906 | |
| 1907 | /* |
| 1908 | * If there are no other threads in the group, or if there |
| 1909 | * is a group stop in progress and we are the last to stop, |
| 1910 | * report to the parent. |
| 1911 | */ |
| 1912 | if (task_participate_group_stop(current)) |
| 1913 | notify = CLD_STOPPED; |
| 1914 | |
Tejun Heo | d79fdd6 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1915 | __set_current_state(TASK_STOPPED); |
Tejun Heo | 5224fa3 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1916 | spin_unlock_irq(¤t->sighand->siglock); |
| 1917 | |
Tejun Heo | 62bcf9d | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1918 | /* |
| 1919 | * Notify the parent of the group stop completion. Because |
| 1920 | * we're not holding either the siglock or tasklist_lock |
| 1921 | 		 * here, ptracer may attach in between; however, this is for |
| 1922 | * group stop and should always be delivered to the real |
| 1923 | * parent of the group leader. The new ptracer will get |
| 1924 | * its notification when this task transitions into |
| 1925 | * TASK_TRACED. |
| 1926 | */ |
Tejun Heo | 5224fa3 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1927 | if (notify) { |
| 1928 | read_lock(&tasklist_lock); |
Tejun Heo | 62bcf9d | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 1929 | do_notify_parent_cldstop(current, false, notify); |
Tejun Heo | 5224fa3 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1930 | read_unlock(&tasklist_lock); |
| 1931 | } |
| 1932 | |
| 1933 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ |
| 1934 | schedule(); |
| 1935 | |
| 1936 | spin_lock_irq(¤t->sighand->siglock); |
Tejun Heo | d79fdd6 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1937 | } else { |
| 1938 | ptrace_stop(current->group_stop & GROUP_STOP_SIGMASK, |
| 1939 | CLD_STOPPED, 0, NULL); |
| 1940 | current->exit_code = 0; |
| 1941 | } |
| 1942 | |
| 1943 | /* |
| 1944 | * GROUP_STOP_PENDING could be set if another group stop has |
| 1945 | * started since being woken up or ptrace wants us to transit |
| 1946 | * between TASK_STOPPED and TRACED. Retry group stop. |
| 1947 | */ |
| 1948 | if (current->group_stop & GROUP_STOP_PENDING) { |
| 1949 | WARN_ON_ONCE(!(current->group_stop & GROUP_STOP_SIGMASK)); |
| 1950 | goto retry; |
| 1951 | } |
| 1952 | |
| 1953 | /* PTRACE_ATTACH might have raced with task killing, clear trapping */ |
| 1954 | task_clear_group_stop_trapping(current); |
Tejun Heo | 5224fa3 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1955 | |
Roland McGrath | ae6d2ed | 2009-09-23 15:56:53 -0700 | [diff] [blame] | 1956 | spin_unlock_irq(¤t->sighand->siglock); |
| 1957 | |
Roland McGrath | ae6d2ed | 2009-09-23 15:56:53 -0700 | [diff] [blame] | 1958 | tracehook_finish_jctl(); |
Roland McGrath | ae6d2ed | 2009-09-23 15:56:53 -0700 | [diff] [blame] | 1959 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1960 | return 1; |
| 1961 | } |
| 1962 | |
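/*
 * ptrace_signal() below reports a freshly dequeued signal to the tracer
 * before delivery. The tracer may cancel it (we return 0), pass it along
 * unchanged, or substitute a different signal; a substituted signal that
 * the task currently blocks is requeued rather than delivered here.
 */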
Roland McGrath | 18c98b6 | 2008-04-17 18:44:38 -0700 | [diff] [blame] | 1963 | static int ptrace_signal(int signr, siginfo_t *info, |
| 1964 | struct pt_regs *regs, void *cookie) |
| 1965 | { |
Oleg Nesterov | 5cb1144 | 2009-06-17 16:27:30 -0700 | [diff] [blame] | 1966 | if (!task_ptrace(current)) |
Roland McGrath | 18c98b6 | 2008-04-17 18:44:38 -0700 | [diff] [blame] | 1967 | return signr; |
| 1968 | |
| 1969 | ptrace_signal_deliver(regs, cookie); |
| 1970 | |
| 1971 | /* Let the debugger run. */ |
Tejun Heo | fe1bc6a | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 1972 | ptrace_stop(signr, CLD_TRAPPED, 0, info); |
Roland McGrath | 18c98b6 | 2008-04-17 18:44:38 -0700 | [diff] [blame] | 1973 | |
| 1974 | /* We're back. Did the debugger cancel the sig? */ |
| 1975 | signr = current->exit_code; |
| 1976 | if (signr == 0) |
| 1977 | return signr; |
| 1978 | |
| 1979 | current->exit_code = 0; |
| 1980 | |
| 1981 | /* Update the siginfo structure if the signal has |
| 1982 | changed. If the debugger wanted something |
| 1983 | specific in the siginfo structure then it should |
| 1984 | have updated *info via PTRACE_SETSIGINFO. */ |
| 1985 | if (signr != info->si_signo) { |
| 1986 | info->si_signo = signr; |
| 1987 | info->si_errno = 0; |
| 1988 | info->si_code = SI_USER; |
| 1989 | info->si_pid = task_pid_vnr(current->parent); |
David Howells | c69e8d9 | 2008-11-14 10:39:19 +1100 | [diff] [blame] | 1990 | info->si_uid = task_uid(current->parent); |
Roland McGrath | 18c98b6 | 2008-04-17 18:44:38 -0700 | [diff] [blame] | 1991 | } |
| 1992 | |
| 1993 | /* If the (new) signal is now blocked, requeue it. */ |
| 1994 | if (sigismember(¤t->blocked, signr)) { |
| 1995 | specific_send_sig_info(signr, info, current); |
| 1996 | signr = 0; |
| 1997 | } |
| 1998 | |
| 1999 | return signr; |
| 2000 | } |
| 2001 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2002 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, |
| 2003 | struct pt_regs *regs, void *cookie) |
| 2004 | { |
Oleg Nesterov | f6b76d4 | 2008-04-30 00:52:47 -0700 | [diff] [blame] | 2005 | struct sighand_struct *sighand = current->sighand; |
| 2006 | struct signal_struct *signal = current->signal; |
| 2007 | int signr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2008 | |
Roland McGrath | 13b1c3d | 2008-03-03 20:22:05 -0800 | [diff] [blame] | 2009 | relock: |
| 2010 | /* |
| 2011 | * We'll jump back here after any time we were stopped in TASK_STOPPED. |
| 2012 | * While in TASK_STOPPED, we were considered "frozen enough". |
| 2013 | * Now that we woke up, it's crucial that, if we're supposed to be |
| 2014 | * frozen, we freeze now before running anything substantial. |
| 2015 | */ |
Rafael J. Wysocki | fc558a7 | 2006-03-23 03:00:05 -0800 | [diff] [blame] | 2016 | try_to_freeze(); |
| 2017 | |
Oleg Nesterov | f6b76d4 | 2008-04-30 00:52:47 -0700 | [diff] [blame] | 2018 | spin_lock_irq(&sighand->siglock); |
Oleg Nesterov | 021e1ae | 2008-04-30 00:53:00 -0700 | [diff] [blame] | 2019 | /* |
| 2020 | * Every stopped thread goes here after wakeup. Check to see if |
| 2021 | * we should notify the parent; prepare_signal(SIGCONT) encodes |
| 2022 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. |
| 2023 | */ |
Oleg Nesterov | f6b76d4 | 2008-04-30 00:52:47 -0700 | [diff] [blame] | 2024 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
Tejun Heo | 75b9595 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 2025 | struct task_struct *leader; |
Tejun Heo | c672af3 | 2011-03-23 10:36:59 +0100 | [diff] [blame] | 2026 | int why; |
| 2027 | |
| 2028 | if (signal->flags & SIGNAL_CLD_CONTINUED) |
| 2029 | why = CLD_CONTINUED; |
| 2030 | else |
| 2031 | why = CLD_STOPPED; |
| 2032 | |
Oleg Nesterov | f6b76d4 | 2008-04-30 00:52:47 -0700 | [diff] [blame] | 2033 | signal->flags &= ~SIGNAL_CLD_MASK; |
Roland McGrath | ae6d2ed | 2009-09-23 15:56:53 -0700 | [diff] [blame] | 2034 | |
Oleg Nesterov | f6b76d4 | 2008-04-30 00:52:47 -0700 | [diff] [blame] | 2035 | spin_unlock_irq(&sighand->siglock); |
Oleg Nesterov | e442055 | 2008-04-30 00:52:44 -0700 | [diff] [blame] | 2036 | |
Tejun Heo | ceb6bd6 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 2037 | /* |
| 2038 | * Notify the parent that we're continuing. This event is |
| 2039 | * always per-process and doesn't make a whole lot of sense |
| 2040 | * for ptracers, who shouldn't consume the state via |
| 2041 | * wait(2) either, but, for backward compatibility, notify |
| 2042 | * the ptracer of the group leader too unless it would be |
| 2043 | * a duplicate. |
| 2044 | */ |
Tejun Heo | edf2ed1 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 2045 | read_lock(&tasklist_lock); |
Tejun Heo | ceb6bd6 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 2046 | |
| 2047 | do_notify_parent_cldstop(current, false, why); |
| 2048 | |
Tejun Heo | 75b9595 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 2049 | leader = current->group_leader; |
Tejun Heo | ceb6bd6 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 2050 | if (task_ptrace(leader) && !real_parent_is_ptracer(leader)) |
| 2051 | do_notify_parent_cldstop(leader, true, why); |
| 2052 | |
Tejun Heo | edf2ed1 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 2053 | read_unlock(&tasklist_lock); |
Tejun Heo | ceb6bd6 | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 2054 | |
Oleg Nesterov | e442055 | 2008-04-30 00:52:44 -0700 | [diff] [blame] | 2055 | goto relock; |
| 2056 | } |
| 2057 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2058 | for (;;) { |
| 2059 | struct k_sigaction *ka; |
Roland McGrath | 7bcf6a2 | 2008-07-25 19:45:53 -0700 | [diff] [blame] | 2060 | /* |
| 2061 | * Tracing can induce an artificial signal and choose sigaction. |
| 2062 | * The return value in @signr determines the default action, |
| 2063 | * but @info->si_signo is the signal number we will report. |
| 2064 | */ |
| 2065 | signr = tracehook_get_signal(current, regs, info, return_ka); |
| 2066 | if (unlikely(signr < 0)) |
| 2067 | goto relock; |
| 2068 | if (unlikely(signr != 0)) |
| 2069 | ka = return_ka; |
| 2070 | else { |
Tejun Heo | 39efa3e | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 2071 | if (unlikely(current->group_stop & |
| 2072 | GROUP_STOP_PENDING) && do_signal_stop(0)) |
Oleg Nesterov | 1be5396 | 2009-12-15 16:47:26 -0800 | [diff] [blame] | 2073 | goto relock; |
| 2074 | |
Roland McGrath | 7bcf6a2 | 2008-07-25 19:45:53 -0700 | [diff] [blame] | 2075 | signr = dequeue_signal(current, ¤t->blocked, |
| 2076 | info); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2077 | |
Roland McGrath | 18c98b6 | 2008-04-17 18:44:38 -0700 | [diff] [blame] | 2078 | if (!signr) |
Roland McGrath | 7bcf6a2 | 2008-07-25 19:45:53 -0700 | [diff] [blame] | 2079 | break; /* will return 0 */ |
| 2080 | |
| 2081 | if (signr != SIGKILL) { |
| 2082 | signr = ptrace_signal(signr, info, |
| 2083 | regs, cookie); |
| 2084 | if (!signr) |
| 2085 | continue; |
| 2086 | } |
| 2087 | |
| 2088 | ka = &sighand->action[signr-1]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2089 | } |
| 2090 | |
Masami Hiramatsu | f9d4257 | 2009-11-24 16:56:51 -0500 | [diff] [blame] | 2091 | /* Trace actually delivered signals. */ |
| 2092 | trace_signal_deliver(signr, info, ka); |
| 2093 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2094 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
| 2095 | continue; |
| 2096 | if (ka->sa.sa_handler != SIG_DFL) { |
| 2097 | /* Run the handler. */ |
| 2098 | *return_ka = *ka; |
| 2099 | |
| 2100 | if (ka->sa.sa_flags & SA_ONESHOT) |
| 2101 | ka->sa.sa_handler = SIG_DFL; |
| 2102 | |
| 2103 | break; /* will return non-zero "signr" value */ |
| 2104 | } |
| 2105 | |
| 2106 | /* |
| 2107 | * Now we are doing the default action for this signal. |
| 2108 | */ |
| 2109 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ |
| 2110 | continue; |
| 2111 | |
Sukadev Bhattiprolu | 84d7378 | 2006-12-08 02:38:01 -0800 | [diff] [blame] | 2112 | /* |
Sukadev Bhattiprolu | 0fbc26a | 2007-10-18 23:40:13 -0700 | [diff] [blame] | 2113 | * Global init gets no signals it doesn't want. |
Sukadev Bhattiprolu | b3bfa0c | 2009-04-02 16:58:08 -0700 | [diff] [blame] | 2114 | * Container-init gets no signals it doesn't want from the same |
| 2115 | * container. |
| 2116 | * |
| 2117 | * Note that if global/container-init sees a sig_kernel_only() |
| 2118 | * signal here, the signal must have been generated internally |
| 2119 | * or must have come from an ancestor namespace. In either |
| 2120 | * case, the signal cannot be dropped. |
Sukadev Bhattiprolu | 84d7378 | 2006-12-08 02:38:01 -0800 | [diff] [blame] | 2121 | */ |
Oleg Nesterov | fae5fa4 | 2008-04-30 00:53:03 -0700 | [diff] [blame] | 2122 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && |
Sukadev Bhattiprolu | b3bfa0c | 2009-04-02 16:58:08 -0700 | [diff] [blame] | 2123 | !sig_kernel_only(signr)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2124 | continue; |
| 2125 | |
| 2126 | if (sig_kernel_stop(signr)) { |
| 2127 | /* |
| 2128 | * The default action is to stop all threads in |
| 2129 | * the thread group. The job control signals |
| 2130 | * do nothing in an orphaned pgrp, but SIGSTOP |
| 2131 | * always works. Note that siglock needs to be |
| 2132 | * dropped during the call to is_orphaned_pgrp() |
| 2133 | * because of lock ordering with tasklist_lock. |
| 2134 | * This allows an intervening SIGCONT to be posted. |
| 2135 | * We need to check for that and bail out if necessary. |
| 2136 | */ |
| 2137 | if (signr != SIGSTOP) { |
Oleg Nesterov | f6b76d4 | 2008-04-30 00:52:47 -0700 | [diff] [blame] | 2138 | spin_unlock_irq(&sighand->siglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2139 | |
| 2140 | /* signals can be posted during this window */ |
| 2141 | |
Eric W. Biederman | 3e7cd6c | 2007-02-12 00:52:58 -0800 | [diff] [blame] | 2142 | if (is_current_pgrp_orphaned()) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2143 | goto relock; |
| 2144 | |
Oleg Nesterov | f6b76d4 | 2008-04-30 00:52:47 -0700 | [diff] [blame] | 2145 | spin_lock_irq(&sighand->siglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2146 | } |
| 2147 | |
Roland McGrath | 7bcf6a2 | 2008-07-25 19:45:53 -0700 | [diff] [blame] | 2148 | if (likely(do_signal_stop(info->si_signo))) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2149 | /* It released the siglock. */ |
| 2150 | goto relock; |
| 2151 | } |
| 2152 | |
| 2153 | /* |
| 2154 | * We didn't actually stop, due to a race |
| 2155 | * with SIGCONT or something like that. |
| 2156 | */ |
| 2157 | continue; |
| 2158 | } |
| 2159 | |
Oleg Nesterov | f6b76d4 | 2008-04-30 00:52:47 -0700 | [diff] [blame] | 2160 | spin_unlock_irq(&sighand->siglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2161 | |
| 2162 | /* |
| 2163 | * Anything else is fatal, maybe with a core dump. |
| 2164 | */ |
| 2165 | current->flags |= PF_SIGNALED; |
Oleg Nesterov | 2dce81b | 2008-04-30 00:52:58 -0700 | [diff] [blame] | 2166 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2167 | if (sig_kernel_coredump(signr)) { |
Oleg Nesterov | 2dce81b | 2008-04-30 00:52:58 -0700 | [diff] [blame] | 2168 | if (print_fatal_signals) |
Roland McGrath | 7bcf6a2 | 2008-07-25 19:45:53 -0700 | [diff] [blame] | 2169 | print_fatal_signal(regs, info->si_signo); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2170 | /* |
| 2171 | * If it was able to dump core, this kills all |
| 2172 | * other threads in the group and synchronizes with |
| 2173 | * their demise. If we lost the race with another |
| 2174 | * thread getting here, it set group_exit_code |
| 2175 | * first and our do_group_exit call below will use |
| 2176 | * that value and ignore the one we pass it. |
| 2177 | */ |
Roland McGrath | 7bcf6a2 | 2008-07-25 19:45:53 -0700 | [diff] [blame] | 2178 | do_coredump(info->si_signo, info->si_signo, regs); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2179 | } |
| 2180 | |
| 2181 | /* |
| 2182 | * Death signals, no core dump. |
| 2183 | */ |
Roland McGrath | 7bcf6a2 | 2008-07-25 19:45:53 -0700 | [diff] [blame] | 2184 | do_group_exit(info->si_signo); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2185 | /* NOTREACHED */ |
| 2186 | } |
Oleg Nesterov | f6b76d4 | 2008-04-30 00:52:47 -0700 | [diff] [blame] | 2187 | spin_unlock_irq(&sighand->siglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2188 | return signr; |
| 2189 | } |
| 2190 | |
Oleg Nesterov | d12619b | 2008-02-08 04:19:12 -0800 | [diff] [blame] | 2191 | void exit_signals(struct task_struct *tsk) |
| 2192 | { |
| 2193 | int group_stop = 0; |
Oleg Nesterov | 5dee170 | 2008-02-08 04:19:13 -0800 | [diff] [blame] | 2194 | struct task_struct *t; |
Oleg Nesterov | d12619b | 2008-02-08 04:19:12 -0800 | [diff] [blame] | 2195 | |
Oleg Nesterov | 5dee170 | 2008-02-08 04:19:13 -0800 | [diff] [blame] | 2196 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
| 2197 | tsk->flags |= PF_EXITING; |
| 2198 | return; |
Oleg Nesterov | d12619b | 2008-02-08 04:19:12 -0800 | [diff] [blame] | 2199 | } |
| 2200 | |
Oleg Nesterov | 5dee170 | 2008-02-08 04:19:13 -0800 | [diff] [blame] | 2201 | spin_lock_irq(&tsk->sighand->siglock); |
Oleg Nesterov | d12619b | 2008-02-08 04:19:12 -0800 | [diff] [blame] | 2202 | /* |
| 2203 | * From now this task is not visible for group-wide signals, |
| 2204 | * see wants_signal(), do_signal_stop(). |
| 2205 | */ |
| 2206 | tsk->flags |= PF_EXITING; |
Oleg Nesterov | 5dee170 | 2008-02-08 04:19:13 -0800 | [diff] [blame] | 2207 | if (!signal_pending(tsk)) |
| 2208 | goto out; |
| 2209 | |
| 2210 | /* It could be that __group_complete_signal() chose us to |
| 2211 | * notify about a group-wide signal. Another thread should be |
| 2212 | * woken now to take the signal since we will not. |
| 2213 | */ |
| 2214 | for (t = tsk; (t = next_thread(t)) != tsk; ) |
| 2215 | if (!signal_pending(t) && !(t->flags & PF_EXITING)) |
| 2216 | recalc_sigpending_and_wake(t); |
| 2217 | |
Tejun Heo | 39efa3e | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 2218 | if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) && |
Tejun Heo | e5c1902e | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 2219 | task_participate_group_stop(tsk)) |
Tejun Heo | edf2ed1 | 2011-03-23 10:37:00 +0100 | [diff] [blame] | 2220 | group_stop = CLD_STOPPED; |
Oleg Nesterov | 5dee170 | 2008-02-08 04:19:13 -0800 | [diff] [blame] | 2221 | out: |
Oleg Nesterov | d12619b | 2008-02-08 04:19:12 -0800 | [diff] [blame] | 2222 | spin_unlock_irq(&tsk->sighand->siglock); |
| 2223 | |
Tejun Heo | 62bcf9d | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 2224 | /* |
| 2225 | * If group stop has completed, deliver the notification. This |
| 2226 | * should always go to the real parent of the group leader. |
| 2227 | */ |
Roland McGrath | ae6d2ed | 2009-09-23 15:56:53 -0700 | [diff] [blame] | 2228 | if (unlikely(group_stop)) { |
Oleg Nesterov | d12619b | 2008-02-08 04:19:12 -0800 | [diff] [blame] | 2229 | read_lock(&tasklist_lock); |
Tejun Heo | 62bcf9d | 2011-03-23 10:37:01 +0100 | [diff] [blame] | 2230 | do_notify_parent_cldstop(tsk, false, group_stop); |
Oleg Nesterov | d12619b | 2008-02-08 04:19:12 -0800 | [diff] [blame] | 2231 | read_unlock(&tasklist_lock); |
| 2232 | } |
| 2233 | } |
| 2234 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2235 | EXPORT_SYMBOL(recalc_sigpending); |
| 2236 | EXPORT_SYMBOL_GPL(dequeue_signal); |
| 2237 | EXPORT_SYMBOL(flush_signals); |
| 2238 | EXPORT_SYMBOL(force_sig); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2239 | EXPORT_SYMBOL(send_sig); |
| 2240 | EXPORT_SYMBOL(send_sig_info); |
| 2241 | EXPORT_SYMBOL(sigprocmask); |
| 2242 | EXPORT_SYMBOL(block_all_signals); |
| 2243 | EXPORT_SYMBOL(unblock_all_signals); |
| 2244 | |
| 2245 | |
| 2246 | /* |
| 2247 | * System call entry points. |
| 2248 | */ |
| 2249 | |
Heiko Carstens | 754fe8d | 2009-01-14 14:14:09 +0100 | [diff] [blame] | 2250 | SYSCALL_DEFINE0(restart_syscall) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2251 | { |
| 2252 | struct restart_block *restart = ¤t_thread_info()->restart_block; |
| 2253 | return restart->fn(restart); |
| 2254 | } |
| 2255 | |
| 2256 | long do_no_restart_syscall(struct restart_block *param) |
| 2257 | { |
| 2258 | return -EINTR; |
| 2259 | } |
| 2260 | |
| 2261 | /* |
| 2262 | * We don't need to get the kernel lock - this is all local to this |
| 2263 | * particular thread. (And that's good, because this is _heavily_ |
| 2264 | * used by various programs.) |
| 2265 | */ |
| 2266 | |
| 2267 | /* |
| 2268 | * This is also useful for kernel threads that want to temporarily |
| 2269 | * (or permanently) block certain signals. |
| 2270 | * |
| 2271 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel |
| 2272 | * interface happily blocks "unblockable" signals like SIGKILL |
| 2273 | * and friends. |
| 2274 | */ |
| 2275 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) |
| 2276 | { |
| 2277 | int error; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2278 | |
| 2279 | spin_lock_irq(¤t->sighand->siglock); |
Oleg Nesterov | a26fd33 | 2006-03-23 03:00:49 -0800 | [diff] [blame] | 2280 | if (oldset) |
| 2281 | *oldset = current->blocked; |
| 2282 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2283 | error = 0; |
| 2284 | switch (how) { |
| 2285 | case SIG_BLOCK: |
| 2286 | sigorsets(¤t->blocked, ¤t->blocked, set); |
| 2287 | break; |
| 2288 | case SIG_UNBLOCK: |
| 2289 | signandsets(¤t->blocked, ¤t->blocked, set); |
| 2290 | break; |
| 2291 | case SIG_SETMASK: |
| 2292 | current->blocked = *set; |
| 2293 | break; |
| 2294 | default: |
| 2295 | error = -EINVAL; |
| 2296 | } |
| 2297 | recalc_sigpending(); |
| 2298 | spin_unlock_irq(¤t->sighand->siglock); |
Oleg Nesterov | a26fd33 | 2006-03-23 03:00:49 -0800 | [diff] [blame] | 2299 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2300 | return error; |
| 2301 | } |
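/*
 * Illustrative userspace sketch (not part of signal.c): blocking SIGINT
 * around a critical section via the glibc sigprocmask() wrapper, which
 * reaches sys_rt_sigprocmask() above. Unlike the in-kernel helper, the
 * syscall path silently ignores attempts to block SIGKILL/SIGSTOP.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	if (sigprocmask(SIG_BLOCK, &block, &old) == -1) {
		perror("sigprocmask");
		return 1;
	}
	/* ... critical section: SIGINT stays pending here ... */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore the previous mask */
	return 0;
}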
| 2302 | |
Heiko Carstens | 17da2bd | 2009-01-14 14:14:10 +0100 | [diff] [blame] | 2303 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, |
| 2304 | sigset_t __user *, oset, size_t, sigsetsize) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2305 | { |
| 2306 | int error = -EINVAL; |
| 2307 | sigset_t old_set, new_set; |
| 2308 | |
| 2309 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
| 2310 | if (sigsetsize != sizeof(sigset_t)) |
| 2311 | goto out; |
| 2312 | |
| 2313 | if (set) { |
| 2314 | error = -EFAULT; |
| 2315 | if (copy_from_user(&new_set, set, sizeof(*set))) |
| 2316 | goto out; |
| 2317 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
| 2318 | |
| 2319 | error = sigprocmask(how, &new_set, &old_set); |
| 2320 | if (error) |
| 2321 | goto out; |
| 2322 | if (oset) |
| 2323 | goto set_old; |
| 2324 | } else if (oset) { |
| 2325 | spin_lock_irq(¤t->sighand->siglock); |
| 2326 | old_set = current->blocked; |
| 2327 | spin_unlock_irq(¤t->sighand->siglock); |
| 2328 | |
| 2329 | set_old: |
| 2330 | error = -EFAULT; |
| 2331 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
| 2332 | goto out; |
| 2333 | } |
| 2334 | error = 0; |
| 2335 | out: |
| 2336 | return error; |
| 2337 | } |
| 2338 | |
| 2339 | long do_sigpending(void __user *set, unsigned long sigsetsize) |
| 2340 | { |
| 2341 | long error = -EINVAL; |
| 2342 | sigset_t pending; |
| 2343 | |
| 2344 | if (sigsetsize > sizeof(sigset_t)) |
| 2345 | goto out; |
| 2346 | |
| 2347 | spin_lock_irq(¤t->sighand->siglock); |
| 2348 | sigorsets(&pending, ¤t->pending.signal, |
| 2349 | ¤t->signal->shared_pending.signal); |
| 2350 | spin_unlock_irq(¤t->sighand->siglock); |
| 2351 | |
| 2352 | /* Outside the lock because only this thread touches it. */ |
| 2353 | sigandsets(&pending, ¤t->blocked, &pending); |
| 2354 | |
| 2355 | error = -EFAULT; |
| 2356 | if (!copy_to_user(set, &pending, sigsetsize)) |
| 2357 | error = 0; |
| 2358 | |
| 2359 | out: |
| 2360 | return error; |
| 2361 | } |
| 2362 | |
Heiko Carstens | 17da2bd | 2009-01-14 14:14:10 +0100 | [diff] [blame] | 2363 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2364 | { |
| 2365 | return do_sigpending(set, sigsetsize); |
| 2366 | } |
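/*
 * Illustrative userspace sketch (not part of signal.c): inspecting pending
 * signals with sigpending() while they are blocked; the set reported is the
 * pending-and-blocked intersection computed in do_sigpending() above.
 */
#include <signal.h>
#include <stdio.h>

void report_pending(void)
{
	sigset_t pending;

	if (sigpending(&pending) == 0 && sigismember(&pending, SIGTERM))
		printf("SIGTERM is pending (currently blocked)\n");
}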
| 2367 | |
| 2368 | #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER |
| 2369 | |
| 2370 | int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) |
| 2371 | { |
| 2372 | int err; |
| 2373 | |
| 2374 | if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) |
| 2375 | return -EFAULT; |
| 2376 | if (from->si_code < 0) |
| 2377 | return __copy_to_user(to, from, sizeof(siginfo_t)) |
| 2378 | ? -EFAULT : 0; |
| 2379 | /* |
| 2380 | * If you change siginfo_t structure, please be sure |
| 2381 | * this code is fixed accordingly. |
Davide Libenzi | fba2afa | 2007-05-10 22:23:13 -0700 | [diff] [blame] | 2382 | * Please remember to update the signalfd_copyinfo() function |
| 2383 | * inside fs/signalfd.c too, in case siginfo_t changes. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2384 | * It should never copy any pad contained in the structure |
| 2385 | * to avoid security leaks, but must copy the generic |
| 2386 | * 3 ints plus the relevant union member. |
| 2387 | */ |
| 2388 | err = __put_user(from->si_signo, &to->si_signo); |
| 2389 | err |= __put_user(from->si_errno, &to->si_errno); |
| 2390 | err |= __put_user((short)from->si_code, &to->si_code); |
| 2391 | switch (from->si_code & __SI_MASK) { |
| 2392 | case __SI_KILL: |
| 2393 | err |= __put_user(from->si_pid, &to->si_pid); |
| 2394 | err |= __put_user(from->si_uid, &to->si_uid); |
| 2395 | break; |
| 2396 | case __SI_TIMER: |
| 2397 | err |= __put_user(from->si_tid, &to->si_tid); |
| 2398 | err |= __put_user(from->si_overrun, &to->si_overrun); |
| 2399 | err |= __put_user(from->si_ptr, &to->si_ptr); |
| 2400 | break; |
| 2401 | case __SI_POLL: |
| 2402 | err |= __put_user(from->si_band, &to->si_band); |
| 2403 | err |= __put_user(from->si_fd, &to->si_fd); |
| 2404 | break; |
| 2405 | case __SI_FAULT: |
| 2406 | err |= __put_user(from->si_addr, &to->si_addr); |
| 2407 | #ifdef __ARCH_SI_TRAPNO |
| 2408 | err |= __put_user(from->si_trapno, &to->si_trapno); |
| 2409 | #endif |
Andi Kleen | a337fda | 2010-09-27 20:32:19 +0200 | [diff] [blame] | 2410 | #ifdef BUS_MCEERR_AO |
| 2411 | /* |
| 2412 | * Other callers might not initialize the si_lsb field, |
| 2413 | * so check explicitly for the right codes here. |
| 2414 | */ |
| 2415 | if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) |
| 2416 | err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); |
| 2417 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2418 | break; |
| 2419 | case __SI_CHLD: |
| 2420 | err |= __put_user(from->si_pid, &to->si_pid); |
| 2421 | err |= __put_user(from->si_uid, &to->si_uid); |
| 2422 | err |= __put_user(from->si_status, &to->si_status); |
| 2423 | err |= __put_user(from->si_utime, &to->si_utime); |
| 2424 | err |= __put_user(from->si_stime, &to->si_stime); |
| 2425 | break; |
| 2426 | case __SI_RT: /* This is not generated by the kernel as of now. */ |
| 2427 | case __SI_MESGQ: /* But this is */ |
| 2428 | err |= __put_user(from->si_pid, &to->si_pid); |
| 2429 | err |= __put_user(from->si_uid, &to->si_uid); |
| 2430 | err |= __put_user(from->si_ptr, &to->si_ptr); |
| 2431 | break; |
| 2432 | default: /* this is just in case for now ... */ |
| 2433 | err |= __put_user(from->si_pid, &to->si_pid); |
| 2434 | err |= __put_user(from->si_uid, &to->si_uid); |
| 2435 | break; |
| 2436 | } |
| 2437 | return err; |
| 2438 | } |
| 2439 | |
| 2440 | #endif |
| 2441 | |
Heiko Carstens | 17da2bd | 2009-01-14 14:14:10 +0100 | [diff] [blame] | 2442 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
| 2443 | siginfo_t __user *, uinfo, const struct timespec __user *, uts, |
| 2444 | size_t, sigsetsize) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2445 | { |
| 2446 | int ret, sig; |
| 2447 | sigset_t these; |
| 2448 | struct timespec ts; |
| 2449 | siginfo_t info; |
| 2450 | long timeout = 0; |
| 2451 | |
| 2452 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
| 2453 | if (sigsetsize != sizeof(sigset_t)) |
| 2454 | return -EINVAL; |
| 2455 | |
| 2456 | if (copy_from_user(&these, uthese, sizeof(these))) |
| 2457 | return -EFAULT; |
| 2458 | |
| 2459 | /* |
| 2460 | * Invert the set of allowed signals to get those we |
| 2461 | * want to block. |
| 2462 | */ |
| 2463 | sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
| 2464 | signotset(&these); |
| 2465 | |
| 2466 | if (uts) { |
| 2467 | if (copy_from_user(&ts, uts, sizeof(ts))) |
| 2468 | return -EFAULT; |
| 2469 | if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 |
| 2470 | || ts.tv_sec < 0) |
| 2471 | return -EINVAL; |
| 2472 | } |
| 2473 | |
| 2474 | spin_lock_irq(¤t->sighand->siglock); |
| 2475 | sig = dequeue_signal(current, &these, &info); |
| 2476 | if (!sig) { |
| 2477 | timeout = MAX_SCHEDULE_TIMEOUT; |
| 2478 | if (uts) |
| 2479 | timeout = (timespec_to_jiffies(&ts) |
| 2480 | + (ts.tv_sec || ts.tv_nsec)); |
| 2481 | |
| 2482 | if (timeout) { |
| 2483 | /* None ready -- temporarily unblock those we're |
| 2484 | * interested in while we are sleeping so that we'll |
| 2485 | * be awakened when they arrive. */ |
| 2486 | current->real_blocked = current->blocked; |
| 2487 | sigandsets(¤t->blocked, ¤t->blocked, &these); |
| 2488 | recalc_sigpending(); |
| 2489 | spin_unlock_irq(¤t->sighand->siglock); |
| 2490 | |
Nishanth Aravamudan | 75bcc8c | 2005-09-10 00:27:24 -0700 | [diff] [blame] | 2491 | timeout = schedule_timeout_interruptible(timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2492 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2493 | spin_lock_irq(¤t->sighand->siglock); |
| 2494 | sig = dequeue_signal(current, &these, &info); |
| 2495 | current->blocked = current->real_blocked; |
| 2496 | siginitset(¤t->real_blocked, 0); |
| 2497 | recalc_sigpending(); |
| 2498 | } |
| 2499 | } |
| 2500 | spin_unlock_irq(¤t->sighand->siglock); |
| 2501 | |
| 2502 | if (sig) { |
| 2503 | ret = sig; |
| 2504 | if (uinfo) { |
| 2505 | if (copy_siginfo_to_user(uinfo, &info)) |
| 2506 | ret = -EFAULT; |
| 2507 | } |
| 2508 | } else { |
| 2509 | ret = -EAGAIN; |
| 2510 | if (timeout) |
| 2511 | ret = -EINTR; |
| 2512 | } |
| 2513 | |
| 2514 | return ret; |
| 2515 | } |
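/*
 * Illustrative userspace sketch (not part of signal.c): waiting for SIGUSR1
 * with a timeout via sigtimedwait(), the glibc wrapper over
 * sys_rt_sigtimedwait() above. The signal must be blocked first so it is
 * left queued rather than delivered to a handler.
 */
#include <signal.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 5, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	if (sigtimedwait(&set, &info, &timeout) == -1) {
		if (errno == EAGAIN)
			printf("timed out\n");
		else
			perror("sigtimedwait");
		return 1;
	}
	printf("got signal %d from pid %d\n", info.si_signo, (int)info.si_pid);
	return 0;
}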
| 2516 | |
Heiko Carstens | 17da2bd | 2009-01-14 14:14:10 +0100 | [diff] [blame] | 2517 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2518 | { |
| 2519 | struct siginfo info; |
| 2520 | |
| 2521 | info.si_signo = sig; |
| 2522 | info.si_errno = 0; |
| 2523 | info.si_code = SI_USER; |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 2524 | info.si_pid = task_tgid_vnr(current); |
David Howells | 76aac0e | 2008-11-14 10:39:12 +1100 | [diff] [blame] | 2525 | info.si_uid = current_uid(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2526 | |
| 2527 | return kill_something_info(sig, &info, pid); |
| 2528 | } |
| 2529 | |
Thomas Gleixner | 30b4ae8 | 2009-04-04 21:01:01 +0000 | [diff] [blame] | 2530 | static int |
| 2531 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) |
Vadim Lobanov | 6dd69f1 | 2005-10-30 15:02:18 -0800 | [diff] [blame] | 2532 | { |
Vadim Lobanov | 6dd69f1 | 2005-10-30 15:02:18 -0800 | [diff] [blame] | 2533 | struct task_struct *p; |
Thomas Gleixner | 30b4ae8 | 2009-04-04 21:01:01 +0000 | [diff] [blame] | 2534 | int error = -ESRCH; |
Vadim Lobanov | 6dd69f1 | 2005-10-30 15:02:18 -0800 | [diff] [blame] | 2535 | |
Oleg Nesterov | 3547ff3 | 2008-04-30 00:52:51 -0700 | [diff] [blame] | 2536 | rcu_read_lock(); |
Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 2537 | p = find_task_by_vpid(pid); |
Pavel Emelyanov | b488893 | 2007-10-18 23:40:14 -0700 | [diff] [blame] | 2538 | if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { |
Thomas Gleixner | 30b4ae8 | 2009-04-04 21:01:01 +0000 | [diff] [blame] | 2539 | error = check_kill_permission(sig, info, p); |
Vadim Lobanov | 6dd69f1 | 2005-10-30 15:02:18 -0800 | [diff] [blame] | 2540 | /* |
| 2541 | * The null signal is a permissions and process existence |
| 2542 | * probe. No signal is actually delivered. |
| 2543 | */ |
Oleg Nesterov | 4a30deb | 2009-09-23 15:57:00 -0700 | [diff] [blame] | 2544 | if (!error && sig) { |
| 2545 | error = do_send_sig_info(sig, info, p, false); |
| 2546 | /* |
| 2547 | * If lock_task_sighand() failed we pretend the task |
| 2548 | * dies after receiving the signal. The window is tiny, |
| 2549 | * and the signal is private anyway. |
| 2550 | */ |
| 2551 | if (unlikely(error == -ESRCH)) |
| 2552 | error = 0; |
Vadim Lobanov | 6dd69f1 | 2005-10-30 15:02:18 -0800 | [diff] [blame] | 2553 | } |
| 2554 | } |
Oleg Nesterov | 3547ff3 | 2008-04-30 00:52:51 -0700 | [diff] [blame] | 2555 | rcu_read_unlock(); |
Vadim Lobanov | 6dd69f1 | 2005-10-30 15:02:18 -0800 | [diff] [blame] | 2556 | |
| 2557 | return error; |
| 2558 | } |
| 2559 | |
Thomas Gleixner | 30b4ae8 | 2009-04-04 21:01:01 +0000 | [diff] [blame] | 2560 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
| 2561 | { |
| 2562 | struct siginfo info; |
| 2563 | |
| 2564 | info.si_signo = sig; |
| 2565 | info.si_errno = 0; |
| 2566 | info.si_code = SI_TKILL; |
| 2567 | info.si_pid = task_tgid_vnr(current); |
| 2568 | info.si_uid = current_uid(); |
| 2569 | |
| 2570 | return do_send_specific(tgid, pid, sig, &info); |
| 2571 | } |
| 2572 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2573 | /** |
| 2574 | * sys_tgkill - send signal to one specific thread |
| 2575 | * @tgid: the thread group ID of the thread |
| 2576 | * @pid: the PID of the thread |
| 2577 | * @sig: signal to be sent |
| 2578 | * |
Robert P. J. Day | 72fd4a3 | 2007-02-10 01:45:59 -0800 | [diff] [blame] | 2579 | * This syscall also checks the @tgid and returns -ESRCH even if the PID |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2580 | * exists but no longer belongs to the target process. This |
| 2581 | * method solves the problem of threads exiting and PIDs getting reused. |
| 2582 | */ |
Heiko Carstens | a5f8fa9 | 2009-01-14 14:14:11 +0100 | [diff] [blame] | 2583 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2584 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2585 | /* This is only valid for single tasks */ |
| 2586 | if (pid <= 0 || tgid <= 0) |
| 2587 | return -EINVAL; |
| 2588 | |
Vadim Lobanov | 6dd69f1 | 2005-10-30 15:02:18 -0800 | [diff] [blame] | 2589 | return do_tkill(tgid, pid, sig); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2590 | } |
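/*
 * Illustrative userspace sketch (not part of signal.c): directing a signal
 * at one specific thread with tgkill(). Many libcs expose it only through
 * syscall(); pthread_kill() is the portable equivalent for threads created
 * in the same process.
 */
#include <signal.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>

static int send_to_thread(pid_t tgid, pid_t tid, int sig)
{
	/* fails with ESRCH if tid exists but no longer belongs to tgid */
	return syscall(SYS_tgkill, tgid, tid, sig);
}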
| 2591 | |
| 2592 | /* |
| 2593 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
| 2594 | */ |
Heiko Carstens | a5f8fa9 | 2009-01-14 14:14:11 +0100 | [diff] [blame] | 2595 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2596 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2597 | /* This is only valid for single tasks */ |
| 2598 | if (pid <= 0) |
| 2599 | return -EINVAL; |
| 2600 | |
Vadim Lobanov | 6dd69f1 | 2005-10-30 15:02:18 -0800 | [diff] [blame] | 2601 | return do_tkill(0, pid, sig); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2602 | } |
| 2603 | |
Heiko Carstens | a5f8fa9 | 2009-01-14 14:14:11 +0100 | [diff] [blame] | 2604 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
| 2605 | siginfo_t __user *, uinfo) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2606 | { |
| 2607 | siginfo_t info; |
| 2608 | |
| 2609 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
| 2610 | return -EFAULT; |
| 2611 | |
| 2612 | /* Not even root can pretend to send signals from the kernel. |
Julien Tinnes | da48524 | 2011-03-18 15:05:21 -0700 | [diff] [blame] | 2613 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
| 2614 | */ |
| 2615 | if (info.si_code != SI_QUEUE) { |
| 2616 | /* We used to allow any < 0 si_code */ |
| 2617 | WARN_ON_ONCE(info.si_code < 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2618 | return -EPERM; |
Julien Tinnes | da48524 | 2011-03-18 15:05:21 -0700 | [diff] [blame] | 2619 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2620 | info.si_signo = sig; |
| 2621 | |
| 2622 | /* POSIX.1b doesn't mention process groups. */ |
| 2623 | return kill_proc_info(sig, &info, pid); |
| 2624 | } |
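/*
 * Illustrative userspace sketch (not part of signal.c): queueing a signal
 * with an attached value via sigqueue(), which glibc implements on top of
 * rt_sigqueueinfo() with si_code = SI_QUEUE -- the only si_code the check
 * above permits from userspace.
 */
#include <signal.h>
#include <sys/types.h>
#include <stdio.h>

int notify_pid(pid_t pid)
{
	union sigval value;

	value.sival_int = 42;		/* payload visible to the receiver in si_value */
	if (sigqueue(pid, SIGUSR1, value) == -1) {
		perror("sigqueue");
		return -1;
	}
	return 0;
}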
| 2625 | |
Thomas Gleixner | 62ab450 | 2009-04-04 21:01:06 +0000 | [diff] [blame] | 2626 | long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info) |
| 2627 | { |
| 2628 | /* This is only valid for single tasks */ |
| 2629 | if (pid <= 0 || tgid <= 0) |
| 2630 | return -EINVAL; |
| 2631 | |
| 2632 | /* Not even root can pretend to send signals from the kernel. |
Julien Tinnes | da48524 | 2011-03-18 15:05:21 -0700 | [diff] [blame] | 2633 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
| 2634 | */ |
| 2635 | if (info->si_code != SI_QUEUE) { |
| 2636 | /* We used to allow any < 0 si_code */ |
| 2637 | WARN_ON_ONCE(info->si_code < 0); |
Thomas Gleixner | 62ab450 | 2009-04-04 21:01:06 +0000 | [diff] [blame] | 2638 | return -EPERM; |
Julien Tinnes | da48524 | 2011-03-18 15:05:21 -0700 | [diff] [blame] | 2639 | } |
Thomas Gleixner | 62ab450 | 2009-04-04 21:01:06 +0000 | [diff] [blame] | 2640 | info->si_signo = sig; |
| 2641 | |
| 2642 | return do_send_specific(tgid, pid, sig, info); |
| 2643 | } |
| 2644 | |
| 2645 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, |
| 2646 | siginfo_t __user *, uinfo) |
| 2647 | { |
| 2648 | siginfo_t info; |
| 2649 | |
| 2650 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
| 2651 | return -EFAULT; |
| 2652 | |
| 2653 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
| 2654 | } |
| 2655 | |
Oleg Nesterov | 88531f7 | 2006-03-28 16:11:24 -0800 | [diff] [blame] | 2656 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2657 | { |
Pavel Emelyanov | 93585ee | 2008-04-30 00:52:39 -0700 | [diff] [blame] | 2658 | struct task_struct *t = current; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2659 | struct k_sigaction *k; |
George Anzinger | 71fabd5 | 2006-01-08 01:02:48 -0800 | [diff] [blame] | 2660 | sigset_t mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2661 | |
Jesper Juhl | 7ed20e1 | 2005-05-01 08:59:14 -0700 | [diff] [blame] | 2662 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2663 | return -EINVAL; |
| 2664 | |
Pavel Emelyanov | 93585ee | 2008-04-30 00:52:39 -0700 | [diff] [blame] | 2665 | k = &t->sighand->action[sig-1]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2666 | |
| 2667 | spin_lock_irq(¤t->sighand->siglock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2668 | if (oact) |
| 2669 | *oact = *k; |
| 2670 | |
| 2671 | if (act) { |
Oleg Nesterov | 9ac95f2 | 2006-02-09 22:41:50 +0300 | [diff] [blame] | 2672 | sigdelsetmask(&act->sa.sa_mask, |
| 2673 | sigmask(SIGKILL) | sigmask(SIGSTOP)); |
Oleg Nesterov | 88531f7 | 2006-03-28 16:11:24 -0800 | [diff] [blame] | 2674 | *k = *act; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2675 | /* |
| 2676 | * POSIX 3.3.1.3: |
| 2677 | * "Setting a signal action to SIG_IGN for a signal that is |
| 2678 | * pending shall cause the pending signal to be discarded, |
| 2679 | * whether or not it is blocked." |
| 2680 | * |
| 2681 | * "Setting a signal action to SIG_DFL for a signal that is |
| 2682 | * pending and whose default action is to ignore the signal |
| 2683 | * (for example, SIGCHLD), shall cause the pending signal to |
| 2684 | * be discarded, whether or not it is blocked" |
| 2685 | */ |
Roland McGrath | 35de254 | 2008-07-25 19:45:51 -0700 | [diff] [blame] | 2686 | if (sig_handler_ignored(sig_handler(t, sig), sig)) { |
George Anzinger | 71fabd5 | 2006-01-08 01:02:48 -0800 | [diff] [blame] | 2687 | sigemptyset(&mask); |
| 2688 | sigaddset(&mask, sig); |
| 2689 | rm_from_queue_full(&mask, &t->signal->shared_pending); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2690 | do { |
George Anzinger | 71fabd5 | 2006-01-08 01:02:48 -0800 | [diff] [blame] | 2691 | rm_from_queue_full(&mask, &t->pending); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2692 | t = next_thread(t); |
| 2693 | } while (t != current); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2694 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2695 | } |
| 2696 | |
| 2697 | spin_unlock_irq(¤t->sighand->siglock); |
| 2698 | return 0; |
| 2699 | } |
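/*
 * Illustrative userspace sketch (not part of signal.c): a one-shot handler
 * registered through sigaction(). SA_RESETHAND (the modern spelling of
 * SA_ONESHOT) makes the delivery path above reset the disposition to
 * SIG_DFL after the first delivery.
 */
#include <signal.h>
#include <unistd.h>

static void on_first_usr1(int sig)
{
	static const char msg[] = "first SIGUSR1; further ones use SIG_DFL\n";
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);	/* async-signal-safe */
}

int install_oneshot(void)
{
	struct sigaction sa;

	sa.sa_handler = on_first_usr1;
	sa.sa_flags = SA_RESETHAND;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGUSR1, &sa, NULL);
}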
| 2700 | |
| 2701 | int |
| 2702 | do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) |
| 2703 | { |
| 2704 | stack_t oss; |
| 2705 | int error; |
| 2706 | |
Linus Torvalds | 0083fc2 | 2009-08-01 10:34:56 -0700 | [diff] [blame] | 2707 | oss.ss_sp = (void __user *) current->sas_ss_sp; |
| 2708 | oss.ss_size = current->sas_ss_size; |
| 2709 | oss.ss_flags = sas_ss_flags(sp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2710 | |
| 2711 | if (uss) { |
| 2712 | void __user *ss_sp; |
| 2713 | size_t ss_size; |
| 2714 | int ss_flags; |
| 2715 | |
| 2716 | error = -EFAULT; |
Linus Torvalds | 0dd8486 | 2009-08-01 11:18:56 -0700 | [diff] [blame] | 2717 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss))) |
| 2718 | goto out; |
| 2719 | error = __get_user(ss_sp, &uss->ss_sp) | |
| 2720 | __get_user(ss_flags, &uss->ss_flags) | |
| 2721 | __get_user(ss_size, &uss->ss_size); |
| 2722 | if (error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2723 | goto out; |
| 2724 | |
| 2725 | error = -EPERM; |
| 2726 | if (on_sig_stack(sp)) |
| 2727 | goto out; |
| 2728 | |
| 2729 | error = -EINVAL; |
| 2730 | /* |
| 2731 | * |
| 2732 | * Note - this code used to test ss_flags incorrectly; |
| 2733 | * old code may have been written using ss_flags==0 |
| 2734 | * to mean ss_flags==SS_ONSTACK (as this was the only |
| 2735 | * way that worked) - this fix preserves that older |
| 2736 | * mechanism. |
| 2737 | */ |
| 2738 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) |
| 2739 | goto out; |
| 2740 | |
| 2741 | if (ss_flags == SS_DISABLE) { |
| 2742 | ss_size = 0; |
| 2743 | ss_sp = NULL; |
| 2744 | } else { |
| 2745 | error = -ENOMEM; |
| 2746 | if (ss_size < MINSIGSTKSZ) |
| 2747 | goto out; |
| 2748 | } |
| 2749 | |
| 2750 | current->sas_ss_sp = (unsigned long) ss_sp; |
| 2751 | current->sas_ss_size = ss_size; |
| 2752 | } |
| 2753 | |
Linus Torvalds | 0083fc2 | 2009-08-01 10:34:56 -0700 | [diff] [blame] | 2754 | error = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2755 | if (uoss) { |
| 2756 | error = -EFAULT; |
Linus Torvalds | 0083fc2 | 2009-08-01 10:34:56 -0700 | [diff] [blame] | 2757 | if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2758 | goto out; |
Linus Torvalds | 0083fc2 | 2009-08-01 10:34:56 -0700 | [diff] [blame] | 2759 | error = __put_user(oss.ss_sp, &uoss->ss_sp) | |
| 2760 | __put_user(oss.ss_size, &uoss->ss_size) | |
| 2761 | __put_user(oss.ss_flags, &uoss->ss_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2762 | } |
| 2763 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2764 | out: |
| 2765 | return error; |
| 2766 | } |
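/*
 * Illustrative userspace sketch (not part of signal.c): installing an
 * alternate signal stack with sigaltstack() so a SIGSEGV handler registered
 * with SA_ONSTACK can still run after the main stack overflows.
 */
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>

int setup_alt_stack(void)
{
	stack_t ss;

	ss.ss_sp = malloc(SIGSTKSZ);
	if (!ss.ss_sp)
		return -1;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;	/* enable; SS_ONSTACK is also tolerated, see the note above */
	if (sigaltstack(&ss, NULL) == -1) {
		perror("sigaltstack");
		free(ss.ss_sp);
		return -1;
	}
	return 0;
}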
| 2767 | |
| 2768 | #ifdef __ARCH_WANT_SYS_SIGPENDING |
| 2769 | |
Heiko Carstens | b290ebe | 2009-01-14 14:14:06 +0100 | [diff] [blame] | 2770 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2771 | { |
| 2772 | return do_sigpending(set, sizeof(*set)); |
| 2773 | } |
| 2774 | |
| 2775 | #endif |
| 2776 | |
| 2777 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK |
| 2778 | /* Some platforms have their own version with special arguments; others |
| 2779 | support only sys_rt_sigprocmask. */ |
| 2780 | |
Heiko Carstens | b290ebe | 2009-01-14 14:14:06 +0100 | [diff] [blame] | 2781 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, |
| 2782 | old_sigset_t __user *, oset) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2783 | { |
| 2784 | int error; |
| 2785 | old_sigset_t old_set, new_set; |
| 2786 | |
| 2787 | if (set) { |
| 2788 | error = -EFAULT; |
| 2789 | if (copy_from_user(&new_set, set, sizeof(*set))) |
| 2790 | goto out; |
| 2791 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); |
| 2792 | |
| 2793 | spin_lock_irq(¤t->sighand->siglock); |
| 2794 | old_set = current->blocked.sig[0]; |
| 2795 | |
| 2796 | error = 0; |
| 2797 | switch (how) { |
| 2798 | default: |
| 2799 | error = -EINVAL; |
| 2800 | break; |
| 2801 | case SIG_BLOCK: |
| 2802 | sigaddsetmask(¤t->blocked, new_set); |
| 2803 | break; |
| 2804 | case SIG_UNBLOCK: |
| 2805 | sigdelsetmask(¤t->blocked, new_set); |
| 2806 | break; |
| 2807 | case SIG_SETMASK: |
| 2808 | current->blocked.sig[0] = new_set; |
| 2809 | break; |
| 2810 | } |
| 2811 | |
| 2812 | recalc_sigpending(); |
| 2813 | spin_unlock_irq(¤t->sighand->siglock); |
| 2814 | if (error) |
| 2815 | goto out; |
| 2816 | if (oset) |
| 2817 | goto set_old; |
| 2818 | } else if (oset) { |
| 2819 | old_set = current->blocked.sig[0]; |
| 2820 | set_old: |
| 2821 | error = -EFAULT; |
| 2822 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
| 2823 | goto out; |
| 2824 | } |
| 2825 | error = 0; |
| 2826 | out: |
| 2827 | return error; |
| 2828 | } |
| 2829 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
| 2830 | |
| 2831 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION |
Heiko Carstens | d4e8204 | 2009-01-14 14:14:34 +0100 | [diff] [blame] | 2832 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
| 2833 | const struct sigaction __user *, act, |
| 2834 | struct sigaction __user *, oact, |
| 2835 | size_t, sigsetsize) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2836 | { |
| 2837 | struct k_sigaction new_sa, old_sa; |
| 2838 | int ret = -EINVAL; |
| 2839 | |
| 2840 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
| 2841 | if (sigsetsize != sizeof(sigset_t)) |
| 2842 | goto out; |
| 2843 | |
| 2844 | if (act) { |
| 2845 | if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) |
| 2846 | return -EFAULT; |
| 2847 | } |
| 2848 | |
| 2849 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); |
| 2850 | |
| 2851 | if (!ret && oact) { |
| 2852 | if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) |
| 2853 | return -EFAULT; |
| 2854 | } |
| 2855 | out: |
| 2856 | return ret; |
| 2857 | } |
| 2858 | #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ |
| 2859 | |
| 2860 | #ifdef __ARCH_WANT_SYS_SGETMASK |
| 2861 | |
| 2862 | /* |
| 2863 | * For backwards compatibility. Functionality superseded by sigprocmask. |
| 2864 | */ |
Heiko Carstens | a5f8fa9 | 2009-01-14 14:14:11 +0100 | [diff] [blame] | 2865 | SYSCALL_DEFINE0(sgetmask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2866 | { |
| 2867 | /* SMP safe */ |
| 2868 | return current->blocked.sig[0]; |
| 2869 | } |
| 2870 | |
Heiko Carstens | a5f8fa9 | 2009-01-14 14:14:11 +0100 | [diff] [blame] | 2871 | SYSCALL_DEFINE1(ssetmask, int, newmask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2872 | { |
| 2873 | int old; |
| 2874 | |
| 2875 | spin_lock_irq(¤t->sighand->siglock); |
| 2876 | old = current->blocked.sig[0]; |
| 2877 | |
| 2878 | siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)| |
| 2879 | sigmask(SIGSTOP))); |
| 2880 | recalc_sigpending(); |
| 2881 | spin_unlock_irq(¤t->sighand->siglock); |
| 2882 | |
| 2883 | return old; |
| 2884 | } |
| 2885 | #endif /* __ARCH_WANT_SYS_SGETMASK */ |
| 2886 | |
| 2887 | #ifdef __ARCH_WANT_SYS_SIGNAL |
| 2888 | /* |
| 2889 | * For backwards compatibility. Functionality superseded by sigaction. |
| 2890 | */ |
Heiko Carstens | a5f8fa9 | 2009-01-14 14:14:11 +0100 | [diff] [blame] | 2891 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2892 | { |
| 2893 | struct k_sigaction new_sa, old_sa; |
| 2894 | int ret; |
| 2895 | |
| 2896 | new_sa.sa.sa_handler = handler; |
| 2897 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; |
Oleg Nesterov | c70d3d70 | 2006-02-09 22:41:41 +0300 | [diff] [blame] | 2898 | sigemptyset(&new_sa.sa.sa_mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2899 | |
| 2900 | ret = do_sigaction(sig, &new_sa, &old_sa); |
| 2901 | |
| 2902 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; |
| 2903 | } |
| 2904 | #endif /* __ARCH_WANT_SYS_SIGNAL */ |
| 2905 | |
| 2906 | #ifdef __ARCH_WANT_SYS_PAUSE |
| 2907 | |
Heiko Carstens | a5f8fa9 | 2009-01-14 14:14:11 +0100 | [diff] [blame] | 2908 | SYSCALL_DEFINE0(pause) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2909 | { |
| 2910 | current->state = TASK_INTERRUPTIBLE; |
| 2911 | schedule(); |
| 2912 | return -ERESTARTNOHAND; |
| 2913 | } |
| 2914 | |
| 2915 | #endif |
| 2916 | |
David Woodhouse | 150256d | 2006-01-18 17:43:57 -0800 | [diff] [blame] | 2917 | #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND |
Heiko Carstens | d4e8204 | 2009-01-14 14:14:34 +0100 | [diff] [blame] | 2918 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) |
David Woodhouse | 150256d | 2006-01-18 17:43:57 -0800 | [diff] [blame] | 2919 | { |
| 2920 | sigset_t newset; |
| 2921 | |
| 2922 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
| 2923 | if (sigsetsize != sizeof(sigset_t)) |
| 2924 | return -EINVAL; |
| 2925 | |
| 2926 | if (copy_from_user(&newset, unewset, sizeof(newset))) |
| 2927 | return -EFAULT; |
| 2928 | sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
| 2929 | |
| 2930 | spin_lock_irq(¤t->sighand->siglock); |
| 2931 | current->saved_sigmask = current->blocked; |
| 2932 | current->blocked = newset; |
| 2933 | recalc_sigpending(); |
| 2934 | spin_unlock_irq(¤t->sighand->siglock); |
| 2935 | |
| 2936 | current->state = TASK_INTERRUPTIBLE; |
| 2937 | schedule(); |
Roland McGrath | 4e4c22c | 2008-04-30 00:53:06 -0700 | [diff] [blame] | 2938 | set_restore_sigmask(); |
David Woodhouse | 150256d | 2006-01-18 17:43:57 -0800 | [diff] [blame] | 2939 | return -ERESTARTNOHAND; |
| 2940 | } |
| 2941 | #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ |
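/*
 * Illustrative userspace sketch (not part of signal.c): the classic
 * race-free wait pattern that rt_sigsuspend() enables -- block the signal,
 * test the condition, then atomically unblock and sleep in sigsuspend().
 */
#include <signal.h>

static volatile sig_atomic_t got_sigusr1;

static void handler(int sig)
{
	got_sigusr1 = 1;
}

void wait_for_sigusr1(void)
{
	sigset_t block, old;
	struct sigaction sa;

	sa.sa_handler = handler;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);

	while (!got_sigusr1)
		sigsuspend(&old);	/* returns -1/EINTR after the handler runs */

	sigprocmask(SIG_SETMASK, &old, NULL);
}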
| 2942 | |
David Howells | f269fdd | 2006-09-27 01:50:23 -0700 | [diff] [blame] | 2943 | __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma) |
| 2944 | { |
| 2945 | return NULL; |
| 2946 | } |
| 2947 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2948 | void __init signals_init(void) |
| 2949 | { |
Christoph Lameter | 0a31bd5 | 2007-05-06 14:49:57 -0700 | [diff] [blame] | 2950 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2951 | } |
Jason Wessel | 67fc4e0 | 2010-05-20 21:04:21 -0500 | [diff] [blame] | 2952 | |
| 2953 | #ifdef CONFIG_KGDB_KDB |
| 2954 | #include <linux/kdb.h> |
| 2955 | /* |
| 2956 | * kdb_send_sig_info - Allows kdb to send signals without exposing |
| 2957 | * signal internals. This function checks if the required locks are |
| 2958 | * available before calling the main signal code, to avoid kdb |
| 2959 | * deadlocks. |
| 2960 | */ |
| 2961 | void |
| 2962 | kdb_send_sig_info(struct task_struct *t, struct siginfo *info) |
| 2963 | { |
| 2964 | static struct task_struct *kdb_prev_t; |
| 2965 | int sig, new_t; |
| 2966 | if (!spin_trylock(&t->sighand->siglock)) { |
| 2967 | kdb_printf("Can't do kill command now.\n" |
| 2968 | "The sigmask lock is held somewhere else in " |
| 2969 | "kernel, try again later\n"); |
| 2970 | return; |
| 2971 | } |
| 2972 | spin_unlock(&t->sighand->siglock); |
| 2973 | new_t = kdb_prev_t != t; |
| 2974 | kdb_prev_t = t; |
| 2975 | if (t->state != TASK_RUNNING && new_t) { |
| 2976 | kdb_printf("Process is not RUNNING, sending a signal from " |
| 2977 | "kdb risks deadlock\n" |
| 2978 | "on the run queue locks. " |
| 2979 | "The signal has _not_ been sent.\n" |
| 2980 | "Reissue the kill command if you want to risk " |
| 2981 | "the deadlock.\n"); |
| 2982 | return; |
| 2983 | } |
| 2984 | sig = info->si_signo; |
| 2985 | if (send_sig_info(sig, info, t)) |
| 2986 | kdb_printf("Fail to deliver Signal %d to process %d.\n", |
| 2987 | sig, t->pid); |
| 2988 | else |
| 2989 | kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid); |
| 2990 | } |
| 2991 | #endif /* CONFIG_KGDB_KDB */ |