/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	u64			ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	u64 expires;
	u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 * @running:		true when there are timers running and
 *			@cputime_atomic receives updates.
 * @checking_timer:	true when a thread in the group is in the
 *			process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
	bool running;
	bool checking_timer;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals; group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags;	/* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer		real_timer;
	ktime_t			it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
	 * values are defined to 0 and 1 respectively.
	 */
	struct cpu_itimer	it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime	cputime_expires;

	struct list_head	cpu_timers[3];

#endif

	struct pid		*leader_pid;

#ifdef CONFIG_NO_HZ_FULL
	atomic_t		tick_dep_mask;
#endif

	struct pid		*tty_old_pgrp;

	/* boolean value for session group leader */
	int			leader;

	struct tty_struct	*tty;	/* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup	*autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t		stats_lock;
	u64			utime, stime, cutime, cstime;
	u64			gtime;
	u64			cgtime;
	struct prev_cputime	prev_cputime;
	unsigned long		nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long		min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long		inblock, oublock, cinblock, coublock;
	unsigned long		maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long	sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit		rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct	pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats	*stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned		audit_tty;
	struct tty_audit_buf	*tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom.
	 */
	bool			oom_flag_origin;
	short			oom_score_adj;		/* OOM kill score adjustment */
	short			oom_score_adj_min;	/* OOM kill score adjustment min value.
							 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct	*oom_mm;	/* recorded mm when the thread group got
						 * killed by the oom killer */

	struct mutex		cred_guard_mutex; /* guard against foreign influences on
						   * credential calculations
						   * (notably ptrace) */
} __randomize_layout;

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return (sig->flags & SIGNAL_GROUP_EXIT) ||
	       (sig->group_exit_task != NULL);
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);

static inline int kernel_dequeue_signal(siginfo_t *info)
{
	struct task_struct *tsk = current;
	siginfo_t __info;
	int ret;

	spin_lock_irq(&tsk->sighand->siglock);
	ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
	spin_unlock_irq(&tsk->sighand->siglock);

	return ret;
}

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		__set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}
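
/*
 * Illustrative sketch (not a helper provided by this header): a kernel
 * thread that has unblocked signals can drain whatever is pending with
 * kernel_dequeue_signal(), and would use kernel_signal_stop() when a
 * dequeued job-control stop must be honoured.  drain_pending_signals()
 * is a hypothetical caller, shown only to document the calling pattern.
 *
 *	static void drain_pending_signals(void)
 *	{
 *		while (signal_pending(current)) {
 *			if (!kernel_dequeue_signal(NULL))
 *				break;
 *		}
 *	}
 */
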
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
				 const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
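
/*
 * Illustrative sketch: a typical killable loop checks fatal_signal_pending()
 * so that SIGKILL can interrupt long-running kernel work.  have_more_work()
 * and do_one_chunk() are hypothetical placeholders for the real work.
 *
 *	static int do_killable_work(void)
 *	{
 *		while (have_more_work()) {
 *			if (fatal_signal_pending(current))
 *				return -EINTR;
 *			do_one_chunk();
 *			cond_resched();
 *		}
 *		return 0;
 *	}
 */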

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
	WARN_ON(!test_thread_flag(TIF_SIGPENDING));
}

static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}

static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}

static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;

	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}
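
/*
 * Illustrative sketch of how an architecture's signal-delivery path is
 * expected to use these helpers: the frame builder saves the mask returned
 * by sigmask_to_save(), and paths that deliver no handler call
 * restore_saved_sigmask() to reinstate the mask stashed by sigsuspend(),
 * pselect() and friends.  setup_rt_frame() stands in for the arch-specific
 * frame builder and is not declared here.
 *
 *	static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 *	{
 *		sigset_t *oldset = sigmask_to_save();
 *
 *		if (setup_rt_frame(ksig, oldset, regs) < 0)
 *			force_sigsegv(ksig->sig, current);
 *	}
 */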

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO	((struct siginfo *) 0)
#define SEND_SIG_PRIV	((struct siginfo *) 1)
#define SEND_SIG_FORCED	((struct siginfo *) 2)

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}
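
/*
 * Illustrative sketch: an arch frame builder typically starts from sigsp()
 * so that SA_ONSTACK handlers land on the alternate signal stack.  Both
 * get_sigframe() and the use of user_stack_pointer() here are illustrative
 * assumptions, not declarations made by this header.
 *
 *	static void __user *get_sigframe(struct ksignal *ksig,
 *					 struct pt_regs *regs,
 *					 size_t frame_size)
 *	{
 *		unsigned long sp = sigsp(user_stack_pointer(regs), ksig);
 *
 *		return (void __user *)round_down(sp - frame_size, 16);
 *	}
 */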

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)
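
/*
 * Illustrative sketch: the iterators above are RCU-based, so a walk over
 * every thread in the system must run under rcu_read_lock() (or with
 * tasklist_lock held).  count_all_threads() is a hypothetical example.
 *
 *	static int count_all_threads(void)
 *	{
 *		struct task_struct *p, *t;
 *		int nr = 0;
 *
 *		rcu_read_lock();
 *		for_each_process_thread(p, t)
 *			nr++;
 *		rcu_read_unlock();
 *
 *		return nr;
 *	}
 */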

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline int get_nr_threads(struct task_struct *tsk)
{
	return tsk->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

/* Due to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader.  For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid; we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
	return task_pid(p) == p->signal->leader_pid;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(tsk, flags);
	(void)__cond_lock(&tsk->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *tsk,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
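
/*
 * Illustrative sketch of the lock_task_sighand()/unlock_task_sighand()
 * pairing: the lock can fail when the task has already been released, so
 * the return value must be checked.  While the lock is held, task->signal
 * and task->sighand are stable.  read_group_maxrss() is a hypothetical
 * caller, shown only to document the calling pattern.
 *
 *	static unsigned long read_group_maxrss(struct task_struct *task)
 *	{
 *		unsigned long flags, maxrss = 0;
 *
 *		if (lock_task_sighand(task, &flags)) {
 *			maxrss = task->signal->maxrss;
 *			unlock_task_sighand(task, &flags);
 *		}
 *		return maxrss;
 *	}
 */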

static inline unsigned long task_rlimit(const struct task_struct *tsk,
					unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
					    unsigned int limit)
{
	return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
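
/*
 * Illustrative sketch: the rlimit()/rlimit_max() wrappers read the calling
 * task's limits locklessly, so a typical capacity check is a single
 * comparison.  RLIMIT_NOFILE is used purely as an example; fits_fd_limit()
 * is hypothetical.
 *
 *	static bool fits_fd_limit(unsigned int nr_fds)
 *	{
 *		return nr_fds < rlimit(RLIMIT_NOFILE);
 *	}
 */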

#endif /* _LINUX_SCHED_SIGNAL_H */