/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/sched/types.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct capture_control;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0x0000
#define TASK_INTERRUPTIBLE		0x0001
#define TASK_UNINTERRUPTIBLE		0x0002
#define __TASK_STOPPED			0x0004
#define __TASK_TRACED			0x0008
/* Used in tsk->exit_state: */
#define EXIT_DEAD			0x0010
#define EXIT_ZOMBIE			0x0020
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_PARKED			0x0040
#define TASK_DEAD			0x0080
#define TASK_WAKEKILL			0x0100
#define TASK_WAKING			0x0200
#define TASK_NOLOAD			0x0400
#define TASK_NEW			0x0800
#define TASK_STATE_MAX			0x1000

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
					 TASK_PARKED)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
					 (task->flags & PF_FROZEN) == 0 && \
					 (task->state & TASK_NOLOAD) == 0)
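
/*
 * For example (an illustrative sketch), a killable wait combines
 * TASK_WAKEKILL with TASK_UNINTERRUPTIBLE so that only fatal signals can
 * interrupt the sleep; wait_event_killable() wraps this pattern:
 *
 *	for (;;) {
 *		set_current_state(TASK_KILLABLE);
 *		if (condition || fatal_signal_pending(current))
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */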

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

/*
 * Special states are those that do not use the normal wait-loop pattern. See
 * the comment with set_special_state().
 */
#define is_special_task_state(state)				\
	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))

#define __set_current_state(state_value)			\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)

#define set_current_state(state_value)				\
	do {							\
		WARN_ON_ONCE(is_special_task_state(state_value));\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
		WARN_ON_ONCE(!is_special_task_state(state_value));	\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->task_state_change = _THIS_IP_;			\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)
#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * where wake_up_state() executes a full memory barrier before accessing the
 * task state.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * However, with slightly different timing the wakeup TASK_RUNNING store can
 * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
 * a problem either because that will result in one extra go around the loop
 * and our @cond test will save the day.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)				\
	current->state = (state_value)

#define set_current_state(state_value)					\
	smp_store_mb(current->state, (state_value))

/*
 * set_special_state() should be used for those states when the blocking task
 * cannot use the regular condition based wait-loop. In that case we must
 * serialize against wakeups such that any possible in-flight TASK_RUNNING
 * stores will not collide with our state change.
 */
#define set_special_state(state_value)					\
	do {								\
		unsigned long flags; /* may shadow */			\
		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
		current->state = (state_value);				\
		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
	} while (0)

#endif
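
/*
 * For example, do_task_dead() in kernel/sched/core.c marks the exiting task
 * with a special state outside any wait loop (a sketch):
 *
 *	set_special_state(TASK_DEAD);
 *	current->flags |= PF_NOFREEZE;
 *	__schedule(false);
 *	BUG();
 */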

/* Task command name length: */
#define TASK_COMM_LEN			16

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
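
/*
 * A sketch of the canonical schedule_timeout() pattern: the caller sets the
 * task state first (in TASK_RUNNING the task will not sleep), and the
 * return value is the time remaining in jiffies, 0 if the full timeout
 * elapsed:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 */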
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock:  protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};
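
/*
 * A sketch of how the guarantee is used (see cputime_adjust() in
 * kernel/sched/cputime.c): under prev->lock, a freshly computed utime/stime
 * split is clamped so that neither component ever decreases:
 *
 *	if (stime < prev->stime) {
 *		stime = prev->stime;
 *		utime = rtime - stime;
 *	}
 *	prev->stime = stime;
 *	prev->utime = utime;
 */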

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task is idle */
	VTIME_IDLE,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs as a guest in a CPU with VTIME active: */
	VTIME_GUEST,
};

struct vtime {
	seqcount_t		seqcount;
	unsigned long long	starttime;
	enum vtime_state	state;
	unsigned int		cpu;
	u64			utime;
	u64			stime;
	u64			gtime;
};

/*
 * Utilization clamp constraints.
 * @UCLAMP_MIN:	Minimum utilization
 * @UCLAMP_MAX:	Maximum utilization
 * @UCLAMP_CNT:	Utilization clamp constraints count
 */
enum uclamp_id {
	UCLAMP_MIN = 0,
	UCLAMP_MAX,
	UCLAMP_CNT
};

#ifdef CONFIG_SMP
extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)

/* Increase resolution of cpu_capacity calculations */
# define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
# define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)
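
/*
 * With SCHED_FIXEDPOINT_SHIFT = 10, one unit is 1/1024. For example, a 50%
 * ratio is stored as 512 and full capacity as SCHED_CAPACITY_SCALE (1024);
 * a sketch of the conversions:
 *
 *	fp    = (value << SCHED_FIXEDPOINT_SHIFT) / total;   (to fixed point)
 *	value = (fp * total) >> SCHED_FIXEDPOINT_SHIFT;      (and back)
 */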

struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};

/**
 * struct util_est - Estimated utilization of FAIR tasks
 * @enqueued: instantaneous estimated utilization of a task/cpu
 * @ewma:     the Exponential Weighted Moving Average (EWMA)
 *            utilization of a task
 *
 * Support data structure to track an Exponential Weighted Moving Average
 * (EWMA) of a FAIR task's utilization. New samples are added to the moving
 * average each time a task completes an activation. The sample's weight is
 * chosen so that the EWMA will be relatively insensitive to transient
 * changes to the task's workload.
 *
 * The enqueued attribute has a slightly different meaning for tasks and cpus:
 * - task:   the task's util_avg at last task dequeue time
 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
 * Thus, the util_est.enqueued of a task represents its contribution to the
 * estimated utilization of the CPU where that task is currently enqueued.
 *
 * Only for tasks we track a moving average of the past instantaneous
 * estimated utilization. This makes it possible to absorb sporadic drops in
 * utilization of an otherwise almost periodic task.
 */
struct util_est {
	unsigned int			enqueued;
	unsigned int			ewma;
#define UTIL_EST_WEIGHT_SHIFT		2
} __attribute__((__aligned__(sizeof(u64))));
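
/*
 * UTIL_EST_WEIGHT_SHIFT = 2 gives a new sample a weight of 1/4 in the
 * moving average; a sketch of the update done in kernel/sched/fair.c:
 *
 *	ewma = ewma + (last - ewma) / 4 = (3 * ewma + last) / 4
 *
 * e.g. with ewma = 400 and a new sample last = 800, the new ewma is 500.
 */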

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * load_avg and util_avg don't directly factor frequency scaling and CPU
 * capacity scaling. The scaling is done through the rq_clock_pelt that
 * is used for computing those signals (see update_rq_clock_pelt()).
 *
 * N.B., the above ratios (runnable% and running%) themselves are in the
 * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
 * to as large a range as necessary. This is for example reflected by
 * util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u64				runnable_load_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			runnable_load_avg;
	unsigned long			util_avg;
	struct util_est			util_est;
} ____cacheline_aligned;
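
/*
 * The overflow bound above checks out: the PELT geometric series saturates
 * at 47742 time units, so with the highest load (88761) always runnable:
 *
 *	2^64 / 47742 / 88761 ~= 4.35e9 entities
 *
 * far beyond PID_MAX_LIMIT (4194304), hence "should not overflow".
 */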

struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	unsigned long			runnable_weight;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;

	u64				nr_migrations;

	struct sched_statistics		statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(); they will remain the same until
	 * the next sched_setattr().
	 */
	u64				dl_runtime;	/* Maximum runtime for each instance	*/
	u64				dl_deadline;	/* Relative deadline of each instance	*/
	u64				dl_period;	/* Separation of two instances (period) */
	u64				dl_bw;		/* dl_runtime / dl_period		*/
	u64				dl_density;	/* dl_runtime / dl_deadline		*/
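
	/*
	 * For example, dl_runtime = 10ms with dl_period = 100ms reserves
	 * 10% of a CPU; dl_bw stores that ratio in fixed point, roughly
	 * (see to_ratio() and BW_SHIFT in kernel/sched/sched.h):
	 *
	 *	dl_bw = (dl_runtime << BW_SHIFT) / dl_period
	 */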

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to DI (deadline
	 * inheritance). If so we are outside the bandwidth enforcement
	 * mechanism (but only until we exit the critical section);
	 *
	 * @dl_yielded tells if the task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 *
	 * @dl_overrun tells if the task asked to be informed about runtime
	 * overruns.
	 */
	unsigned int			dl_throttled      : 1;
	unsigned int			dl_boosted        : 1;
	unsigned int			dl_yielded        : 1;
	unsigned int			dl_non_contending : 1;
	unsigned int			dl_overrun	  : 1;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer			inactive_timer;
};

#ifdef CONFIG_UCLAMP_TASK
/* Number of utilization clamp buckets (shorter alias) */
#define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT

/*
 * Utilization clamp for a scheduling entity
 * @value:		clamp value "assigned" to a se
 * @bucket_id:		bucket index corresponding to the "assigned" value
 * @active:		the se is currently refcounted in a rq's bucket
 * @user_defined:	the requested clamp value comes from user-space
 *
 * The bucket_id is the index of the clamp bucket matching the clamp value
 * which is pre-computed and stored to avoid expensive integer divisions from
 * the fast path.
 *
 * The active bit is set whenever a task has got an "effective" value
 * assigned, which can be different from the clamp value "requested" from
 * user-space. This makes it possible to know that a task is refcounted in
 * the rq's bucket corresponding to the "effective" bucket_id.
 *
 * The user_defined bit is set whenever a task has got a task-specific clamp
 * value requested from userspace, i.e. the system defaults apply to this
 * task just as a restriction. This makes it possible to relax default clamps
 * when a less restrictive task-specific value has been requested, thus
 * implementing a "nice" semantic. For example, a task running with a 20%
 * default boost can still drop its own boosting to 0%.
 */
struct uclamp_se {
	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
	unsigned int active		: 1;
	unsigned int user_defined	: 1;
};
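
/*
 * A sketch of the pre-computation (see uclamp_bucket_id() in
 * kernel/sched/core.c): with SCHED_CAPACITY_SCALE = 1024 and the default
 * UCLAMP_BUCKETS = 5, each bucket spans roughly 1024 / 5 ~= 205 units:
 *
 *	bucket_id = min(value / (SCHED_CAPACITY_SCALE / UCLAMP_BUCKETS),
 *			UCLAMP_BUCKETS - 1);
 *
 * e.g. value = 512 maps to bucket_id = 2.
 */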
| 606 | #endif /* CONFIG_UCLAMP_TASK */ |
| 607 | |
Paul E. McKenney | 1d082fd | 2014-08-14 16:01:53 -0700 | [diff] [blame] | 608 | union rcu_special { |
| 609 | struct { |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 610 | u8 blocked; |
| 611 | u8 need_qs; |
Paul E. McKenney | 05f4157 | 2018-10-16 04:12:58 -0700 | [diff] [blame] | 612 | u8 exp_hint; /* Hint for performance. */ |
Paul E. McKenney | 23634eb | 2019-03-24 15:25:51 -0700 | [diff] [blame] | 613 | u8 deferred_qs; |
Paul E. McKenney | 8203d6d | 2015-08-02 13:53:17 -0700 | [diff] [blame] | 614 | } b; /* Bits. */ |
Paul E. McKenney | 05f4157 | 2018-10-16 04:12:58 -0700 | [diff] [blame] | 615 | u32 s; /* Set of bits. */ |
Paul E. McKenney | 1d082fd | 2014-08-14 16:01:53 -0700 | [diff] [blame] | 616 | }; |
Paul E. McKenney | 8684896 | 2009-08-27 15:00:12 -0700 | [diff] [blame] | 617 | |
Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 618 | enum perf_event_task_context { |
| 619 | perf_invalid_context = -1, |
| 620 | perf_hw_context = 0, |
Peter Zijlstra | 89a1e18 | 2010-09-07 17:34:50 +0200 | [diff] [blame] | 621 | perf_sw_context, |
Peter Zijlstra | 8dc85d5 | 2010-09-02 16:50:03 +0200 | [diff] [blame] | 622 | perf_nr_task_contexts, |
| 623 | }; |
| 624 | |
Ingo Molnar | eb61baf | 2017-02-01 17:09:06 +0100 | [diff] [blame] | 625 | struct wake_q_node { |
| 626 | struct wake_q_node *next; |
| 627 | }; |
| 628 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 629 | struct task_struct { |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 630 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 631 | /* |
| 632 | * For reasons of header soup (see current_thread_info()), this |
| 633 | * must be the first element of task_struct. |
| 634 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 635 | struct thread_info thread_info; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 636 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 637 | /* -1 unrunnable, 0 runnable, >0 stopped: */ |
| 638 | volatile long state; |
Kees Cook | 29e48ce | 2017-04-05 22:43:33 -0700 | [diff] [blame] | 639 | |
| 640 | /* |
| 641 | * This begins the randomizable portion of task_struct. Only |
| 642 | * scheduling-critical items should be added above here. |
| 643 | */ |
| 644 | randomized_struct_fields_start |
| 645 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 646 | void *stack; |
Elena Reshetova | ec1d281 | 2019-01-18 14:27:29 +0200 | [diff] [blame] | 647 | refcount_t usage; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 648 | /* Per task flags (PF_*), defined further below: */ |
| 649 | unsigned int flags; |
| 650 | unsigned int ptrace; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 651 | |
Peter Williams | 2dd73a4 | 2006-06-27 02:54:34 -0700 | [diff] [blame] | 652 | #ifdef CONFIG_SMP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 653 | struct llist_node wake_entry; |
| 654 | int on_cpu; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 655 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 656 | /* Current CPU: */ |
| 657 | unsigned int cpu; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 658 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 659 | unsigned int wakee_flips; |
| 660 | unsigned long wakee_flip_decay_ts; |
| 661 | struct task_struct *last_wakee; |
Peter Zijlstra | ac66f54 | 2013-10-07 11:29:16 +0100 | [diff] [blame] | 662 | |
Mel Gorman | 32e839d | 2018-01-30 10:45:55 +0000 | [diff] [blame] | 663 | /* |
| 664 | * recent_used_cpu is initially set as the last CPU used by a task |
| 665 | * that wakes affine another task. Waker/wakee relationships can |
| 666 | * push tasks around a CPU where each wakeup moves to the next one. |
| 667 | * Tracking a recently used CPU allows a quick search for a recently |
| 668 | * used CPU that may be idle. |
| 669 | */ |
| 670 | int recent_used_cpu; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 671 | int wake_cpu; |
Nick Piggin | 4866cde | 2005-06-25 14:57:23 -0700 | [diff] [blame] | 672 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 673 | int on_rq; |
Ingo Molnar | 50e645a | 2007-07-09 18:52:00 +0200 | [diff] [blame] | 674 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 675 | int prio; |
| 676 | int static_prio; |
| 677 | int normal_prio; |
| 678 | unsigned int rt_priority; |
| 679 | |
| 680 | const struct sched_class *sched_class; |
| 681 | struct sched_entity se; |
| 682 | struct sched_rt_entity rt; |
Peter Zijlstra | 8323f26 | 2012-06-22 13:36:05 +0200 | [diff] [blame] | 683 | #ifdef CONFIG_CGROUP_SCHED |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 684 | struct task_group *sched_task_group; |
Peter Zijlstra | 8323f26 | 2012-06-22 13:36:05 +0200 | [diff] [blame] | 685 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 686 | struct sched_dl_entity dl; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 687 | |
Patrick Bellasi | 69842cb | 2019-06-21 09:42:02 +0100 | [diff] [blame] | 688 | #ifdef CONFIG_UCLAMP_TASK |
Patrick Bellasi | e8f1417 | 2019-06-21 09:42:05 +0100 | [diff] [blame] | 689 | /* Clamp values requested for a scheduling entity */ |
| 690 | struct uclamp_se uclamp_req[UCLAMP_CNT]; |
| 691 | /* Effective clamp values used for a scheduling entity */ |
Patrick Bellasi | 69842cb | 2019-06-21 09:42:02 +0100 | [diff] [blame] | 692 | struct uclamp_se uclamp[UCLAMP_CNT]; |
| 693 | #endif |
| 694 | |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 695 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 696 | /* List of struct preempt_notifier: */ |
| 697 | struct hlist_head preempt_notifiers; |
Avi Kivity | e107be3 | 2007-07-26 13:40:43 +0200 | [diff] [blame] | 698 | #endif |
| 699 | |
Alexey Dobriyan | 6c5c934 | 2006-09-29 01:59:40 -0700 | [diff] [blame] | 700 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 701 | unsigned int btrace_seq; |
Alexey Dobriyan | 6c5c934 | 2006-09-29 01:59:40 -0700 | [diff] [blame] | 702 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 703 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 704 | unsigned int policy; |
| 705 | int nr_cpus_allowed; |
Sebastian Andrzej Siewior | 3bd3706 | 2019-04-23 16:26:36 +0200 | [diff] [blame] | 706 | const cpumask_t *cpus_ptr; |
| 707 | cpumask_t cpus_mask; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 708 | |
Paul E. McKenney | a57eb94 | 2010-06-29 16:49:16 -0700 | [diff] [blame] | 709 | #ifdef CONFIG_PREEMPT_RCU |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 710 | int rcu_read_lock_nesting; |
| 711 | union rcu_special rcu_read_unlock_special; |
| 712 | struct list_head rcu_node_entry; |
| 713 | struct rcu_node *rcu_blocked_node; |
Pranith Kumar | 28f6569 | 2014-09-22 14:00:48 -0400 | [diff] [blame] | 714 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 715 | |
Paul E. McKenney | 8315f42 | 2014-06-27 13:42:20 -0700 | [diff] [blame] | 716 | #ifdef CONFIG_TASKS_RCU |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 717 | unsigned long rcu_tasks_nvcsw; |
Paul E. McKenney | ccdd29f | 2017-05-25 08:51:48 -0700 | [diff] [blame] | 718 | u8 rcu_tasks_holdout; |
| 719 | u8 rcu_tasks_idx; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 720 | int rcu_tasks_idle_cpu; |
Paul E. McKenney | ccdd29f | 2017-05-25 08:51:48 -0700 | [diff] [blame] | 721 | struct list_head rcu_tasks_holdout_list; |
Paul E. McKenney | 8315f42 | 2014-06-27 13:42:20 -0700 | [diff] [blame] | 722 | #endif /* #ifdef CONFIG_TASKS_RCU */ |
Paul E. McKenney | e260be6 | 2008-01-25 21:08:24 +0100 | [diff] [blame] | 723 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 724 | struct sched_info sched_info; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 725 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 726 | struct list_head tasks; |
Dario Faggioli | 806c09a | 2010-11-30 19:51:33 +0100 | [diff] [blame] | 727 | #ifdef CONFIG_SMP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 728 | struct plist_node pushable_tasks; |
| 729 | struct rb_node pushable_dl_tasks; |
Dario Faggioli | 806c09a | 2010-11-30 19:51:33 +0100 | [diff] [blame] | 730 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 731 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 732 | struct mm_struct *mm; |
| 733 | struct mm_struct *active_mm; |
Ingo Molnar | 314ff78 | 2017-02-03 11:03:31 +0100 | [diff] [blame] | 734 | |
| 735 | /* Per-thread vma caching: */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 736 | struct vmacache vmacache; |
Ingo Molnar | 314ff78 | 2017-02-03 11:03:31 +0100 | [diff] [blame] | 737 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 738 | #ifdef SPLIT_RSS_COUNTING |
| 739 | struct task_rss_stat rss_stat; |
KAMEZAWA Hiroyuki | 34e5523 | 2010-03-05 13:41:40 -0800 | [diff] [blame] | 740 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 741 | int exit_state; |
| 742 | int exit_code; |
| 743 | int exit_signal; |
| 744 | /* The signal sent when the parent dies: */ |
| 745 | int pdeath_signal; |
| 746 | /* JOBCTL_*, siglock protected: */ |
| 747 | unsigned long jobctl; |
Andrei Epure | 9b89f6b | 2013-04-11 20:30:29 +0300 | [diff] [blame] | 748 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 749 | /* Used for emulating ABI behavior of previous Linux versions: */ |
| 750 | unsigned int personality; |
Andrei Epure | 9b89f6b | 2013-04-11 20:30:29 +0300 | [diff] [blame] | 751 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 752 | /* Scheduler bits, serialized by scheduler locks: */ |
| 753 | unsigned sched_reset_on_fork:1; |
| 754 | unsigned sched_contributes_to_load:1; |
| 755 | unsigned sched_migrated:1; |
| 756 | unsigned sched_remote_wakeup:1; |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 757 | #ifdef CONFIG_PSI |
| 758 | unsigned sched_psi_wake_requeue:1; |
| 759 | #endif |
| 760 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 761 | /* Force alignment to the next boundary: */ |
| 762 | unsigned :0; |
Peter Zijlstra | be958bd | 2015-11-25 16:02:07 +0100 | [diff] [blame] | 763 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 764 | /* Unserialized, strictly 'current' */ |
| 765 | |
| 766 | /* Bit to tell LSMs we're in execve(): */ |
| 767 | unsigned in_execve:1; |
| 768 | unsigned in_iowait:1; |
| 769 | #ifndef TIF_RESTORE_SIGMASK |
| 770 | unsigned restore_sigmask:1; |
Andy Lutomirski | 7e78141 | 2016-08-02 14:05:36 -0700 | [diff] [blame] | 771 | #endif |
Tejun Heo | 626ebc4 | 2015-11-05 18:46:09 -0800 | [diff] [blame] | 772 | #ifdef CONFIG_MEMCG |
Michal Hocko | 29ef680 | 2018-08-17 15:47:11 -0700 | [diff] [blame] | 773 | unsigned in_user_fault:1; |
Johannes Weiner | 127424c | 2016-01-20 15:02:32 -0800 | [diff] [blame] | 774 | #endif |
Peter Zijlstra | ff303e6 | 2015-04-17 20:05:30 +0200 | [diff] [blame] | 775 | #ifdef CONFIG_COMPAT_BRK |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 776 | unsigned brk_randomized:1; |
Peter Zijlstra | ff303e6 | 2015-04-17 20:05:30 +0200 | [diff] [blame] | 777 | #endif |
Tejun Heo | 77f8879 | 2017-03-16 16:54:24 -0400 | [diff] [blame] | 778 | #ifdef CONFIG_CGROUPS |
| 779 | /* disallow userland-initiated cgroup migration */ |
| 780 | unsigned no_cgroup_migration:1; |
Roman Gushchin | 76f969e | 2019-04-19 10:03:04 -0700 | [diff] [blame] | 781 | /* task is frozen/stopped (used by the cgroup freezer) */ |
| 782 | unsigned frozen:1; |
Tejun Heo | 77f8879 | 2017-03-16 16:54:24 -0400 | [diff] [blame] | 783 | #endif |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 784 | #ifdef CONFIG_BLK_CGROUP |
| 785 | /* to be used once the psi infrastructure lands upstream. */ |
| 786 | unsigned use_memdelay:1; |
| 787 | #endif |
Vladimir Davydov | 6f185c2 | 2014-12-12 16:55:15 -0800 | [diff] [blame] | 788 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 789 | unsigned long atomic_flags; /* Flags requiring atomic access. */ |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 790 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 791 | struct restart_block restart_block; |
Andy Lutomirski | f56141e | 2015-02-12 15:01:14 -0800 | [diff] [blame] | 792 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 793 | pid_t pid; |
| 794 | pid_t tgid; |
Arjan van de Ven | 0a425405 | 2006-09-26 10:52:38 +0200 | [diff] [blame] | 795 | |
Linus Torvalds | 050e9ba | 2018-06-14 12:21:18 +0900 | [diff] [blame] | 796 | #ifdef CONFIG_STACKPROTECTOR |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 797 | /* Canary value for the -fstack-protector GCC feature: */ |
| 798 | unsigned long stack_canary; |
Hiroshi Shimamoto | 1314562 | 2009-08-18 15:06:02 +0900 | [diff] [blame] | 799 | #endif |
Oleg Nesterov | 4d1d61a | 2012-05-11 10:59:08 +1000 | [diff] [blame] | 800 | /* |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 801 | * Pointers to the (original) parent process, youngest child, younger sibling, |
Oleg Nesterov | 4d1d61a | 2012-05-11 10:59:08 +1000 | [diff] [blame] | 802 | * older sibling, respectively. (p->father can be replaced with |
Roland McGrath | f470021 | 2008-03-24 18:36:23 -0700 | [diff] [blame] | 803 | * p->real_parent->pid) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 804 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 805 | |
| 806 | /* Real parent process: */ |
| 807 | struct task_struct __rcu *real_parent; |
| 808 | |
| 809 | /* Recipient of SIGCHLD, wait4() reports: */ |
| 810 | struct task_struct __rcu *parent; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 811 | |
Roland McGrath | f470021 | 2008-03-24 18:36:23 -0700 | [diff] [blame] | 812 | /* |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 813 | * Children/sibling form the list of natural children: |
Roland McGrath | f470021 | 2008-03-24 18:36:23 -0700 | [diff] [blame] | 814 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 815 | struct list_head children; |
| 816 | struct list_head sibling; |
| 817 | struct task_struct *group_leader; |
| 818 | |
| 819 | /* |
| 820 | * 'ptraced' is the list of tasks this task is using ptrace() on. |
| 821 | * |
| 822 | * This includes both natural children and PTRACE_ATTACH targets. |
| 823 | * 'ptrace_entry' is this task's link on the p->parent->ptraced list. |
| 824 | */ |
| 825 | struct list_head ptraced; |
| 826 | struct list_head ptrace_entry; |
Roland McGrath | f470021 | 2008-03-24 18:36:23 -0700 | [diff] [blame] | 827 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 828 | /* PID/PID hash table linkage. */ |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 829 | struct pid *thread_pid; |
| 830 | struct hlist_node pid_links[PIDTYPE_MAX]; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 831 | struct list_head thread_group; |
| 832 | struct list_head thread_node; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 833 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 834 | struct completion *vfork_done; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 835 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 836 | /* CLONE_CHILD_SETTID: */ |
| 837 | int __user *set_child_tid; |
| 838 | |
| 839 | /* CLONE_CHILD_CLEARTID: */ |
| 840 | int __user *clear_child_tid; |
| 841 | |
| 842 | u64 utime; |
| 843 | u64 stime; |
Stanislaw Gruszka | 40565b5 | 2016-11-15 03:06:51 +0100 | [diff] [blame] | 844 | #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 845 | u64 utimescaled; |
| 846 | u64 stimescaled; |
Stanislaw Gruszka | 40565b5 | 2016-11-15 03:06:51 +0100 | [diff] [blame] | 847 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 848 | u64 gtime; |
| 849 | struct prev_cputime prev_cputime; |
Frederic Weisbecker | 6a61671 | 2012-12-16 20:00:34 +0100 | [diff] [blame] | 850 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN |
Frederic Weisbecker | bac5b6b | 2017-06-29 19:15:10 +0200 | [diff] [blame] | 851 | struct vtime vtime; |
Frederic Weisbecker | 6a61671 | 2012-12-16 20:00:34 +0100 | [diff] [blame] | 852 | #endif |
Frederic Weisbecker | d027d45 | 2015-06-07 15:54:30 +0200 | [diff] [blame] | 853 | |
| 854 | #ifdef CONFIG_NO_HZ_FULL |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 855 | atomic_t tick_dep_mask; |
Frederic Weisbecker | d027d45 | 2015-06-07 15:54:30 +0200 | [diff] [blame] | 856 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 857 | /* Context switch counts: */ |
| 858 | unsigned long nvcsw; |
| 859 | unsigned long nivcsw; |
| 860 | |
| 861 | /* Monotonic time in nsecs: */ |
| 862 | u64 start_time; |
| 863 | |
| 864 | /* Boot based time in nsecs: */ |
Peter Zijlstra | cf25e24 | 2019-11-07 11:07:58 +0100 | [diff] [blame] | 865 | u64 start_boottime; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 866 | |
| 867 | /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */ |
| 868 | unsigned long min_flt; |
| 869 | unsigned long maj_flt; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 870 | |
Thomas Gleixner | 2b69942 | 2019-08-21 21:09:04 +0200 | [diff] [blame] | 871 | /* Empty if CONFIG_POSIX_TIMERS=n */ |
| 872 | struct posix_cputimers posix_cputimers; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 873 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 874 | /* Process credentials: */ |
| 875 | |
| 876 | /* Tracer's credentials at attach: */ |
| 877 | const struct cred __rcu *ptracer_cred; |
| 878 | |
| 879 | /* Objective and real subjective task credentials (COW): */ |
| 880 | const struct cred __rcu *real_cred; |
| 881 | |
| 882 | /* Effective (overridable) subjective task credentials (COW): */ |
| 883 | const struct cred __rcu *cred; |
| 884 | |
David Howells | 7743c48 | 2019-06-19 16:10:15 +0100 | [diff] [blame] | 885 | #ifdef CONFIG_KEYS |
| 886 | /* Cached requested key. */ |
| 887 | struct key *cached_requested_key; |
| 888 | #endif |
| 889 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 890 | /* |
| 891 | * executable name, excluding path. |
| 892 | * |
| 893 | * - normally initialized by setup_new_exec() |
| 894 | * - access it with [gs]et_task_comm() |
| 895 | * - lock it with task_lock() |
| 896 | */ |
| 897 | char comm[TASK_COMM_LEN]; |
| 898 | |
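	/*
	 * Editor's illustrative sketch (not in the original source), following
	 * the access rules above (get_task_comm() is declared later in this
	 * header and takes task_lock() internally):
	 *
	 *	char buf[TASK_COMM_LEN];
	 *
	 *	get_task_comm(buf, tsk);
	 */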
| 899 | struct nameidata *nameidata; |
| 900 | |
Alexey Dobriyan | 3d5b6fc | 2006-09-29 01:59:40 -0700 | [diff] [blame] | 901 | #ifdef CONFIG_SYSVIPC |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 902 | struct sysv_sem sysvsem; |
| 903 | struct sysv_shm sysvshm; |
Alexey Dobriyan | 3d5b6fc | 2006-09-29 01:59:40 -0700 | [diff] [blame] | 904 | #endif |
Mandeep Singh Baines | e162b39 | 2009-01-15 11:08:40 -0800 | [diff] [blame] | 905 | #ifdef CONFIG_DETECT_HUNG_TASK |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 906 | unsigned long last_switch_count; |
Dmitry Vyukov | a2e5144 | 2018-08-21 21:55:52 -0700 | [diff] [blame] | 907 | unsigned long last_switch_time; |
Ingo Molnar | 82a1fcb | 2008-01-25 21:08:02 +0100 | [diff] [blame] | 908 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 909 | /* Filesystem information: */ |
| 910 | struct fs_struct *fs; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 911 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 912 | /* Open file information: */ |
| 913 | struct files_struct *files; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 914 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 915 | /* Namespaces: */ |
| 916 | struct nsproxy *nsproxy; |
Oleg Nesterov | 2e01fab | 2015-11-06 16:32:19 -0800 | [diff] [blame] | 917 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 918 | /* Signal handlers: */ |
| 919 | struct signal_struct *signal; |
Madhuparna Bhowmik | 913292c | 2020-01-24 10:29:08 +0530 | [diff] [blame] | 920 | struct sighand_struct __rcu *sighand; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 921 | sigset_t blocked; |
| 922 | sigset_t real_blocked; |
| 923 | /* Restored if set_restore_sigmask() was used: */ |
| 924 | sigset_t saved_sigmask; |
| 925 | struct sigpending pending; |
| 926 | unsigned long sas_ss_sp; |
| 927 | size_t sas_ss_size; |
| 928 | unsigned int sas_ss_flags; |
Oleg Nesterov | e73f895 | 2012-05-11 10:59:07 +1000 | [diff] [blame] | 929 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 930 | struct callback_head *task_works; |
| 931 | |
Richard Guy Briggs | 4b7d248 | 2019-01-22 17:06:39 -0500 | [diff] [blame] | 932 | #ifdef CONFIG_AUDIT |
Al Viro | bfef93a | 2008-01-10 04:53:18 -0500 | [diff] [blame] | 933 | #ifdef CONFIG_AUDITSYSCALL |
Richard Guy Briggs | 5f3d544 | 2019-02-01 22:45:17 -0500 | [diff] [blame] | 934 | struct audit_context *audit_context; |
| 935 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 936 | kuid_t loginuid; |
| 937 | unsigned int sessionid; |
Al Viro | bfef93a | 2008-01-10 04:53:18 -0500 | [diff] [blame] | 938 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 939 | struct seccomp seccomp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 940 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 941 | /* Thread group tracking: */ |
| 942 | u32 parent_exec_id; |
| 943 | u32 self_exec_id; |
| 944 | |
| 945 | /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */ |
| 946 | spinlock_t alloc_lock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 947 | |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 948 | /* Protection of the PI data structures: */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 949 | raw_spinlock_t pi_lock; |
Ingo Molnar | b29739f | 2006-06-27 02:54:51 -0700 | [diff] [blame] | 950 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 951 | struct wake_q_node wake_q; |
Peter Zijlstra | 7675104 | 2015-05-01 08:27:50 -0700 | [diff] [blame] | 952 | |
Ingo Molnar | 23f78d4a | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 953 | #ifdef CONFIG_RT_MUTEXES |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 954 | /* PI waiters blocked on a rt_mutex held by this task: */ |
Davidlohr Bueso | a23ba90 | 2017-09-08 16:15:01 -0700 | [diff] [blame] | 955 | struct rb_root_cached pi_waiters; |
Xunlei Pang | e96a7705 | 2017-03-23 15:56:08 +0100 | [diff] [blame] | 956 | /* Updated under owner's pi_lock and rq lock */ |
| 957 | struct task_struct *pi_top_task; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 958 | /* Deadlock detection and priority inheritance handling: */ |
| 959 | struct rt_mutex_waiter *pi_blocked_on; |
Ingo Molnar | 23f78d4a | 2006-06-27 02:54:53 -0700 | [diff] [blame] | 960 | #endif |
| 961 | |
Ingo Molnar | 408894e | 2006-01-09 15:59:20 -0800 | [diff] [blame] | 962 | #ifdef CONFIG_DEBUG_MUTEXES |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 963 | /* Mutex deadlock detection: */ |
| 964 | struct mutex_waiter *blocked_on; |
Ingo Molnar | 408894e | 2006-01-09 15:59:20 -0800 | [diff] [blame] | 965 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 966 | |
Daniel Vetter | 312364f3 | 2019-08-26 22:14:23 +0200 | [diff] [blame] | 967 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
| 968 | int non_block_count; |
| 969 | #endif |
| 970 | |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 971 | #ifdef CONFIG_TRACE_IRQFLAGS |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 972 | unsigned int irq_events; |
Peter Zijlstra | de8f5e4 | 2020-03-21 12:26:01 +0100 | [diff] [blame^] | 973 | unsigned int hardirq_threaded; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 974 | unsigned long hardirq_enable_ip; |
| 975 | unsigned long hardirq_disable_ip; |
| 976 | unsigned int hardirq_enable_event; |
| 977 | unsigned int hardirq_disable_event; |
| 978 | int hardirqs_enabled; |
| 979 | int hardirq_context; |
| 980 | unsigned long softirq_disable_ip; |
| 981 | unsigned long softirq_enable_ip; |
| 982 | unsigned int softirq_disable_event; |
| 983 | unsigned int softirq_enable_event; |
| 984 | int softirqs_enabled; |
| 985 | int softirq_context; |
Ingo Molnar | de30a2b | 2006-07-03 00:24:42 -0700 | [diff] [blame] | 986 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 987 | |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 988 | #ifdef CONFIG_LOCKDEP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 989 | # define MAX_LOCK_DEPTH 48UL |
| 990 | u64 curr_chain_key; |
| 991 | int lockdep_depth; |
| 992 | unsigned int lockdep_recursion; |
| 993 | struct held_lock held_locks[MAX_LOCK_DEPTH]; |
Ingo Molnar | fbb9ce95 | 2006-07-03 00:24:50 -0700 | [diff] [blame] | 994 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 995 | |
Andrey Ryabinin | c6d3085 | 2016-01-20 15:00:55 -0800 | [diff] [blame] | 996 | #ifdef CONFIG_UBSAN |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 997 | unsigned int in_ubsan; |
Andrey Ryabinin | c6d3085 | 2016-01-20 15:00:55 -0800 | [diff] [blame] | 998 | #endif |
Ingo Molnar | 408894e | 2006-01-09 15:59:20 -0800 | [diff] [blame] | 999 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1000 | /* Journalling filesystem info: */ |
| 1001 | void *journal_info; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1003 | /* Stacked block device info: */ |
| 1004 | struct bio_list *bio_list; |
Neil Brown | d89d879 | 2007-05-01 09:53:42 +0200 | [diff] [blame] | 1005 | |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1006 | #ifdef CONFIG_BLOCK |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1007 | /* Stack plugging: */ |
| 1008 | struct blk_plug *plug; |
Jens Axboe | 73c1010 | 2011-03-08 13:19:51 +0100 | [diff] [blame] | 1009 | #endif |
| 1010 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1011 | /* VM state: */ |
| 1012 | struct reclaim_state *reclaim_state; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1013 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1014 | struct backing_dev_info *backing_dev_info; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1015 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1016 | struct io_context *io_context; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 | |
Mel Gorman | 5e1f0f0 | 2019-03-05 15:45:41 -0800 | [diff] [blame] | 1018 | #ifdef CONFIG_COMPACTION |
| 1019 | struct capture_control *capture_control; |
| 1020 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1021 | /* Ptrace state: */ |
| 1022 | unsigned long ptrace_message; |
Eric W. Biederman | ae7795b | 2018-09-25 11:27:20 +0200 | [diff] [blame] | 1023 | kernel_siginfo_t *last_siginfo; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1024 | |
| 1025 | struct task_io_accounting ioac; |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 1026 | #ifdef CONFIG_PSI |
| 1027 | /* Pressure stall state */ |
| 1028 | unsigned int psi_flags; |
| 1029 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1030 | #ifdef CONFIG_TASK_XACCT |
| 1031 | /* Accumulated RSS usage: */ |
| 1032 | u64 acct_rss_mem1; |
| 1033 | /* Accumulated virtual memory usage: */ |
| 1034 | u64 acct_vm_mem1; |
| 1035 | /* stime + utime since last update: */ |
| 1036 | u64 acct_timexpd; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1037 | #endif |
| 1038 | #ifdef CONFIG_CPUSETS |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1039 | /* Protected by ->alloc_lock: */ |
| 1040 | nodemask_t mems_allowed; |
| 1041 | /* Sequence number to catch updates: */ |
| 1042 | seqcount_t mems_allowed_seq; |
| 1043 | int cpuset_mem_spread_rotor; |
| 1044 | int cpuset_slab_spread_rotor; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1045 | #endif |
Paul Menage | ddbcc7e | 2007-10-18 23:39:30 -0700 | [diff] [blame] | 1046 | #ifdef CONFIG_CGROUPS |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1047 | /* Control Group info protected by css_set_lock: */ |
| 1048 | struct css_set __rcu *cgroups; |
| 1049 | /* cg_list protected by css_set_lock and tsk->alloc_lock: */ |
| 1050 | struct list_head cg_list; |
Paul Menage | ddbcc7e | 2007-10-18 23:39:30 -0700 | [diff] [blame] | 1051 | #endif |
Johannes Weiner | e6d4293 | 2019-01-29 17:44:36 -0500 | [diff] [blame] | 1052 | #ifdef CONFIG_X86_CPU_RESCTRL |
Vikas Shivappa | 0734ded | 2017-07-25 14:14:33 -0700 | [diff] [blame] | 1053 | u32 closid; |
Vikas Shivappa | d6aaba6 | 2017-07-25 14:14:34 -0700 | [diff] [blame] | 1054 | u32 rmid; |
Fenghua Yu | e02737d | 2016-10-28 15:04:46 -0700 | [diff] [blame] | 1055 | #endif |
Alexey Dobriyan | 42b2dd0 | 2007-10-16 23:27:30 -0700 | [diff] [blame] | 1056 | #ifdef CONFIG_FUTEX |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1057 | struct robust_list_head __user *robust_list; |
Ingo Molnar | 34f192c | 2006-03-27 01:16:24 -0800 | [diff] [blame] | 1058 | #ifdef CONFIG_COMPAT |
| 1059 | struct compat_robust_list_head __user *compat_robust_list; |
| 1060 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1061 | struct list_head pi_state_list; |
| 1062 | struct futex_pi_state *pi_state_cache; |
Thomas Gleixner | 3f186d9 | 2019-11-06 22:55:44 +0100 | [diff] [blame] | 1063 | struct mutex futex_exit_mutex; |
Thomas Gleixner | 3d4775d | 2019-11-06 22:55:37 +0100 | [diff] [blame] | 1064 | unsigned int futex_state; |
Alexey Dobriyan | 42b2dd0 | 2007-10-16 23:27:30 -0700 | [diff] [blame] | 1065 | #endif |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 1066 | #ifdef CONFIG_PERF_EVENTS |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1067 | struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; |
| 1068 | struct mutex perf_event_mutex; |
| 1069 | struct list_head perf_event_list; |
Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 1070 | #endif |
Thomas Gleixner | 8f47b18 | 2014-02-07 20:58:39 +0100 | [diff] [blame] | 1071 | #ifdef CONFIG_DEBUG_PREEMPT |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1072 | unsigned long preempt_disable_ip; |
Thomas Gleixner | 8f47b18 | 2014-02-07 20:58:39 +0100 | [diff] [blame] | 1073 | #endif |
Richard Kennedy | c7aceab | 2008-05-15 12:09:15 +0100 | [diff] [blame] | 1074 | #ifdef CONFIG_NUMA |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1075 | /* Protected by alloc_lock: */ |
| 1076 | struct mempolicy *mempolicy; |
Vlastimil Babka | 4581668 | 2017-07-06 15:39:59 -0700 | [diff] [blame] | 1077 | short il_prev; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1078 | short pref_node_fork; |
Richard Kennedy | c7aceab | 2008-05-15 12:09:15 +0100 | [diff] [blame] | 1079 | #endif |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 1080 | #ifdef CONFIG_NUMA_BALANCING |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1081 | int numa_scan_seq; |
| 1082 | unsigned int numa_scan_period; |
| 1083 | unsigned int numa_scan_period_max; |
| 1084 | int numa_preferred_nid; |
| 1085 | unsigned long numa_migrate_retry; |
| 1086 | /* Migration stamp: */ |
| 1087 | u64 node_stamp; |
| 1088 | u64 last_task_numa_placement; |
| 1089 | u64 last_sum_exec_runtime; |
| 1090 | struct callback_head numa_work; |
Mel Gorman | f809ca9 | 2013-10-07 11:28:57 +0100 | [diff] [blame] | 1091 | |
Jann Horn | cb361d8 | 2019-07-16 17:20:47 +0200 | [diff] [blame] | 1092 | /* |
| 1093 | * This pointer is only modified for current in syscall and |
| 1094 | * pagefault context (and for tasks being destroyed), so it can be read |
| 1095 | * from any of the following contexts: |
| 1096 | * - RCU read-side critical section |
| 1097 | * - current->numa_group from everywhere |
| 1098 | * - task's runqueue locked, task not running |
| 1099 | */ |
| 1100 | struct numa_group __rcu *numa_group; |
Peter Zijlstra | 8c8a743 | 2013-10-07 11:29:21 +0100 | [diff] [blame] | 1101 | |
Mel Gorman | 745d614 | 2013-10-07 11:28:59 +0100 | [diff] [blame] | 1102 | /* |
Iulia Manda | 44dba3d | 2014-10-31 02:13:31 +0200 | [diff] [blame] | 1103 | * numa_faults is an array split into four regions: |
| 1104 | * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer |
| 1105 | * in this precise order. |
| 1106 | * |
| 1107 | * faults_memory: Exponential decaying average of faults on a per-node |
| 1108 | * basis. Scheduling placement decisions are made based on these |
| 1109 | * counts. The values remain static for the duration of a PTE scan. |
| 1110 | * faults_cpu: Track the nodes the process was running on when a NUMA |
| 1111 | * hinting fault was incurred. |
| 1112 | * faults_memory_buffer and faults_cpu_buffer: Record faults per node |
| 1113 | * during the current scan window. When the scan completes, the counts |
| 1114 | * in faults_memory and faults_cpu decay and these values are copied. |
Mel Gorman | 745d614 | 2013-10-07 11:28:59 +0100 | [diff] [blame] | 1115 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1116 | unsigned long *numa_faults; |
| 1117 | unsigned long total_numa_faults; |
Mel Gorman | 745d614 | 2013-10-07 11:28:59 +0100 | [diff] [blame] | 1118 | |
| 1119 | /* |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 1120 | * numa_faults_locality tracks if faults recorded during the last |
Mel Gorman | 074c238 | 2015-03-25 15:55:42 -0700 | [diff] [blame] | 1121 | * scan window were remote/local or failed to migrate. The task scan |
| 1122 | * period is adapted based on the locality of the faults with different |
| 1123 | * weights depending on whether they were shared or private faults. |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 1124 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1125 | unsigned long numa_faults_locality[3]; |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 1126 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1127 | unsigned long numa_pages_migrated; |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 1128 | #endif /* CONFIG_NUMA_BALANCING */ |
| 1129 | |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1130 | #ifdef CONFIG_RSEQ |
| 1131 | struct rseq __user *rseq; |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1132 | u32 rseq_sig; |
| 1133 | /* |
| 1134 | * RmW on rseq_event_mask must be performed atomically |
| 1135 | * with respect to preemption. |
| 1136 | */ |
| 1137 | unsigned long rseq_event_mask; |
| 1138 | #endif |
| 1139 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1140 | struct tlbflush_unmap_batch tlb_ubc; |
Mel Gorman | 72b252a | 2015-09-04 15:47:32 -0700 | [diff] [blame] | 1141 | |
Eric W. Biederman | 3fbd7ee | 2019-09-14 07:33:34 -0500 | [diff] [blame] | 1142 | union { |
| 1143 | refcount_t rcu_users; |
| 1144 | struct rcu_head rcu; |
| 1145 | }; |
Jens Axboe | b92ce55 | 2006-04-11 13:52:07 +0200 | [diff] [blame] | 1146 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1147 | /* Cache last used pipe for splice(): */ |
| 1148 | struct pipe_inode_info *splice_pipe; |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1149 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1150 | struct page_frag task_frag; |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 1151 | |
Ingo Molnar | 47913d4 | 2017-02-01 18:00:26 +0100 | [diff] [blame] | 1152 | #ifdef CONFIG_TASK_DELAY_ACCT |
| 1153 | struct task_delay_info *delays; |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 1154 | #endif |
Ingo Molnar | 47913d4 | 2017-02-01 18:00:26 +0100 | [diff] [blame] | 1155 | |
Akinobu Mita | f4f154f | 2006-12-08 02:39:47 -0800 | [diff] [blame] | 1156 | #ifdef CONFIG_FAULT_INJECTION |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1157 | int make_it_fail; |
Akinobu Mita | 9049f2f | 2017-07-14 14:49:52 -0700 | [diff] [blame] | 1158 | unsigned int fail_nth; |
Akinobu Mita | f4f154f | 2006-12-08 02:39:47 -0800 | [diff] [blame] | 1159 | #endif |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1160 | /* |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1161 | * When (nr_dirtied >= nr_dirtied_pause), it's time to call |
| 1162 | * balance_dirty_pages() for a dirty throttling pause: |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1163 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1164 | int nr_dirtied; |
| 1165 | int nr_dirtied_pause; |
| 1166 | /* Start of a write-and-pause period: */ |
| 1167 | unsigned long dirty_paused_when; |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1168 | |
Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1169 | #ifdef CONFIG_LATENCYTOP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1170 | int latency_record_count; |
| 1171 | struct latency_record latency_record[LT_SAVECOUNT]; |
Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1172 | #endif |
Arjan van de Ven | 6976675 | 2008-09-01 15:52:40 -0700 | [diff] [blame] | 1173 | /* |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1174 | * Time slack values; these are used to round up poll() and |
Arjan van de Ven | 6976675 | 2008-09-01 15:52:40 -0700 | [diff] [blame] | 1175 | * select() etc. timeout values. These are in nanoseconds. |
| 1176 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1177 | u64 timer_slack_ns; |
| 1178 | u64 default_timer_slack_ns; |
David Miller | f8d570a | 2008-11-06 00:37:40 -0800 | [diff] [blame] | 1179 | |
Andrey Ryabinin | 0b24bec | 2015-02-13 14:39:17 -0800 | [diff] [blame] | 1180 | #ifdef CONFIG_KASAN |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1181 | unsigned int kasan_depth; |
Andrey Ryabinin | 0b24bec | 2015-02-13 14:39:17 -0800 | [diff] [blame] | 1182 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1183 | |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 1184 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1185 | /* Index of current stored address in ret_stack: */ |
| 1186 | int curr_ret_stack; |
Steven Rostedt (VMware) | 39eb456 | 2018-11-19 08:07:12 -0500 | [diff] [blame] | 1187 | int curr_ret_depth; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1188 | |
| 1189 | /* Stack of return addresses for return function tracing: */ |
| 1190 | struct ftrace_ret_stack *ret_stack; |
| 1191 | |
| 1192 | /* Timestamp for last schedule: */ |
| 1193 | unsigned long long ftrace_timestamp; |
| 1194 | |
Frederic Weisbecker | f201ae2 | 2008-11-23 06:22:56 +0100 | [diff] [blame] | 1195 | /* |
| 1196 | * Number of functions that haven't been traced |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1197 | * because of depth overrun: |
Frederic Weisbecker | f201ae2 | 2008-11-23 06:22:56 +0100 | [diff] [blame] | 1198 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1199 | atomic_t trace_overrun; |
Tejun Heo | b23afb9 | 2015-11-05 18:46:11 -0800 | [diff] [blame] | 1200 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1201 | /* Pause tracing: */ |
| 1202 | atomic_t tracing_graph_pause; |
KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 1203 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1204 | |
| 1205 | #ifdef CONFIG_TRACING |
| 1206 | /* State flags for use by tracers: */ |
| 1207 | unsigned long trace; |
| 1208 | |
| 1209 | /* Bitmask and counter of trace recursion: */ |
| 1210 | unsigned long trace_recursion; |
| 1211 | #endif /* CONFIG_TRACING */ |
| 1212 | |
| 1213 | #ifdef CONFIG_KCOV |
Andrey Konovalov | eec028c | 2019-12-04 16:52:43 -0800 | [diff] [blame] | 1214 | /* See kernel/kcov.c for more details. */ |
| 1215 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1216 | /* Coverage collection mode enabled for this task (0 if disabled): */ |
Mark Rutland | 0ed557a | 2018-06-14 15:27:41 -0700 | [diff] [blame] | 1217 | unsigned int kcov_mode; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1218 | |
| 1219 | /* Size of the kcov_area: */ |
| 1220 | unsigned int kcov_size; |
| 1221 | |
| 1222 | /* Buffer for coverage collection: */ |
| 1223 | void *kcov_area; |
| 1224 | |
| 1225 | /* KCOV descriptor wired with this task or NULL: */ |
| 1226 | struct kcov *kcov; |
Andrey Konovalov | eec028c | 2019-12-04 16:52:43 -0800 | [diff] [blame] | 1227 | |
| 1228 | /* KCOV common handle for remote coverage collection: */ |
| 1229 | u64 kcov_handle; |
| 1230 | |
| 1231 | /* KCOV sequence number: */ |
| 1232 | int kcov_sequence; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1233 | #endif |
| 1234 | |
| 1235 | #ifdef CONFIG_MEMCG |
| 1236 | struct mem_cgroup *memcg_in_oom; |
| 1237 | gfp_t memcg_oom_gfp_mask; |
| 1238 | int memcg_oom_order; |
| 1239 | |
| 1240 | /* Number of pages to reclaim on returning to userland: */ |
| 1241 | unsigned int memcg_nr_pages_over_high; |
Shakeel Butt | d46eb14b | 2018-08-17 15:46:39 -0700 | [diff] [blame] | 1242 | |
| 1243 | /* Used by memcontrol for targeted memcg charge: */ |
| 1244 | struct mem_cgroup *active_memcg; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1245 | #endif |
| 1246 | |
Josef Bacik | d09d8df | 2018-07-03 11:14:55 -0400 | [diff] [blame] | 1247 | #ifdef CONFIG_BLK_CGROUP |
| 1248 | struct request_queue *throttle_queue; |
| 1249 | #endif |
| 1250 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1251 | #ifdef CONFIG_UPROBES |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1252 | struct uprobe_task *utask; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1253 | #endif |
Kent Overstreet | cafe563 | 2013-03-23 16:11:31 -0700 | [diff] [blame] | 1254 | #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1255 | unsigned int sequential_io; |
| 1256 | unsigned int sequential_io_avg; |
Kent Overstreet | cafe563 | 2013-03-23 16:11:31 -0700 | [diff] [blame] | 1257 | #endif |
Peter Zijlstra | 8eb23b9 | 2014-09-24 10:18:55 +0200 | [diff] [blame] | 1258 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1259 | unsigned long task_state_change; |
Peter Zijlstra | 8eb23b9 | 2014-09-24 10:18:55 +0200 | [diff] [blame] | 1260 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1261 | int pagefault_disabled; |
Michal Hocko | 0304926 | 2016-03-25 14:20:33 -0700 | [diff] [blame] | 1262 | #ifdef CONFIG_MMU |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1263 | struct task_struct *oom_reaper_list; |
Michal Hocko | 0304926 | 2016-03-25 14:20:33 -0700 | [diff] [blame] | 1264 | #endif |
Andy Lutomirski | ba14a19 | 2016-08-11 02:35:21 -0700 | [diff] [blame] | 1265 | #ifdef CONFIG_VMAP_STACK |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1266 | struct vm_struct *stack_vm_area; |
Andy Lutomirski | ba14a19 | 2016-08-11 02:35:21 -0700 | [diff] [blame] | 1267 | #endif |
Andy Lutomirski | 68f24b08 | 2016-09-15 22:45:48 -0700 | [diff] [blame] | 1268 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1269 | /* A live task holds one reference: */ |
Elena Reshetova | f0b89d3 | 2019-01-18 14:27:30 +0200 | [diff] [blame] | 1270 | refcount_t stack_refcount; |
Andy Lutomirski | 68f24b08 | 2016-09-15 22:45:48 -0700 | [diff] [blame] | 1271 | #endif |
Josh Poimboeuf | d83a7cb | 2017-02-13 19:42:40 -0600 | [diff] [blame] | 1272 | #ifdef CONFIG_LIVEPATCH |
| 1273 | int patch_state; |
| 1274 | #endif |
Tetsuo Handa | e4e55b4 | 2017-03-24 20:46:33 +0900 | [diff] [blame] | 1275 | #ifdef CONFIG_SECURITY |
| 1276 | /* Used by LSM modules for access restriction: */ |
| 1277 | void *security; |
| 1278 | #endif |
Kees Cook | 29e48ce | 2017-04-05 22:43:33 -0700 | [diff] [blame] | 1279 | |
Alexander Popov | afaef01 | 2018-08-17 01:16:58 +0300 | [diff] [blame] | 1280 | #ifdef CONFIG_GCC_PLUGIN_STACKLEAK |
| 1281 | unsigned long lowest_stack; |
Alexander Popov | c8d1262 | 2018-08-17 01:17:01 +0300 | [diff] [blame] | 1282 | unsigned long prev_lowest_stack; |
Alexander Popov | afaef01 | 2018-08-17 01:16:58 +0300 | [diff] [blame] | 1283 | #endif |
| 1284 | |
Kees Cook | 29e48ce | 2017-04-05 22:43:33 -0700 | [diff] [blame] | 1285 | /* |
| 1286 | * New fields for task_struct should be added above here, so that |
| 1287 | * they are included in the randomized portion of task_struct. |
| 1288 | */ |
| 1289 | randomized_struct_fields_end |
| 1290 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1291 | /* CPU-specific state of this task: */ |
| 1292 | struct thread_struct thread; |
| 1293 | |
| 1294 | /* |
| 1295 | * WARNING: on x86, 'thread_struct' contains a variable-sized |
| 1296 | * structure. It *MUST* be at the end of 'task_struct'. |
| 1297 | * |
| 1298 | * Do not put anything below here! |
| 1299 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1300 | }; |
| 1301 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1302 | static inline struct pid *task_pid(struct task_struct *task) |
Eric W. Biederman | 22c935f | 2006-10-02 02:17:09 -0700 | [diff] [blame] | 1303 | { |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 1304 | return task->thread_pid; |
Eric W. Biederman | 22c935f | 2006-10-02 02:17:09 -0700 | [diff] [blame] | 1305 | } |
| 1306 | |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1307 | /* |
| 1308 | * Helpers to get the task's different ids as they are seen |
| 1309 | * from various namespaces: |
| 1310 | * |
| 1311 | * task_xid_nr() : global id, i.e. the id seen from the init namespace; |
Eric W. Biederman | 44c4e1b | 2008-02-08 04:19:15 -0800 | [diff] [blame] | 1312 | * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of |
| 1313 | * current. |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1314 | * task_xid_nr_ns() : id seen from the ns specified; |
| 1315 | * |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1316 | * see also pid_nr() etc in include/linux/pid.h |
| 1317 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1318 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1319 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1320 | static inline pid_t task_pid_nr(struct task_struct *tsk) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1321 | { |
| 1322 | return tsk->pid; |
| 1323 | } |
| 1324 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1325 | static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1326 | { |
| 1327 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); |
| 1328 | } |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1329 | |
| 1330 | static inline pid_t task_pid_vnr(struct task_struct *tsk) |
| 1331 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1332 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1333 | } |
| 1334 | |
| 1335 | |
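/*
 * Editor's illustrative sketch (not in the original source): the difference
 * between the global and virtual pid of a task. The 'example_' name is a
 * placeholder, not a kernel API.
 */
static inline void example_pid_views(struct task_struct *tsk,
				     pid_t *nr, pid_t *vnr)
{
	*nr  = task_pid_nr(tsk);	/* global id, init pid namespace */
	*vnr = task_pid_vnr(tsk);	/* id as seen from current's pid namespace */
}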
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1336 | static inline pid_t task_tgid_nr(struct task_struct *tsk) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1337 | { |
| 1338 | return tsk->tgid; |
| 1339 | } |
| 1340 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1341 | /** |
| 1342 | * pid_alive - check that a task structure is not stale |
| 1343 | * @p: Task structure to be checked. |
| 1344 | * |
| 1345 | * Test if a process is not yet dead (at most zombie state). |
| 1346 | * If pid_alive fails, then pointers within the task structure |
| 1347 | * can be stale and must not be dereferenced. |
| 1348 | * |
| 1349 | * Return: 1 if the process is alive. 0 otherwise. |
| 1350 | */ |
| 1351 | static inline int pid_alive(const struct task_struct *p) |
| 1352 | { |
Eric W. Biederman | 2c47047 | 2017-09-26 13:06:43 -0500 | [diff] [blame] | 1353 | return p->thread_pid != NULL; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1354 | } |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1355 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1356 | static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1357 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1358 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1359 | } |
| 1360 | |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1361 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) |
| 1362 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1363 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1364 | } |
| 1365 | |
| 1366 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1367 | static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1368 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1369 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1370 | } |
| 1371 | |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1372 | static inline pid_t task_session_vnr(struct task_struct *tsk) |
| 1373 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1374 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1375 | } |
| 1376 | |
Oleg Nesterov | dd1c1f2 | 2017-08-21 17:35:02 +0200 | [diff] [blame] | 1377 | static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) |
| 1378 | { |
Eric W. Biederman | 6883f81 | 2017-06-04 04:32:13 -0500 | [diff] [blame] | 1379 | return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); |
Oleg Nesterov | dd1c1f2 | 2017-08-21 17:35:02 +0200 | [diff] [blame] | 1380 | } |
| 1381 | |
| 1382 | static inline pid_t task_tgid_vnr(struct task_struct *tsk) |
| 1383 | { |
Eric W. Biederman | 6883f81 | 2017-06-04 04:32:13 -0500 | [diff] [blame] | 1384 | return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL); |
Oleg Nesterov | dd1c1f2 | 2017-08-21 17:35:02 +0200 | [diff] [blame] | 1385 | } |
| 1386 | |
| 1387 | static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) |
| 1388 | { |
| 1389 | pid_t pid = 0; |
| 1390 | |
| 1391 | rcu_read_lock(); |
| 1392 | if (pid_alive(tsk)) |
| 1393 | pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); |
| 1394 | rcu_read_unlock(); |
| 1395 | |
| 1396 | return pid; |
| 1397 | } |
| 1398 | |
| 1399 | static inline pid_t task_ppid_nr(const struct task_struct *tsk) |
| 1400 | { |
| 1401 | return task_ppid_nr_ns(tsk, &init_pid_ns); |
| 1402 | } |
| 1403 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1404 | /* Obsolete, do not use: */ |
Oleg Nesterov | 1b0f7ffd | 2009-04-02 16:58:39 -0700 | [diff] [blame] | 1405 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) |
| 1406 | { |
| 1407 | return task_pgrp_nr_ns(tsk, &init_pid_ns); |
| 1408 | } |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1409 | |
Peter Zijlstra | 06eb618 | 2017-09-22 18:30:40 +0200 | [diff] [blame] | 1410 | #define TASK_REPORT_IDLE (TASK_REPORT + 1) |
| 1411 | #define TASK_REPORT_MAX (TASK_REPORT_IDLE << 1) |
| 1412 | |
Peter Zijlstra | 1d48b08 | 2017-09-29 13:50:16 +0200 | [diff] [blame] | 1413 | static inline unsigned int task_state_index(struct task_struct *tsk) |
Xie XiuQi | 20435d8 | 2017-08-07 16:44:23 +0800 | [diff] [blame] | 1414 | { |
Peter Zijlstra | 1593baa | 2017-09-22 18:09:26 +0200 | [diff] [blame] | 1415 | unsigned int tsk_state = READ_ONCE(tsk->state); |
| 1416 | unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT; |
Xie XiuQi | 20435d8 | 2017-08-07 16:44:23 +0800 | [diff] [blame] | 1417 | |
Peter Zijlstra | 06eb618 | 2017-09-22 18:30:40 +0200 | [diff] [blame] | 1418 | BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX); |
| 1419 | |
Peter Zijlstra | 06eb618 | 2017-09-22 18:30:40 +0200 | [diff] [blame] | 1420 | if (tsk_state == TASK_IDLE) |
| 1421 | state = TASK_REPORT_IDLE; |
| 1422 | |
Peter Zijlstra | 1593baa | 2017-09-22 18:09:26 +0200 | [diff] [blame] | 1423 | return fls(state); |
| 1424 | } |
Xie XiuQi | 20435d8 | 2017-08-07 16:44:23 +0800 | [diff] [blame] | 1425 | |
Peter Zijlstra | 1d48b08 | 2017-09-29 13:50:16 +0200 | [diff] [blame] | 1426 | static inline char task_index_to_char(unsigned int state) |
Peter Zijlstra | 1593baa | 2017-09-22 18:09:26 +0200 | [diff] [blame] | 1427 | { |
Peter Zijlstra | 8ef9925 | 2017-09-22 18:37:28 +0200 | [diff] [blame] | 1428 | static const char state_char[] = "RSDTtXZPI"; |
Peter Zijlstra | 1593baa | 2017-09-22 18:09:26 +0200 | [diff] [blame] | 1429 | |
Peter Zijlstra | 06eb618 | 2017-09-22 18:30:40 +0200 | [diff] [blame] | 1430 | BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); |
Peter Zijlstra | 1593baa | 2017-09-22 18:09:26 +0200 | [diff] [blame] | 1431 | |
| 1432 | return state_char[state]; |
| 1433 | } |
| 1434 | |
| 1435 | static inline char task_state_to_char(struct task_struct *tsk) |
| 1436 | { |
Peter Zijlstra | 1d48b08 | 2017-09-29 13:50:16 +0200 | [diff] [blame] | 1437 | return task_index_to_char(task_state_index(tsk)); |
Xie XiuQi | 20435d8 | 2017-08-07 16:44:23 +0800 | [diff] [blame] | 1438 | } |
| 1439 | |
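/*
 * Editor's illustrative sketch (not in the original source): the returned
 * letter is the one shown by ps(1) and /proc/<pid>/stat, e.g. 'R' running,
 * 'S' sleeping, 'D' uninterruptible, 'Z' zombie, 'I' idle. The 'example_'
 * name is a placeholder.
 */
static inline bool example_task_is_running(struct task_struct *tsk)
{
	return task_state_to_char(tsk) == 'R';
}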
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1440 | /** |
Sergey Senozhatsky | 570f524 | 2016-01-01 23:03:01 +0900 | [diff] [blame] | 1441 | * is_global_init - check if a task structure is init. Since init |
| 1442 | * is free to have sub-threads, we need to check the tgid. |
Henne | 3260259 | 2006-10-06 00:44:01 -0700 | [diff] [blame] | 1443 | * @tsk: Task structure to be checked. |
| 1444 | * |
| 1445 | * Check if a task structure is the first user space task the kernel created. |
Yacine Belkadi | e69f618 | 2013-07-12 20:45:47 +0200 | [diff] [blame] | 1446 | * |
| 1447 | * Return: 1 if the task structure is init. 0 otherwise. |
Sukadev Bhattiprolu | f400e19 | 2006-09-29 02:00:07 -0700 | [diff] [blame] | 1448 | */ |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1449 | static inline int is_global_init(struct task_struct *tsk) |
Pavel Emelyanov | b461cc0 | 2007-10-18 23:40:09 -0700 | [diff] [blame] | 1450 | { |
Sergey Senozhatsky | 570f524 | 2016-01-01 23:03:01 +0900 | [diff] [blame] | 1451 | return task_tgid_nr(tsk) == 1; |
Pavel Emelyanov | b461cc0 | 2007-10-18 23:40:09 -0700 | [diff] [blame] | 1452 | } |
Serge E. Hallyn | b460cbc | 2007-10-18 23:39:52 -0700 | [diff] [blame] | 1453 | |
Cedric Le Goater | 9ec5209 | 2006-10-02 02:19:00 -0700 | [diff] [blame] | 1454 | extern struct pid *cad_pid; |
| 1455 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1456 | /* |
| 1457 | * Per-process flags |
| 1458 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1459 | #define PF_IDLE 0x00000002 /* I am an IDLE thread */ |
| 1460 | #define PF_EXITING 0x00000004 /* Getting shut down */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1461 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
| 1462 | #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ |
| 1463 | #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ |
| 1464 | #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ |
| 1465 | #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ |
| 1466 | #define PF_DUMPCORE 0x00000200 /* Dumped core */ |
| 1467 | #define PF_SIGNALED 0x00000400 /* Killed by a signal */ |
| 1468 | #define PF_MEMALLOC 0x00000800 /* Allocating memory */ |
| 1469 | #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ |
| 1470 | #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ |
| 1471 | #define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ |
| 1472 | #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ |
| 1473 | #define PF_FROZEN 0x00010000 /* Frozen for system suspend */ |
Michal Hocko | 7dea19f | 2017-05-03 14:53:15 -0700 | [diff] [blame] | 1474 | #define PF_KSWAPD 0x00020000 /* I am kswapd */ |
| 1475 | #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ |
| 1476 | #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1477 | #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ |
| 1478 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ |
| 1479 | #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ |
| 1480 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
Johannes Weiner | eb41468 | 2018-10-26 15:06:27 -0700 | [diff] [blame] | 1481 | #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ |
Taehee Yoo | 73ab1cb | 2019-01-09 02:23:56 +0900 | [diff] [blame] | 1482 | #define PF_UMH 0x02000000 /* I'm a usermodehelper process */ |
Sebastian Andrzej Siewior | 3bd3706 | 2019-04-23 16:26:36 +0200 | [diff] [blame] | 1483 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1484 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
Aneesh Kumar K.V | d7fefcc | 2019-03-05 15:47:40 -0800 | [diff] [blame] | 1485 | #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation requests will have __GFP_MOVABLE cleared */ |
Jens Axboe | 771b53d0 | 2019-10-22 10:25:58 -0600 | [diff] [blame] | 1486 | #define PF_IO_WORKER 0x20000000 /* Task is an IO worker */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1487 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
| 1488 | #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1489 | |
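/*
 * Editor's illustrative sketch (not in the original source): PF_* values are
 * plain bits in tsk->flags, tested with a simple mask (see the write rules
 * in the comment below). The 'example_' name is a placeholder.
 */
static inline bool example_is_kernel_thread(const struct task_struct *p)
{
	return (p->flags & PF_KTHREAD) != 0;
}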
| 1490 | /* |
| 1491 | * Only the _current_ task can read/write to tsk->flags, but other |
| 1492 | * tasks can access tsk->flags in read-only mode, for example |
| 1493 | * with tsk_used_math (like during threaded core dumping). |
| 1494 | * There is however an exception to this rule during ptrace |
| 1495 | * or during fork: the ptracer task is allowed to write to the |
| 1496 | * child->flags of its traced child (same goes for fork, the parent |
| 1497 | * can write to the child->flags), because we're guaranteed the |
| 1498 | * child is not running and in turn not changing child->flags |
| 1499 | * at the same time the parent does it. |
| 1500 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1501 | #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) |
| 1502 | #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) |
| 1503 | #define clear_used_math() clear_stopped_child_used_math(current) |
| 1504 | #define set_used_math() set_stopped_child_used_math(current) |
| 1505 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1506 | #define conditional_stopped_child_used_math(condition, child) \ |
| 1507 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1508 | |
| 1509 | #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) |
| 1510 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1511 | #define copy_to_stopped_child_used_math(child) \ |
| 1512 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1513 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1514 | /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1515 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
| 1516 | #define used_math() tsk_used_math(current) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | |
Thomas Gleixner | 62ec05dd | 2017-05-24 10:15:41 +0200 | [diff] [blame] | 1518 | static inline bool is_percpu_thread(void) |
| 1519 | { |
| 1520 | #ifdef CONFIG_SMP |
| 1521 | return (current->flags & PF_NO_SETAFFINITY) && |
| 1522 | (current->nr_cpus_allowed == 1); |
| 1523 | #else |
| 1524 | return true; |
| 1525 | #endif |
| 1526 | } |
| 1527 | |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1528 | /* Per-process atomic flags. */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1529 | #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ |
| 1530 | #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ |
| 1531 | #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ |
Thomas Gleixner | 356e4bf | 2018-05-03 22:09:15 +0200 | [diff] [blame] | 1532 | #define PFA_SPEC_SSB_DISABLE 3 /* Speculative Store Bypass disabled */ |
| 1533 | #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled */ |
Thomas Gleixner | 9137bb2 | 2018-11-25 19:33:53 +0100 | [diff] [blame] | 1534 | #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ |
| 1535 | #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ |
Waiman Long | 71368af | 2019-01-16 17:01:36 -0500 | [diff] [blame] | 1536 | #define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1537 | |
Zefan Li | e0e5070 | 2014-09-25 09:40:40 +0800 | [diff] [blame] | 1538 | #define TASK_PFA_TEST(name, func) \ |
| 1539 | static inline bool task_##func(struct task_struct *p) \ |
| 1540 | { return test_bit(PFA_##name, &p->atomic_flags); } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1541 | |
Zefan Li | e0e5070 | 2014-09-25 09:40:40 +0800 | [diff] [blame] | 1542 | #define TASK_PFA_SET(name, func) \ |
| 1543 | static inline void task_set_##func(struct task_struct *p) \ |
| 1544 | { set_bit(PFA_##name, &p->atomic_flags); } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1545 | |
Zefan Li | e0e5070 | 2014-09-25 09:40:40 +0800 | [diff] [blame] | 1546 | #define TASK_PFA_CLEAR(name, func) \ |
| 1547 | static inline void task_clear_##func(struct task_struct *p) \ |
| 1548 | { clear_bit(PFA_##name, &p->atomic_flags); } |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1549 | |
Zefan Li | e0e5070 | 2014-09-25 09:40:40 +0800 | [diff] [blame] | 1550 | TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) |
| 1551 | TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1552 | |
Zefan Li | 2ad654b | 2014-09-25 09:41:02 +0800 | [diff] [blame] | 1553 | TASK_PFA_TEST(SPREAD_PAGE, spread_page) |
| 1554 | TASK_PFA_SET(SPREAD_PAGE, spread_page) |
| 1555 | TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) |
| 1556 | |
| 1557 | TASK_PFA_TEST(SPREAD_SLAB, spread_slab) |
| 1558 | TASK_PFA_SET(SPREAD_SLAB, spread_slab) |
| 1559 | TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) |
Tejun Heo | 544b2c9 | 2011-06-14 11:20:18 +0200 | [diff] [blame] | 1560 | |
Thomas Gleixner | 356e4bf | 2018-05-03 22:09:15 +0200 | [diff] [blame] | 1561 | TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) |
| 1562 | TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) |
| 1563 | TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) |
| 1564 | |
Waiman Long | 71368af | 2019-01-16 17:01:36 -0500 | [diff] [blame] | 1565 | TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) |
| 1566 | TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) |
| 1567 | TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) |
| 1568 | |
Thomas Gleixner | 356e4bf | 2018-05-03 22:09:15 +0200 | [diff] [blame] | 1569 | TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) |
| 1570 | TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) |
| 1571 | |
Thomas Gleixner | 9137bb2 | 2018-11-25 19:33:53 +0100 | [diff] [blame] | 1572 | TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable) |
| 1573 | TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable) |
| 1574 | TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable) |
| 1575 | |
| 1576 | TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) |
| 1577 | TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable) |
| 1578 | |
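/*
 * Editor's note (not in the original source): each TASK_PFA_* line above
 * stamps out one tiny accessor. For instance
 *
 *	TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
 *
 * expands to
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * Note that the SPEC_*_FORCE_DISABLE flags get no TASK_PFA_CLEAR() accessor,
 * which matches their one-way semantics: a force-disable is not meant to be
 * undone.
 */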
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1579 | static inline void |
NeilBrown | 717a94b | 2017-04-07 10:03:26 +1000 | [diff] [blame] | 1580 | current_restore_flags(unsigned long orig_flags, unsigned long flags) |
Mel Gorman | 907aed4 | 2012-07-31 16:44:07 -0700 | [diff] [blame] | 1581 | { |
NeilBrown | 717a94b | 2017-04-07 10:03:26 +1000 | [diff] [blame] | 1582 | current->flags &= ~flags; |
| 1583 | current->flags |= orig_flags & flags; |
Mel Gorman | 907aed4 | 2012-07-31 16:44:07 -0700 | [diff] [blame] | 1584 | } |
| 1585 | |
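/*
 * Editor's illustrative sketch (not in the original source): the save/restore
 * idiom current_restore_flags() exists for, shown with PF_MEMALLOC_NOIO (the
 * same shape as memalloc_noio_save()/memalloc_noio_restore() in
 * <linux/sched/mm.h>). The 'example_' names are placeholders.
 */
static inline unsigned int example_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;

	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}

static inline void example_noio_restore(unsigned int flags)
{
	current_restore_flags(flags, PF_MEMALLOC_NOIO);
}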
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1586 | extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); |
| 1587 | extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1588 | #ifdef CONFIG_SMP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1589 | extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); |
| 1590 | extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1591 | #else |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1592 | static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) |
KOSAKI Motohiro | 1e1b6c5 | 2011-05-19 15:08:58 +0900 | [diff] [blame] | 1593 | { |
| 1594 | } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1595 | static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1596 | { |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 1597 | if (!cpumask_test_cpu(0, new_mask)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1598 | return -EINVAL; |
| 1599 | return 0; |
| 1600 | } |
| 1601 | #endif |
Rusty Russell | e0ad955 | 2009-09-24 09:34:38 -0600 | [diff] [blame] | 1602 | |
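/*
 * Editor's illustrative sketch (not in the original source): pinning a task
 * to a single CPU via the affinity API above (cpumask_of() comes from
 * <linux/cpumask.h>). The 'example_' name is a placeholder.
 */
static inline int example_pin_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}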
Dan Carpenter | fa93384 | 2014-05-23 13:20:42 +0300 | [diff] [blame] | 1603 | extern int yield_to(struct task_struct *p, bool preempt); |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1604 | extern void set_user_nice(struct task_struct *p, long nice); |
| 1605 | extern int task_prio(const struct task_struct *p); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1606 | |
Dongsheng Yang | d0ea026 | 2014-01-27 22:00:45 -0500 | [diff] [blame] | 1607 | /** |
| 1608 | * task_nice - return the nice value of a given task. |
| 1609 | * @p: the task in question. |
| 1610 | * |
| 1611 | * Return: The nice value [ -20 ... 0 ... 19 ]. |
| 1612 | */ |
| 1613 | static inline int task_nice(const struct task_struct *p) |
| 1614 | { |
| 1615 | return PRIO_TO_NICE((p)->static_prio); |
| 1616 | } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1617 | |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1618 | extern int can_nice(const struct task_struct *p, const int nice); |
| 1619 | extern int task_curr(const struct task_struct *p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1620 | extern int idle_cpu(int cpu); |
Rohit Jain | 943d355 | 2018-05-09 09:39:48 -0700 | [diff] [blame] | 1621 | extern int available_idle_cpu(int cpu); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1622 | extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); |
| 1623 | extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); |
| 1624 | extern int sched_setattr(struct task_struct *, const struct sched_attr *); |
Juri Lelli | 794a56e | 2017-12-04 11:23:20 +0100 | [diff] [blame] | 1625 | extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1626 | extern struct task_struct *idle_task(int cpu); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1627 | |
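/*
 * Editor's illustrative sketch (not in the original source): switching a
 * kernel-internal task to SCHED_FIFO. struct sched_param (one field,
 * sched_priority) comes from <uapi/linux/sched/types.h>; the _nocheck
 * variant skips the permission checks and is for kernel-internal callers.
 * The 'example_' name is a placeholder.
 */
static inline int example_make_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 1 };

	return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp);
}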
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1628 | /** |
| 1629 | * is_idle_task - is the specified task an idle task? |
Randy Dunlap | fa75728 | 2012-01-21 11:03:13 -0800 | [diff] [blame] | 1630 | * @p: the task in question. |
Yacine Belkadi | e69f618 | 2013-07-12 20:45:47 +0200 | [diff] [blame] | 1631 | * |
| 1632 | * Return: 1 if @p is an idle task. 0 otherwise. |
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1633 | */ |
Paul E. McKenney | 7061ca3 | 2011-12-20 08:20:46 -0800 | [diff] [blame] | 1634 | static inline bool is_idle_task(const struct task_struct *p) |
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1635 | { |
Peter Zijlstra | c1de45c | 2016-11-28 23:03:05 -0800 | [diff] [blame] | 1636 | return !!(p->flags & PF_IDLE); |
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1637 | } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1638 | |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1639 | extern struct task_struct *curr_task(int cpu); |
Peter Zijlstra | a458ae2 | 2016-09-20 20:29:40 +0200 | [diff] [blame] | 1640 | extern void ia64_set_curr_task(int cpu, struct task_struct *p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1641 | |
| 1642 | void yield(void); |
| 1643 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1644 | union thread_union { |
David Howells | 0500871 | 2018-01-02 15:12:01 +0000 | [diff] [blame] | 1645 | #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK |
| 1646 | struct task_struct task; |
| 1647 | #endif |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1648 | #ifndef CONFIG_THREAD_INFO_IN_TASK |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1649 | struct thread_info thread_info; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1650 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1651 | unsigned long stack[THREAD_SIZE/sizeof(long)]; |
| 1652 | }; |
| 1653 | |
David Howells | 0500871 | 2018-01-02 15:12:01 +0000 | [diff] [blame] | 1654 | #ifndef CONFIG_THREAD_INFO_IN_TASK |
| 1655 | extern struct thread_info init_thread_info; |
| 1656 | #endif |
| 1657 | |
| 1658 | extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; |
| 1659 | |
Ingo Molnar | f3ac606 | 2017-02-03 22:59:33 +0100 | [diff] [blame] | 1660 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 1661 | static inline struct thread_info *task_thread_info(struct task_struct *task) |
| 1662 | { |
| 1663 | return &task->thread_info; |
| 1664 | } |
| 1665 | #elif !defined(__HAVE_THREAD_FUNCTIONS) |
| 1666 | # define task_thread_info(task) ((struct thread_info *)(task)->stack) |
| 1667 | #endif |
| 1668 | |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1669 | /* |
| 1670 | * Find a task by one of its numerical IDs: |
| 1671 | * |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1672 | * find_task_by_pid_ns(): |
| 1673 | * finds a task by its pid in the specified namespace |
Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 1674 | * find_task_by_vpid(): |
| 1675 | * finds a task by its virtual pid |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1676 | * |
Pavel Emelyanov | e49859e | 2008-07-25 01:48:36 -0700 | [diff] [blame] | 1677 | * see also find_vpid() etc in include/linux/pid.h |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1678 | */ |
| 1679 | |
Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 1680 | extern struct task_struct *find_task_by_vpid(pid_t nr); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1681 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1682 | |
Mike Rapoport | 2ee0826 | 2018-02-06 15:40:17 -0800 | [diff] [blame] | 1683 | /* |
| 1684 | * Find a task by its virtual pid and take a reference on its task_struct. |
| 1685 | */ |
| 1686 | extern struct task_struct *find_get_task_by_vpid(pid_t nr); |
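
/*
 * Illustrative sketch (not part of the original header) of the two
 * lookup flavours above: find_task_by_vpid() returns an unreferenced
 * pointer that is only stable under rcu_read_lock(), while
 * find_get_task_by_vpid() takes a reference the caller must drop with
 * put_task_struct(). Assumes <linux/sched/signal.h> for send_sig().
 */
static inline int sketch_signal_vpid(pid_t nr, int sig)
{
	struct task_struct *p;
	int ret = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(nr);
	if (p)
		ret = send_sig(sig, p, 0);
	rcu_read_unlock();

	return ret;
}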
| 1687 | |
Harvey Harrison | b3c9752 | 2008-02-13 15:03:15 -0800 | [diff] [blame] | 1688 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
| 1689 | extern int wake_up_process(struct task_struct *tsk); |
Samir Bellabes | 3e51e3e | 2011-05-11 18:18:05 +0200 | [diff] [blame] | 1690 | extern void wake_up_new_task(struct task_struct *tsk); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1691 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1692 | #ifdef CONFIG_SMP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1693 | extern void kick_process(struct task_struct *tsk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1694 | #else |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1695 | static inline void kick_process(struct task_struct *tsk) { } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1696 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1697 | |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 1698 | extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1699 | |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 1700 | static inline void set_task_comm(struct task_struct *tsk, const char *from) |
| 1701 | { |
| 1702 | __set_task_comm(tsk, from, false); |
| 1703 | } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1704 | |
Arnd Bergmann | 3756f64 | 2017-12-14 15:32:41 -0800 | [diff] [blame] | 1705 | extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); |
| 1706 | #define get_task_comm(buf, tsk) ({ \ |
| 1707 | BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ |
| 1708 | __get_task_comm(buf, sizeof(buf), tsk); \ |
| 1709 | }) |
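
/*
 * Illustrative sketch (not part of the original header): the buffer
 * handed to get_task_comm() must be a TASK_COMM_LEN array; the
 * BUILD_BUG_ON() in the macro rejects anything else at compile time.
 * Assumes <linux/printk.h> for pr_info().
 */
static inline void sketch_log_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	pr_info("pid %d comm %s\n", task_pid_nr(tsk), comm);
}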
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1710 | |
| 1711 | #ifdef CONFIG_SMP |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 1712 | void scheduler_ipi(void); |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 1713 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1714 | #else |
Peter Zijlstra | 184748c | 2011-04-05 17:23:39 +0200 | [diff] [blame] | 1715 | static inline void scheduler_ipi(void) { } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1716 | static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 1717 | { |
| 1718 | return 1; |
| 1719 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1720 | #endif |
| 1721 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1722 | /* |
| 1723 | * Set thread flags in another task's structure. |
| 1724 | * See asm/thread_info.h for the available TIF_xxxx flags. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1725 | */ |
| 1726 | static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1727 | { |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1728 | set_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1729 | } |
| 1730 | |
| 1731 | static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1732 | { |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1733 | clear_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1734 | } |
| 1735 | |
Dave Martin | 93ee37c | 2018-04-11 17:54:20 +0100 | [diff] [blame] | 1736 | static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag, |
| 1737 | bool value) |
| 1738 | { |
| 1739 | update_ti_thread_flag(task_thread_info(tsk), flag, value); |
| 1740 | } |
| 1741 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1742 | static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1743 | { |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1744 | return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1745 | } |
| 1746 | |
| 1747 | static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1748 | { |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1749 | return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1750 | } |
| 1751 | |
| 1752 | static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1753 | { |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1754 | return test_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1755 | } |
| 1756 | |
| 1757 | static inline void set_tsk_need_resched(struct task_struct *tsk) |
| 1758 | { |
| 1759 | set_tsk_thread_flag(tsk, TIF_NEED_RESCHED); |
| 1760 | } |
| 1761 | |
| 1762 | static inline void clear_tsk_need_resched(struct task_struct *tsk) |
| 1763 | { |
| 1764 | clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED); |
| 1765 | } |
| 1766 | |
Gregory Haskins | 8ae121a | 2008-04-23 07:13:29 -0400 | [diff] [blame] | 1767 | static inline int test_tsk_need_resched(struct task_struct *tsk) |
| 1768 | { |
| 1769 | return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED)); |
| 1770 | } |
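
/*
 * Illustrative sketch (not part of the original header): the helpers
 * above let one task manipulate *another* task's thread flags, e.g.
 * asking a remote task to pass through the scheduler soon.
 */
static inline void sketch_force_resched(struct task_struct *p)
{
	set_tsk_need_resched(p);
	kick_process(p);	/* IPI so a currently running task notices */
}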
| 1771 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1772 | /* |
| 1773 | * cond_resched() and cond_resched_lock(): latency reduction via |
| 1774 | * explicit rescheduling in places that are safe. The return |
| 1775 | * value indicates whether a reschedule was done in fact. |
| 1776 | * cond_resched_lock() will drop the spinlock before scheduling, call |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1777 | * schedule, and on return reacquire the lock. */ |
Thomas Gleixner | c1a280b | 2019-07-26 23:19:37 +0200 | [diff] [blame] | 1778 | #ifndef CONFIG_PREEMPTION |
Linus Torvalds | c3921ab | 2008-05-11 16:04:48 -0700 | [diff] [blame] | 1779 | extern int _cond_resched(void); |
Peter Zijlstra | 35a773a | 2016-09-19 12:57:53 +0200 | [diff] [blame] | 1780 | #else |
| 1781 | static inline int _cond_resched(void) { return 0; } |
| 1782 | #endif |
Frederic Weisbecker | 6f80bd9 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1783 | |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1784 | #define cond_resched() ({ \ |
Peter Zijlstra | 3427445 | 2014-09-24 10:18:56 +0200 | [diff] [blame] | 1785 | ___might_sleep(__FILE__, __LINE__, 0); \ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1786 | _cond_resched(); \ |
| 1787 | }) |
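
/*
 * Illustrative sketch (not part of the original header): the canonical
 * cond_resched() pattern is a long loop doing bounded work per pass,
 * offering a reschedule point each iteration; under CONFIG_PREEMPTION
 * the call reduces to the might_sleep()-style debug check. Assumes
 * <linux/highmem.h> for clear_highpage().
 */
static inline void sketch_zero_pages(struct page **pages, unsigned long nr)
{
	unsigned long i;

	for (i = 0; i < nr; i++) {
		clear_highpage(pages[i]);	/* bounded work per pass */
		cond_resched();			/* safe point: no locks held */
	}
}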
Frederic Weisbecker | 6f80bd9 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1788 | |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1789 | extern int __cond_resched_lock(spinlock_t *lock); |
| 1790 | |
| 1791 | #define cond_resched_lock(lock) ({ \ |
Peter Zijlstra | 3427445 | 2014-09-24 10:18:56 +0200 | [diff] [blame] | 1792 | ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1793 | __cond_resched_lock(lock); \ |
| 1794 | }) |
| 1795 | |
Simon Horman | f6f3c43 | 2013-05-22 14:50:31 +0900 | [diff] [blame] | 1796 | static inline void cond_resched_rcu(void) |
| 1797 | { |
| 1798 | #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) |
| 1799 | rcu_read_unlock(); |
| 1800 | cond_resched(); |
| 1801 | rcu_read_lock(); |
| 1802 | #endif |
| 1803 | } |
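
/*
 * Illustrative sketch (not part of the original header), using
 * hypothetical helpers lookup_slot()/inspect(): cond_resched_rcu()
 * drops the RCU read lock around the reschedule, so nothing obtained
 * under RCU may be held across it; here each pass revalidates by index.
 */
static inline void sketch_scan_slots(unsigned int nr)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < nr; i++) {
		struct foo *f = lookup_slot(i);		/* hypothetical */

		if (f)
			inspect(f);			/* hypothetical */
		cond_resched_rcu();	/* 'f' is invalid past this point */
	}
	rcu_read_unlock();
}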
| 1804 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1805 | /* |
| 1806 | * Does a critical section need to be broken due to another |
Thomas Gleixner | c1a280b | 2019-07-26 23:19:37 +0200 | [diff] [blame] | 1807 | * task waiting? (Technically this does not depend on CONFIG_PREEMPTION, |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1808 | * but reflects a general need for low latency.) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1809 | */ |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1810 | static inline int spin_needbreak(spinlock_t *lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1811 | { |
Thomas Gleixner | c1a280b | 2019-07-26 23:19:37 +0200 | [diff] [blame] | 1812 | #ifdef CONFIG_PREEMPTION |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1813 | return spin_is_contended(lock); |
| 1814 | #else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1815 | return 0; |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1816 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1817 | } |
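
/*
 * Illustrative sketch (not part of the original header) of the
 * lock-break pattern spin_needbreak() enables: voluntarily drop a
 * contended lock at a point where the structure is consistent.
 * 'struct foo' and consume() are hypothetical.
 */
static inline void sketch_drain(spinlock_t *lock, struct list_head *head)
{
	spin_lock(lock);
	while (!list_empty(head)) {
		struct foo *f = list_first_entry(head, struct foo, list);

		list_del(&f->list);
		consume(f);				/* hypothetical */

		if (spin_needbreak(lock)) {
			spin_unlock(lock);
			cond_resched();
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
}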
| 1818 | |
Peter Zijlstra | 75f93fe | 2013-09-27 17:30:03 +0200 | [diff] [blame] | 1819 | static __always_inline bool need_resched(void) |
| 1820 | { |
| 1821 | return unlikely(tif_need_resched()); |
| 1822 | } |
| 1823 | |
Thomas Gleixner | ee761f6 | 2013-03-21 22:49:32 +0100 | [diff] [blame] | 1824 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1825 | * Wrappers for the task's CPU access (p->cpu or thread_info->cpu). No-op on UP. |
| 1826 | */ |
| 1827 | #ifdef CONFIG_SMP |
| 1828 | |
| 1829 | static inline unsigned int task_cpu(const struct task_struct *p) |
| 1830 | { |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1831 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
Andrea Parri | c546951 | 2019-01-21 16:52:40 +0100 | [diff] [blame] | 1832 | return READ_ONCE(p->cpu); |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1833 | #else |
Andrea Parri | c546951 | 2019-01-21 16:52:40 +0100 | [diff] [blame] | 1834 | return READ_ONCE(task_thread_info(p)->cpu); |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1835 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1836 | } |
| 1837 | |
Ingo Molnar | c65cc87 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1838 | extern void set_task_cpu(struct task_struct *p, unsigned int cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1839 | |
| 1840 | #else |
| 1841 | |
| 1842 | static inline unsigned int task_cpu(const struct task_struct *p) |
| 1843 | { |
| 1844 | return 0; |
| 1845 | } |
| 1846 | |
| 1847 | static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) |
| 1848 | { |
| 1849 | } |
| 1850 | |
| 1851 | #endif /* CONFIG_SMP */ |
| 1852 | |
Pan Xinhui | d9345c6 | 2016-11-02 05:08:28 -0400 | [diff] [blame] | 1853 | /* |
| 1854 | * In order to reduce various lock holder preemption latencies, provide an |
| 1855 | * interface to see if a vCPU is currently running or not. |
| 1856 | * |
| 1857 | * This allows us to terminate optimistic spin loops and block, analogous to |
| 1858 | * the native optimistic spin heuristic of testing if the lock owner task is |
| 1859 | * running or not. |
| 1860 | */ |
| 1861 | #ifndef vcpu_is_preempted |
Qian Cai | 42fd8ba | 2019-09-17 10:34:54 -0400 | [diff] [blame] | 1862 | static inline bool vcpu_is_preempted(int cpu) |
| 1863 | { |
| 1864 | return false; |
| 1865 | } |
Pan Xinhui | d9345c6 | 2016-11-02 05:08:28 -0400 | [diff] [blame] | 1866 | #endif |
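
/*
 * Illustrative sketch (not part of the original header), modelled on
 * the mutex/rwsem owner-spinning code: spin only while the owner is
 * on a CPU whose vCPU has not been preempted by the host. Relies on
 * p->on_cpu, which exists on CONFIG_SMP kernels.
 */
static inline bool sketch_spin_on_owner(struct task_struct *owner)
{
	while (READ_ONCE(owner->on_cpu)) {
		if (vcpu_is_preempted(task_cpu(owner)))
			return false;	/* owner's vCPU preempted: block */
		cpu_relax();
	}
	return true;	/* owner went off-CPU; re-check the lock */
}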
| 1867 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 1868 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
| 1869 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 1870 | |
Dave Hansen | 8245525 | 2008-02-04 22:28:59 -0800 | [diff] [blame] | 1871 | #ifndef TASK_SIZE_OF |
| 1872 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
| 1873 | #endif |
| 1874 | |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1875 | #ifdef CONFIG_RSEQ |
| 1876 | |
| 1877 | /* |
| 1878 | * Map the event mask on the user-space ABI enum rseq_cs_flags |
| 1879 | * for direct mask checks. |
| 1880 | */ |
| 1881 | enum rseq_event_mask_bits { |
| 1882 | RSEQ_EVENT_PREEMPT_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT, |
| 1883 | RSEQ_EVENT_SIGNAL_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT, |
| 1884 | RSEQ_EVENT_MIGRATE_BIT = RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT, |
| 1885 | }; |
| 1886 | |
| 1887 | enum rseq_event_mask { |
| 1888 | RSEQ_EVENT_PREEMPT = (1U << RSEQ_EVENT_PREEMPT_BIT), |
| 1889 | RSEQ_EVENT_SIGNAL = (1U << RSEQ_EVENT_SIGNAL_BIT), |
| 1890 | RSEQ_EVENT_MIGRATE = (1U << RSEQ_EVENT_MIGRATE_BIT), |
| 1891 | }; |
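
/*
 * Illustrative sketch (not part of the original header): because the
 * event bits above coincide with the uapi RSEQ_CS_FLAG_NO_RESTART_ON_*
 * bits, deciding whether a critical section opted out of restart for
 * every pending event is a direct mask check, with no translation.
 */
static inline bool sketch_rseq_restart_inhibited(u32 cs_flags,
						 unsigned long event_mask)
{
	/* true iff every pending event is in the "no restart" set */
	return !(event_mask & ~(unsigned long)cs_flags);
}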
| 1892 | |
| 1893 | static inline void rseq_set_notify_resume(struct task_struct *t) |
| 1894 | { |
| 1895 | if (t->rseq) |
| 1896 | set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); |
| 1897 | } |
| 1898 | |
Will Deacon | 784e030 | 2018-06-22 11:45:07 +0100 | [diff] [blame] | 1899 | void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs); |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1900 | |
Will Deacon | 784e030 | 2018-06-22 11:45:07 +0100 | [diff] [blame] | 1901 | static inline void rseq_handle_notify_resume(struct ksignal *ksig, |
| 1902 | struct pt_regs *regs) |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1903 | { |
| 1904 | if (current->rseq) |
Will Deacon | 784e030 | 2018-06-22 11:45:07 +0100 | [diff] [blame] | 1905 | __rseq_handle_notify_resume(ksig, regs); |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1906 | } |
| 1907 | |
Will Deacon | 784e030 | 2018-06-22 11:45:07 +0100 | [diff] [blame] | 1908 | static inline void rseq_signal_deliver(struct ksignal *ksig, |
| 1909 | struct pt_regs *regs) |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1910 | { |
| 1911 | preempt_disable(); |
| 1912 | __set_bit(RSEQ_EVENT_SIGNAL_BIT, ¤t->rseq_event_mask); |
| 1913 | preempt_enable(); |
Will Deacon | 784e030 | 2018-06-22 11:45:07 +0100 | [diff] [blame] | 1914 | rseq_handle_notify_resume(ksig, regs); |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1915 | } |
| 1916 | |
| 1917 | /* rseq_preempt() requires preemption to be disabled. */ |
| 1918 | static inline void rseq_preempt(struct task_struct *t) |
| 1919 | { |
| 1920 | __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); |
| 1921 | rseq_set_notify_resume(t); |
| 1922 | } |
| 1923 | |
| 1924 | /* rseq_migrate() requires preemption to be disabled. */ |
| 1925 | static inline void rseq_migrate(struct task_struct *t) |
| 1926 | { |
| 1927 | __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); |
| 1928 | rseq_set_notify_resume(t); |
| 1929 | } |
| 1930 | |
| 1931 | /* |
| 1932 | * If the parent process has a registered restartable sequences area, |
Mathieu Desnoyers | 463f550 | 2019-12-11 11:17:12 -0500 | [diff] [blame] | 1933 | * the child inherits it. Unregister rseq for a clone with CLONE_VM set. |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1934 | */ |
| 1935 | static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) |
| 1936 | { |
Mathieu Desnoyers | 463f550 | 2019-12-11 11:17:12 -0500 | [diff] [blame] | 1937 | if (clone_flags & CLONE_VM) { |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1938 | t->rseq = NULL; |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1939 | t->rseq_sig = 0; |
| 1940 | t->rseq_event_mask = 0; |
| 1941 | } else { |
| 1942 | t->rseq = current->rseq; |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1943 | t->rseq_sig = current->rseq_sig; |
| 1944 | t->rseq_event_mask = current->rseq_event_mask; |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1945 | } |
| 1946 | } |
| 1947 | |
| 1948 | static inline void rseq_execve(struct task_struct *t) |
| 1949 | { |
| 1950 | t->rseq = NULL; |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1951 | t->rseq_sig = 0; |
| 1952 | t->rseq_event_mask = 0; |
| 1953 | } |
| 1954 | |
| 1955 | #else |
| 1956 | |
| 1957 | static inline void rseq_set_notify_resume(struct task_struct *t) |
| 1958 | { |
| 1959 | } |
Will Deacon | 784e030 | 2018-06-22 11:45:07 +0100 | [diff] [blame] | 1960 | static inline void rseq_handle_notify_resume(struct ksignal *ksig, |
| 1961 | struct pt_regs *regs) |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1962 | { |
| 1963 | } |
Will Deacon | 784e030 | 2018-06-22 11:45:07 +0100 | [diff] [blame] | 1964 | static inline void rseq_signal_deliver(struct ksignal *ksig, |
| 1965 | struct pt_regs *regs) |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1966 | { |
| 1967 | } |
| 1968 | static inline void rseq_preempt(struct task_struct *t) |
| 1969 | { |
| 1970 | } |
| 1971 | static inline void rseq_migrate(struct task_struct *t) |
| 1972 | { |
| 1973 | } |
| 1974 | static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags) |
| 1975 | { |
| 1976 | } |
| 1977 | static inline void rseq_execve(struct task_struct *t) |
| 1978 | { |
| 1979 | } |
| 1980 | |
| 1981 | #endif |
| 1982 | |
Taehee Yoo | 73ab1cb | 2019-01-09 02:23:56 +0900 | [diff] [blame] | 1983 | void __exit_umh(struct task_struct *tsk); |
| 1984 | |
| 1985 | static inline void exit_umh(struct task_struct *tsk) |
| 1986 | { |
| 1987 | if (unlikely(tsk->flags & PF_UMH)) |
| 1988 | __exit_umh(tsk); |
| 1989 | } |
| 1990 | |
Mathieu Desnoyers | d7822b1 | 2018-06-02 08:43:54 -0400 | [diff] [blame] | 1991 | #ifdef CONFIG_DEBUG_RSEQ |
| 1992 | |
| 1993 | void rseq_syscall(struct pt_regs *regs); |
| 1994 | |
| 1995 | #else |
| 1996 | |
| 1997 | static inline void rseq_syscall(struct pt_regs *regs) |
| 1998 | { |
| 1999 | } |
| 2000 | |
| 2001 | #endif |
| 2002 | |
Qais Yousef | 3c93a0c | 2019-06-04 12:14:55 +0100 | [diff] [blame] | 2003 | const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq); |
| 2004 | char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len); |
| 2005 | int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq); |
| 2006 | |
| 2007 | const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq); |
| 2008 | const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq); |
| 2009 | const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq); |
| 2010 | |
| 2011 | int sched_trace_rq_cpu(struct rq *rq); |
| 2012 | |
| 2013 | const struct cpumask *sched_trace_rd_span(struct root_domain *rd); |
| 2014 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2015 | #endif |