#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * Define 'struct task_struct' and provide the main scheduler
 * APIs (schedule(), wakeup variants, etc.)
 */

#include <uapi/linux/sched.h>

#include <asm/current.h>

#include <linux/pid.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/hrtimer.h>
#include <linux/seccomp.h>
#include <linux/nodemask.h>
#include <linux/rcupdate.h>
#include <linux/resource.h>
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>

/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
struct backing_dev_info;
struct bio_list;
struct blk_plug;
struct cfs_rq;
struct fs_struct;
struct futex_pi_state;
struct io_context;
struct mempolicy;
struct nameidata;
struct nsproxy;
struct perf_event_context;
struct pid_namespace;
struct pipe_inode_info;
struct rcu_node;
struct reclaim_state;
struct robust_list_head;
struct sched_attr;
struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
struct task_delay_info;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */

/* Used in tsk->state: */
#define TASK_RUNNING			0
#define TASK_INTERRUPTIBLE		1
#define TASK_UNINTERRUPTIBLE		2
#define __TASK_STOPPED			4
#define __TASK_TRACED			8
/* Used in tsk->exit_state: */
#define EXIT_DEAD			16
#define EXIT_ZOMBIE			32
#define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
/* Used in tsk->state again: */
#define TASK_DEAD			64
#define TASK_WAKEKILL			128
#define TASK_WAKING			256
#define TASK_PARKED			512
#define TASK_NOLOAD			1024
#define TASK_NEW			2048
#define TASK_STATE_MAX			4096

#define TASK_STATE_TO_CHAR_STR		"RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state: */
#define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL			(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state(): */
#define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
					 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)

#define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)

#define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)

#define task_contributes_to_load(task)	((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
					 (task->flags & PF_FROZEN) == 0 && \
					 (task->state & TASK_NOLOAD) == 0)

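/*
 * For example (illustrative): the composite states above are what most
 * callers want. A sleep that a fatal signal may interrupt would pick
 * TASK_KILLABLE, while a kernel thread idling without inflating the
 * load average would pick TASK_IDLE:
 *
 *	set_current_state(TASK_IDLE);
 *	schedule();
 */
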
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)			\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		current->state = (state_value);			\
	} while (0)
#define set_current_state(state_value)				\
	do {							\
		current->task_state_change = _THIS_IP_;		\
		smp_store_mb(current->state, (state_value));	\
	} while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (!need_sleep)
 *		break;
 *
 *	schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *   need_sleep = false;
 *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
#define set_current_state(state_value)	 smp_store_mb(current->state, (state_value))
#endif

/* Task command name length: */
#define TASK_COMM_LEN			16

extern cpumask_var_t cpu_isolated_map;

extern void scheduler_tick(void);

#define	MAX_SCHEDULE_TIMEOUT		LONG_MAX

extern long schedule_timeout(long timeout);
extern long schedule_timeout_interruptible(long timeout);
extern long schedule_timeout_killable(long timeout);
extern long schedule_timeout_uninterruptible(long timeout);
extern long schedule_timeout_idle(long timeout);
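/*
 * For example (illustrative): schedule_timeout() expects the caller to
 * have set the task state first and returns the jiffies left to sleep:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);	(sleep for ~1 second)
 *
 * The schedule_timeout_*() wrappers above combine both steps.
 */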
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

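/*
 * Sketch of the intended pairing (illustrative): code that may block on
 * I/O brackets the section so any sleep inside is accounted as iowait:
 *
 *	int tok = io_schedule_prepare();
 *	...	(blocking section)
 *	io_schedule_finish(tok);
 */
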
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	u64				utime;
	u64				stime;
	raw_spinlock_t			lock;
#endif
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in nanoseconds
 * @stime:		time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups. Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
	u64				utime;
	u64				stime;
	unsigned long long		sum_exec_runtime;
};

/* Alternate field names when used on cache expirations: */
#define virt_exp			utime
#define prof_exp			stime
#define sched_exp			sum_exec_runtime

enum vtime_state {
	/* Task is sleeping or running in a CPU with VTIME inactive: */
	VTIME_INACTIVE = 0,
	/* Task runs in userspace in a CPU with VTIME active: */
	VTIME_USER,
	/* Task runs in kernelspace in a CPU with VTIME active: */
	VTIME_SYS,
};

struct vtime {
	seqcount_t			seqcount;
	unsigned long long		starttime;
	enum vtime_state		state;
	u64				utime;
	u64				stime;
	u64				gtime;
};

struct sched_info {
#ifdef CONFIG_SCHED_INFO
	/* Cumulative counters: */

	/* # of times we have run on this CPU: */
	unsigned long			pcount;

	/* Time spent waiting on a runqueue: */
	unsigned long long		run_delay;

	/* Timestamps: */

	/* When did we last run on a CPU? */
	unsigned long long		last_arrival;

	/* When were we last queued to run? */
	unsigned long long		last_queued;

#endif /* CONFIG_SCHED_INFO */
};

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT		10
# define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)

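/*
 * Worked example (illustrative): with SCHED_FIXEDPOINT_SHIFT == 10 the
 * scale is 1024, so the ratio 1.5 is stored as the integer 1536
 * (1536 / 1024 == 1.5), and a plain integer x is converted with
 * x << SCHED_FIXEDPOINT_SHIFT.
 */
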
struct load_weight {
	unsigned long			weight;
	u32				inv_weight;
};

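/*
 * Note (informal): inv_weight caches an approximate multiplicative
 * inverse of weight (roughly 2^32 / weight, see __update_inv_weight()
 * in kernel/sched/fair.c) so repeated divisions by the weight can be
 * done as a multiply plus shift.
 */
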
/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
	u64				last_update_time;
	u64				load_sum;
	u32				util_sum;
	u32				period_contrib;
	unsigned long			load_avg;
	unsigned long			util_avg;
};

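/*
 * Worked example (illustrative): for a nice-0 task,
 * scale_load_down(load) is 1024, so being runnable half of the time
 * converges on load_avg ~= 0.5 * 1024 = 512; actually running a
 * quarter of the time gives util_avg ~= 0.25 * SCHED_CAPACITY_SCALE
 * = 256.
 */
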
struct sched_statistics {
#ifdef CONFIG_SCHEDSTATS
	u64				wait_start;
	u64				wait_max;
	u64				wait_count;
	u64				wait_sum;
	u64				iowait_count;
	u64				iowait_sum;

	u64				sleep_start;
	u64				sleep_max;
	s64				sum_sleep_runtime;

	u64				block_start;
	u64				block_max;
	u64				exec_max;
	u64				slice_max;

	u64				nr_migrations_cold;
	u64				nr_failed_migrations_affine;
	u64				nr_failed_migrations_running;
	u64				nr_failed_migrations_hot;
	u64				nr_forced_migrations;

	u64				nr_wakeups;
	u64				nr_wakeups_sync;
	u64				nr_wakeups_migrate;
	u64				nr_wakeups_local;
	u64				nr_wakeups_remote;
	u64				nr_wakeups_affine;
	u64				nr_wakeups_affine_attempts;
	u64				nr_wakeups_passive;
	u64				nr_wakeups_idle;
#endif
};

struct sched_entity {
	/* For load-balancing: */
	struct load_weight		load;
	struct rb_node			run_node;
	struct list_head		group_node;
	unsigned int			on_rq;

	u64				exec_start;
	u64				sum_exec_runtime;
	u64				vruntime;
	u64				prev_sum_exec_runtime;

	u64				nr_migrations;

	struct sched_statistics		statistics;

#ifdef CONFIG_FAIR_GROUP_SCHED
	int				depth;
	struct sched_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq			*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq			*my_q;
#endif

#ifdef CONFIG_SMP
	/*
	 * Per entity load average tracking.
	 *
	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
	struct sched_avg		avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
	struct list_head		run_list;
	unsigned long			timeout;
	unsigned long			watchdog_stamp;
	unsigned int			time_slice;
	unsigned short			on_rq;
	unsigned short			on_list;

	struct sched_rt_entity		*back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity		*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq			*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq			*my_q;
#endif
} __randomize_layout;

struct sched_dl_entity {
	struct rb_node			rb_node;

	/*
	 * Original scheduling parameters. Copied here from sched_attr
	 * during sched_setattr(), they will remain the same until
	 * the next sched_setattr().
	 */
	u64				dl_runtime;	/* Maximum runtime for each instance	*/
	u64				dl_deadline;	/* Relative deadline of each instance	*/
	u64				dl_period;	/* Separation of two instances (period) */
	u64				dl_bw;		/* dl_runtime / dl_period		*/
	u64				dl_density;	/* dl_runtime / dl_deadline		*/

	/*
	 * Actual scheduling parameters. Initialized with the values above,
	 * they are continuously updated during task execution. Note that
	 * the remaining runtime could be < 0 in case we are in overrun.
	 */
	s64				runtime;	/* Remaining runtime for this instance	*/
	u64				deadline;	/* Absolute deadline for this instance	*/
	unsigned int			flags;		/* Specifying the scheduler behaviour	*/

	/*
	 * Some bool flags:
	 *
	 * @dl_throttled tells if we exhausted the runtime. If so, the
	 * task has to wait for a replenishment to be performed at the
	 * next firing of dl_timer.
	 *
	 * @dl_boosted tells if we are boosted due to PI. If so we are
	 * outside bandwidth enforcement mechanism (but only until we
	 * exit the critical section);
	 *
	 * @dl_yielded tells if task gave up the CPU before consuming
	 * all its available runtime during the last job.
	 *
	 * @dl_non_contending tells if the task is inactive while still
	 * contributing to the active utilization. In other words, it
	 * indicates if the inactive timer has been armed and its handler
	 * has not been executed yet. This flag is useful to avoid race
	 * conditions between the inactive timer handler and the wakeup
	 * code.
	 */
	int				dl_throttled;
	int				dl_boosted;
	int				dl_yielded;
	int				dl_non_contending;

	/*
	 * Bandwidth enforcement timer. Each -deadline task has its
	 * own bandwidth to be enforced, thus we need one timer per task.
	 */
	struct hrtimer			dl_timer;

	/*
	 * Inactive timer, responsible for decreasing the active utilization
	 * at the "0-lag time". When a -deadline task blocks, it contributes
	 * to GRUB's active utilization until the "0-lag time", hence a
	 * timer is needed to decrease the active utilization at the correct
	 * time.
	 */
	struct hrtimer			inactive_timer;
};

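/*
 * For example (illustrative): userspace typically creates a -deadline
 * task via sched_setattr() with sched_policy = SCHED_DEADLINE and
 * runtime <= deadline <= period, all in nanoseconds, e.g. 10ms of
 * runtime every 100ms:
 *
 *	attr.sched_runtime  =  10 * 1000 * 1000;
 *	attr.sched_deadline = 100 * 1000 * 1000;
 *	attr.sched_period   = 100 * 1000 * 1000;
 */
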
union rcu_special {
	struct {
		u8			blocked;
		u8			need_qs;
		u8			exp_need_qs;

		/* Otherwise the compiler can store garbage here: */
		u8			pad;
	} b; /* Bits. */
	u32 s; /* Set of bits. */
};

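/*
 * Note (informal): the union lets the RCU core read or clear all of the
 * per-task flags above in one go through the 32-bit ->s view, while
 * still naming individual flags through ->b.
 */
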
enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct wake_q_node {
	struct wake_q_node *next;
};

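/*
 * Sketch of intended wake_q use (illustrative; see kernel/sched/core.c
 * for the real API): queue wakeups while holding a lock, then issue
 * them after dropping it:
 *
 *	DEFINE_WAKE_Q(head);
 *	wake_q_add(&head, task);	(under the lock)
 *	wake_up_q(&head);		(after unlocking)
 */
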
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/*
	 * For reasons of header soup (see current_thread_info()), this
	 * must be the first element of task_struct.
	 */
	struct thread_info		thread_info;
#endif
	/* -1 unrunnable, 0 runnable, >0 stopped: */
	volatile long			state;

	/*
	 * This begins the randomizable portion of task_struct. Only
	 * scheduling-critical items should be added above here.
	 */
	randomized_struct_fields_start

	void				*stack;
	atomic_t			usage;
	/* Per task flags (PF_*), defined further below: */
	unsigned int			flags;
	unsigned int			ptrace;

#ifdef CONFIG_SMP
	struct llist_node		wake_entry;
	int				on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
	/* Current CPU: */
	unsigned int			cpu;
#endif
	unsigned int			wakee_flips;
	unsigned long			wakee_flip_decay_ts;
	struct task_struct		*last_wakee;

	int				wake_cpu;
#endif
	int				on_rq;

	int				prio;
	int				static_prio;
	int				normal_prio;
	unsigned int			rt_priority;

	const struct sched_class	*sched_class;
	struct sched_entity		se;
	struct sched_rt_entity		rt;
#ifdef CONFIG_CGROUP_SCHED
	struct task_group		*sched_task_group;
#endif
	struct sched_dl_entity		dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* List of struct preempt_notifier: */
	struct hlist_head		preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int			btrace_seq;
#endif

	unsigned int			policy;
	int				nr_cpus_allowed;
	cpumask_t			cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int				rcu_read_lock_nesting;
	union rcu_special		rcu_read_unlock_special;
	struct list_head		rcu_node_entry;
	struct rcu_node			*rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
	unsigned long			rcu_tasks_nvcsw;
	u8				rcu_tasks_holdout;
	u8				rcu_tasks_idx;
	int				rcu_tasks_idle_cpu;
	struct list_head		rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

	struct sched_info		sched_info;

	struct list_head		tasks;
#ifdef CONFIG_SMP
	struct plist_node		pushable_tasks;
	struct rb_node			pushable_dl_tasks;
#endif

	struct mm_struct		*mm;
	struct mm_struct		*active_mm;

	/* Per-thread vma caching: */
	struct vmacache			vmacache;

#ifdef SPLIT_RSS_COUNTING
	struct task_rss_stat		rss_stat;
#endif
	int				exit_state;
	int				exit_code;
	int				exit_signal;
	/* The signal sent when the parent dies: */
	int				pdeath_signal;
	/* JOBCTL_*, siglock protected: */
	unsigned long			jobctl;

	/* Used for emulating ABI behavior of previous Linux versions: */
	unsigned int			personality;

	/* Scheduler bits, serialized by scheduler locks: */
	unsigned			sched_reset_on_fork:1;
	unsigned			sched_contributes_to_load:1;
	unsigned			sched_migrated:1;
	unsigned			sched_remote_wakeup:1;
	/* Force alignment to the next boundary: */
	unsigned			:0;

	/* Unserialized, strictly 'current' */

	/* Bit to tell LSMs we're in execve(): */
	unsigned			in_execve:1;
	unsigned			in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
	unsigned			restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
	unsigned			memcg_may_oom:1;
#ifndef CONFIG_SLOB
	unsigned			memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
	unsigned			brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
	/* disallow userland-initiated cgroup migration */
	unsigned			no_cgroup_migration:1;
#endif

	unsigned long			atomic_flags; /* Flags requiring atomic access. */

	struct restart_block		restart_block;

	pid_t				pid;
	pid_t				tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector GCC feature: */
	unsigned long			stack_canary;
#endif
	/*
	 * Pointers to the (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively. (p->father can be replaced with
	 * p->real_parent->pid)
	 */

	/* Real parent process: */
	struct task_struct __rcu	*real_parent;

	/* Recipient of SIGCHLD, wait4() reports: */
	struct task_struct __rcu	*parent;

	/*
	 * Children/sibling form the list of natural children:
	 */
	struct list_head		children;
	struct list_head		sibling;
	struct task_struct		*group_leader;

	/*
	 * 'ptraced' is the list of tasks this task is using ptrace() on.
	 *
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
	 */
	struct list_head		ptraced;
	struct list_head		ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link			pids[PIDTYPE_MAX];
	struct list_head		thread_group;
	struct list_head		thread_node;

	struct completion		*vfork_done;

	/* CLONE_CHILD_SETTID: */
	int __user			*set_child_tid;

	/* CLONE_CHILD_CLEARTID: */
	int __user			*clear_child_tid;

	u64				utime;
	u64				stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	u64				utimescaled;
	u64				stimescaled;
#endif
	u64				gtime;
	struct prev_cputime		prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	struct vtime			vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
	atomic_t			tick_dep_mask;
#endif
	/* Context switch counts: */
	unsigned long			nvcsw;
	unsigned long			nivcsw;

	/* Monotonic time in nsecs: */
	u64				start_time;

	/* Boot based time in nsecs: */
	u64				real_start_time;

	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
	unsigned long			min_flt;
	unsigned long			maj_flt;

#ifdef CONFIG_POSIX_TIMERS
	struct task_cputime		cputime_expires;
	struct list_head		cpu_timers[3];
#endif

	/* Process credentials: */

	/* Tracer's credentials at attach: */
	const struct cred __rcu		*ptracer_cred;

	/* Objective and real subjective task credentials (COW): */
	const struct cred __rcu		*real_cred;

	/* Effective (overridable) subjective task credentials (COW): */
	const struct cred __rcu		*cred;

	/*
	 * executable name, excluding path.
	 *
	 * - normally initialized by setup_new_exec()
	 * - access it with [gs]et_task_comm()
	 * - lock it with task_lock()
	 */
	char				comm[TASK_COMM_LEN];
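
	/*
	 * For example (illustrative): a safe read of the name goes
	 * through a local buffer:
	 *
	 *	char buf[TASK_COMM_LEN];
	 *	get_task_comm(buf, tsk);
	 */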

	struct nameidata		*nameidata;

#ifdef CONFIG_SYSVIPC
	struct sysv_sem			sysvsem;
	struct sysv_shm			sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
	unsigned long			last_switch_count;
#endif
	/* Filesystem information: */
	struct fs_struct		*fs;

	/* Open file information: */
	struct files_struct		*files;

	/* Namespaces: */
	struct nsproxy			*nsproxy;

	/* Signal handlers: */
	struct signal_struct		*signal;
	struct sighand_struct		*sighand;
	sigset_t			blocked;
	sigset_t			real_blocked;
	/* Restored if set_restore_sigmask() was used: */
	sigset_t			saved_sigmask;
	struct sigpending		pending;
	unsigned long			sas_ss_sp;
	size_t				sas_ss_size;
	unsigned int			sas_ss_flags;

	struct callback_head		*task_works;

	struct audit_context		*audit_context;
#ifdef CONFIG_AUDITSYSCALL
	kuid_t				loginuid;
	unsigned int			sessionid;
#endif
	struct seccomp			seccomp;

	/* Thread group tracking: */
	u32				parent_exec_id;
	u32				self_exec_id;

	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
	spinlock_t			alloc_lock;

	/* Protection of the PI data structures: */
	raw_spinlock_t			pi_lock;

	struct wake_q_node		wake_q;

#ifdef CONFIG_RT_MUTEXES
	/* PI waiters blocked on a rt_mutex held by this task: */
	struct rb_root_cached		pi_waiters;
	/* Updated under owner's pi_lock and rq lock */
	struct task_struct		*pi_top_task;
	/* Deadlock detection and priority inheritance handling: */
	struct rt_mutex_waiter		*pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	/* Mutex deadlock detection: */
	struct mutex_waiter		*blocked_on;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	unsigned int			irq_events;
	unsigned long			hardirq_enable_ip;
	unsigned long			hardirq_disable_ip;
	unsigned int			hardirq_enable_event;
	unsigned int			hardirq_disable_event;
	int				hardirqs_enabled;
	int				hardirq_context;
	unsigned long			softirq_disable_ip;
	unsigned long			softirq_enable_ip;
	unsigned int			softirq_disable_event;
	unsigned int			softirq_enable_event;
	int				softirqs_enabled;
	int				softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH			48UL
	u64				curr_chain_key;
	int				lockdep_depth;
	unsigned int			lockdep_recursion;
	struct held_lock		held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCKS_NR 64UL
	struct hist_lock *xhlocks; /* Crossrelease history locks */
	unsigned int xhlock_idx;
	/* For restoring at history boundaries */
	unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
	unsigned int hist_id;
	/* For overwrite check at each context exit */
	unsigned int hist_id_save[XHLOCK_CTX_NR];
#endif

#ifdef CONFIG_UBSAN
	unsigned int			in_ubsan;
#endif

	/* Journalling filesystem info: */
	void				*journal_info;

	/* Stacked block device info: */
	struct bio_list			*bio_list;

#ifdef CONFIG_BLOCK
	/* Stack plugging: */
	struct blk_plug			*plug;
#endif

	/* VM state: */
	struct reclaim_state		*reclaim_state;

	struct backing_dev_info		*backing_dev_info;

	struct io_context		*io_context;

	/* Ptrace state: */
	unsigned long			ptrace_message;
	siginfo_t			*last_siginfo;

	struct task_io_accounting	ioac;
#ifdef CONFIG_TASK_XACCT
	/* Accumulated RSS usage: */
	u64				acct_rss_mem1;
	/* Accumulated virtual memory usage: */
	u64				acct_vm_mem1;
	/* stime + utime since last update: */
	u64				acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
	/* Protected by ->alloc_lock: */
	nodemask_t			mems_allowed;
	/* Sequence number to catch updates: */
	seqcount_t			mems_allowed_seq;
	int				cpuset_mem_spread_rotor;
	int				cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
	/* Control Group info protected by css_set_lock: */
	struct css_set __rcu		*cgroups;
	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
	struct list_head		cg_list;
#endif
Vikas Shivappa | f01d7d51 | 2017-07-25 14:14:22 -0700 | [diff] [blame] | 911 | #ifdef CONFIG_INTEL_RDT |
Vikas Shivappa | 0734ded | 2017-07-25 14:14:33 -0700 | [diff] [blame] | 912 | u32 closid; |
Vikas Shivappa | d6aaba6 | 2017-07-25 14:14:34 -0700 | [diff] [blame] | 913 | u32 rmid; |
Fenghua Yu | e02737d | 2016-10-28 15:04:46 -0700 | [diff] [blame] | 914 | #endif |
Alexey Dobriyan | 42b2dd0 | 2007-10-16 23:27:30 -0700 | [diff] [blame] | 915 | #ifdef CONFIG_FUTEX |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 916 | struct robust_list_head __user *robust_list; |
Ingo Molnar | 34f192c | 2006-03-27 01:16:24 -0800 | [diff] [blame] | 917 | #ifdef CONFIG_COMPAT |
| 918 | struct compat_robust_list_head __user *compat_robust_list; |
| 919 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 920 | struct list_head pi_state_list; |
| 921 | struct futex_pi_state *pi_state_cache; |
Alexey Dobriyan | 42b2dd0 | 2007-10-16 23:27:30 -0700 | [diff] [blame] | 922 | #endif |
Ingo Molnar | cdd6c48 | 2009-09-21 12:02:48 +0200 | [diff] [blame] | 923 | #ifdef CONFIG_PERF_EVENTS |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 924 | struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; |
| 925 | struct mutex perf_event_mutex; |
| 926 | struct list_head perf_event_list; |
Paul Mackerras | a63eaf3 | 2009-05-22 14:17:31 +1000 | [diff] [blame] | 927 | #endif |
Thomas Gleixner | 8f47b18 | 2014-02-07 20:58:39 +0100 | [diff] [blame] | 928 | #ifdef CONFIG_DEBUG_PREEMPT |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 929 | unsigned long preempt_disable_ip; |
Thomas Gleixner | 8f47b18 | 2014-02-07 20:58:39 +0100 | [diff] [blame] | 930 | #endif |
Richard Kennedy | c7aceab | 2008-05-15 12:09:15 +0100 | [diff] [blame] | 931 | #ifdef CONFIG_NUMA |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 932 | /* Protected by alloc_lock: */ |
| 933 | struct mempolicy *mempolicy; |
Vlastimil Babka | 4581668 | 2017-07-06 15:39:59 -0700 | [diff] [blame] | 934 | short il_prev; |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 935 | short pref_node_fork; |
Richard Kennedy | c7aceab | 2008-05-15 12:09:15 +0100 | [diff] [blame] | 936 | #endif |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 937 | #ifdef CONFIG_NUMA_BALANCING |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 938 | int numa_scan_seq; |
| 939 | unsigned int numa_scan_period; |
| 940 | unsigned int numa_scan_period_max; |
| 941 | int numa_preferred_nid; |
| 942 | unsigned long numa_migrate_retry; |
| 943 | /* Migration stamp: */ |
| 944 | u64 node_stamp; |
| 945 | u64 last_task_numa_placement; |
| 946 | u64 last_sum_exec_runtime; |
| 947 | struct callback_head numa_work; |
Mel Gorman | f809ca9 | 2013-10-07 11:28:57 +0100 | [diff] [blame] | 948 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 949 | struct list_head numa_entry; |
| 950 | struct numa_group *numa_group; |
Peter Zijlstra | 8c8a743 | 2013-10-07 11:29:21 +0100 | [diff] [blame] | 951 | |
Mel Gorman | 745d614 | 2013-10-07 11:28:59 +0100 | [diff] [blame] | 952 | /* |
Iulia Manda | 44dba3d | 2014-10-31 02:13:31 +0200 | [diff] [blame] | 953 | * numa_faults is an array split into four regions: |
| 954 | * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer |
| 955 | * in this precise order. |
| 956 | * |
| 957 | * faults_memory: Exponential decaying average of faults on a per-node |
| 958 | * basis. Scheduling placement decisions are made based on these |
| 959 | * counts. The values remain static for the duration of a PTE scan. |
| 960 | * faults_cpu: Track the nodes the process was running on when a NUMA |
| 961 | * hinting fault was incurred. |
| 962 | * faults_memory_buffer and faults_cpu_buffer: Record faults per node |
| 963 | * during the current scan window. When the scan completes, the counts |
| 964 | * in faults_memory and faults_cpu decay and these values are copied. |
Mel Gorman | 745d614 | 2013-10-07 11:28:59 +0100 | [diff] [blame] | 965 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 966 | unsigned long *numa_faults; |
| 967 | unsigned long total_numa_faults; |
Mel Gorman | 745d614 | 2013-10-07 11:28:59 +0100 | [diff] [blame] | 968 | |
| 969 | /* |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 970 | * numa_faults_locality tracks whether faults recorded during the last |
Mel Gorman | 074c238 | 2015-03-25 15:55:42 -0700 | [diff] [blame] | 971 | * scan window were remote/local or failed to migrate. The task scan |
| 972 | * period is adapted based on the locality of the faults with different |
| 973 | * weights depending on whether they were shared or private faults. |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 974 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 975 | unsigned long numa_faults_locality[3]; |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 976 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 977 | unsigned long numa_pages_migrated; |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 978 | #endif /* CONFIG_NUMA_BALANCING */ |
| 979 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 980 | struct tlbflush_unmap_batch tlb_ubc; |
Mel Gorman | 72b252a | 2015-09-04 15:47:32 -0700 | [diff] [blame] | 981 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 982 | struct rcu_head rcu; |
Jens Axboe | b92ce55 | 2006-04-11 13:52:07 +0200 | [diff] [blame] | 983 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 984 | /* Cache last used pipe for splice(): */ |
| 985 | struct pipe_inode_info *splice_pipe; |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 986 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 987 | struct page_frag task_frag; |
Eric Dumazet | 5640f76 | 2012-09-23 23:04:42 +0000 | [diff] [blame] | 988 | |
Ingo Molnar | 47913d4 | 2017-02-01 18:00:26 +0100 | [diff] [blame] | 989 | #ifdef CONFIG_TASK_DELAY_ACCT |
| 990 | struct task_delay_info *delays; |
Shailabh Nagar | ca74e92 | 2006-07-14 00:24:36 -0700 | [diff] [blame] | 991 | #endif |
Ingo Molnar | 47913d4 | 2017-02-01 18:00:26 +0100 | [diff] [blame] | 992 | |
Akinobu Mita | f4f154f | 2006-12-08 02:39:47 -0800 | [diff] [blame] | 993 | #ifdef CONFIG_FAULT_INJECTION |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 994 | int make_it_fail; |
Akinobu Mita | 9049f2f | 2017-07-14 14:49:52 -0700 | [diff] [blame] | 995 | unsigned int fail_nth; |
Akinobu Mita | f4f154f | 2006-12-08 02:39:47 -0800 | [diff] [blame] | 996 | #endif |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 997 | /* |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 998 | * When (nr_dirtied >= nr_dirtied_pause), it's time to call |
| 999 | * balance_dirty_pages() for a dirty throttling pause: |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1000 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1001 | int nr_dirtied; |
| 1002 | int nr_dirtied_pause; |
| 1003 | /* Start of a write-and-pause period: */ |
| 1004 | unsigned long dirty_paused_when; |
Wu Fengguang | 9d823e8 | 2011-06-11 18:10:12 -0600 | [diff] [blame] | 1005 | |
Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1006 | #ifdef CONFIG_LATENCYTOP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1007 | int latency_record_count; |
| 1008 | struct latency_record latency_record[LT_SAVECOUNT]; |
Arjan van de Ven | 9745512 | 2008-01-25 21:08:34 +0100 | [diff] [blame] | 1009 | #endif |
Arjan van de Ven | 6976675 | 2008-09-01 15:52:40 -0700 | [diff] [blame] | 1010 | /* |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1011 | * Time slack values; these are used to round up poll() and |
Arjan van de Ven | 6976675 | 2008-09-01 15:52:40 -0700 | [diff] [blame] | 1012 | * select() etc. timeout values. These are in nanoseconds. |
| 1013 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1014 | u64 timer_slack_ns; |
| 1015 | u64 default_timer_slack_ns; |
David Miller | f8d570a | 2008-11-06 00:37:40 -0800 | [diff] [blame] | 1016 | |
Andrey Ryabinin | 0b24bec | 2015-02-13 14:39:17 -0800 | [diff] [blame] | 1017 | #ifdef CONFIG_KASAN |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1018 | unsigned int kasan_depth; |
Andrey Ryabinin | 0b24bec | 2015-02-13 14:39:17 -0800 | [diff] [blame] | 1019 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1020 | |
Frederic Weisbecker | fb52607 | 2008-11-25 21:07:04 +0100 | [diff] [blame] | 1021 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1022 | /* Index of current stored address in ret_stack: */ |
| 1023 | int curr_ret_stack; |
| 1024 | |
| 1025 | /* Stack of return addresses for return function tracing: */ |
| 1026 | struct ftrace_ret_stack *ret_stack; |
| 1027 | |
| 1028 | /* Timestamp for last schedule: */ |
| 1029 | unsigned long long ftrace_timestamp; |
| 1030 | |
Frederic Weisbecker | f201ae2 | 2008-11-23 06:22:56 +0100 | [diff] [blame] | 1031 | /* |
| 1032 | * Number of functions that haven't been traced |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1033 | * because of depth overrun: |
Frederic Weisbecker | f201ae2 | 2008-11-23 06:22:56 +0100 | [diff] [blame] | 1034 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1035 | atomic_t trace_overrun; |
Tejun Heo | b23afb9 | 2015-11-05 18:46:11 -0800 | [diff] [blame] | 1036 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1037 | /* Pause tracing: */ |
| 1038 | atomic_t tracing_graph_pause; |
KAMEZAWA Hiroyuki | 569b846 | 2009-12-15 16:47:03 -0800 | [diff] [blame] | 1039 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1040 | |
| 1041 | #ifdef CONFIG_TRACING |
| 1042 | /* State flags for use by tracers: */ |
| 1043 | unsigned long trace; |
| 1044 | |
| 1045 | /* Bitmask and counter of trace recursion: */ |
| 1046 | unsigned long trace_recursion; |
| 1047 | #endif /* CONFIG_TRACING */ |
| 1048 | |
| 1049 | #ifdef CONFIG_KCOV |
| 1050 | /* Coverage collection mode enabled for this task (0 if disabled): */ |
| 1051 | enum kcov_mode kcov_mode; |
| 1052 | |
| 1053 | /* Size of the kcov_area: */ |
| 1054 | unsigned int kcov_size; |
| 1055 | |
| 1056 | /* Buffer for coverage collection: */ |
| 1057 | void *kcov_area; |
| 1058 | |
| 1059 | /* KCOV descriptor wired up with this task, or NULL: */ |
| 1060 | struct kcov *kcov; |
| 1061 | #endif |
| 1062 | |
| 1063 | #ifdef CONFIG_MEMCG |
| 1064 | struct mem_cgroup *memcg_in_oom; |
| 1065 | gfp_t memcg_oom_gfp_mask; |
| 1066 | int memcg_oom_order; |
| 1067 | |
| 1068 | /* Number of pages to reclaim on returning to userland: */ |
| 1069 | unsigned int memcg_nr_pages_over_high; |
| 1070 | #endif |
| 1071 | |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1072 | #ifdef CONFIG_UPROBES |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1073 | struct uprobe_task *utask; |
Srikar Dronamraju | 0326f5a | 2012-03-13 23:30:11 +0530 | [diff] [blame] | 1074 | #endif |
Kent Overstreet | cafe563 | 2013-03-23 16:11:31 -0700 | [diff] [blame] | 1075 | #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE) |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1076 | unsigned int sequential_io; |
| 1077 | unsigned int sequential_io_avg; |
Kent Overstreet | cafe563 | 2013-03-23 16:11:31 -0700 | [diff] [blame] | 1078 | #endif |
Peter Zijlstra | 8eb23b9 | 2014-09-24 10:18:55 +0200 | [diff] [blame] | 1079 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1080 | unsigned long task_state_change; |
Peter Zijlstra | 8eb23b9 | 2014-09-24 10:18:55 +0200 | [diff] [blame] | 1081 | #endif |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1082 | int pagefault_disabled; |
Michal Hocko | 0304926 | 2016-03-25 14:20:33 -0700 | [diff] [blame] | 1083 | #ifdef CONFIG_MMU |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1084 | struct task_struct *oom_reaper_list; |
Michal Hocko | 0304926 | 2016-03-25 14:20:33 -0700 | [diff] [blame] | 1085 | #endif |
Andy Lutomirski | ba14a19 | 2016-08-11 02:35:21 -0700 | [diff] [blame] | 1086 | #ifdef CONFIG_VMAP_STACK |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1087 | struct vm_struct *stack_vm_area; |
Andy Lutomirski | ba14a19 | 2016-08-11 02:35:21 -0700 | [diff] [blame] | 1088 | #endif |
Andy Lutomirski | 68f24b08 | 2016-09-15 22:45:48 -0700 | [diff] [blame] | 1089 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1090 | /* A live task holds one reference: */ |
| 1091 | atomic_t stack_refcount; |
Andy Lutomirski | 68f24b08 | 2016-09-15 22:45:48 -0700 | [diff] [blame] | 1092 | #endif |
Josh Poimboeuf | d83a7cb | 2017-02-13 19:42:40 -0600 | [diff] [blame] | 1093 | #ifdef CONFIG_LIVEPATCH |
| 1094 | int patch_state; |
| 1095 | #endif |
Tetsuo Handa | e4e55b4 | 2017-03-24 20:46:33 +0900 | [diff] [blame] | 1096 | #ifdef CONFIG_SECURITY |
| 1097 | /* Used by LSM modules for access restriction: */ |
| 1098 | void *security; |
| 1099 | #endif |
Kees Cook | 29e48ce | 2017-04-05 22:43:33 -0700 | [diff] [blame] | 1100 | |
| 1101 | /* |
| 1102 | * New fields for task_struct should be added above here, so that |
| 1103 | * they are included in the randomized portion of task_struct. |
| 1104 | */ |
| 1105 | randomized_struct_fields_end |
| 1106 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1107 | /* CPU-specific state of this task: */ |
| 1108 | struct thread_struct thread; |
| 1109 | |
| 1110 | /* |
| 1111 | * WARNING: on x86, 'thread_struct' contains a variable-sized |
| 1112 | * structure. It *MUST* be at the end of 'task_struct'. |
| 1113 | * |
| 1114 | * Do not put anything below here! |
| 1115 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | }; |
| 1117 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1118 | static inline struct pid *task_pid(struct task_struct *task) |
Eric W. Biederman | 22c935f | 2006-10-02 02:17:09 -0700 | [diff] [blame] | 1119 | { |
| 1120 | return task->pids[PIDTYPE_PID].pid; |
| 1121 | } |
| 1122 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1123 | static inline struct pid *task_tgid(struct task_struct *task) |
Eric W. Biederman | 22c935f | 2006-10-02 02:17:09 -0700 | [diff] [blame] | 1124 | { |
| 1125 | return task->group_leader->pids[PIDTYPE_PID].pid; |
| 1126 | } |
| 1127 | |
Oleg Nesterov | 6dda81f | 2009-04-02 16:58:35 -0700 | [diff] [blame] | 1128 | /* |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1129 | * Without the tasklist lock or RCU read lock it is not safe to dereference |
Oleg Nesterov | 6dda81f | 2009-04-02 16:58:35 -0700 | [diff] [blame] | 1130 | * the result of task_pgrp/task_session even if task == current; |
| 1131 | * we can race with another thread doing sys_setsid/sys_setpgid. |
| 1132 | */ |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1133 | static inline struct pid *task_pgrp(struct task_struct *task) |
Eric W. Biederman | 22c935f | 2006-10-02 02:17:09 -0700 | [diff] [blame] | 1134 | { |
| 1135 | return task->group_leader->pids[PIDTYPE_PGID].pid; |
| 1136 | } |
| 1137 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1138 | static inline struct pid *task_session(struct task_struct *task) |
Eric W. Biederman | 22c935f | 2006-10-02 02:17:09 -0700 | [diff] [blame] | 1139 | { |
| 1140 | return task->group_leader->pids[PIDTYPE_SID].pid; |
| 1141 | } |
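/*
 * Illustrative sketch (editor's addition, not part of this header): per
 * the locking note above, the result of task_pgrp()/task_session() must
 * be used under rcu_read_lock() or tasklist_lock. pid_vnr() is the
 * <linux/pid.h> helper; example_session_vnr() is a hypothetical name.
 */
static inline pid_t example_session_vnr(struct task_struct *task)
{
	pid_t nr;

	rcu_read_lock();
	nr = pid_vnr(task_session(task));	/* safe: RCU read side held */
	rcu_read_unlock();

	return nr;
}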
| 1142 | |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1143 | /* |
| 1144 | * The helpers to get the task's different pids as they are seen |
| 1145 | * from various namespaces: |
| 1146 | * |
| 1147 | * task_xid_nr() : global id, i.e. the id seen from the init namespace; |
Eric W. Biederman | 44c4e1b | 2008-02-08 04:19:15 -0800 | [diff] [blame] | 1148 | * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of |
| 1149 | * current. |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1150 | * task_xid_nr_ns() : id seen from the ns specified; |
| 1151 | * |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1152 | * see also pid_nr() etc in include/linux/pid.h |
| 1153 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1154 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1155 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1156 | static inline pid_t task_pid_nr(struct task_struct *tsk) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1157 | { |
| 1158 | return tsk->pid; |
| 1159 | } |
| 1160 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1161 | static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1162 | { |
| 1163 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); |
| 1164 | } |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1165 | |
| 1166 | static inline pid_t task_pid_vnr(struct task_struct *tsk) |
| 1167 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1168 | return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1169 | } |
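/*
 * Illustrative sketch: the _nr/_vnr naming convention above in practice;
 * example_log_pids() is hypothetical, pr_info() is the stock printk
 * wrapper.
 */
static inline void example_log_pids(struct task_struct *tsk)
{
	/* Global id, as seen from the init pid namespace: */
	pr_info("global pid: %d\n", task_pid_nr(tsk));
	/* Virtual id, as seen from current's pid namespace: */
	pr_info("virtual pid: %d\n", task_pid_vnr(tsk));
}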
| 1170 | |
| 1171 | |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1172 | static inline pid_t task_tgid_nr(struct task_struct *tsk) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1173 | { |
| 1174 | return tsk->tgid; |
| 1175 | } |
| 1176 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1177 | /** |
| 1178 | * pid_alive - check that a task structure is not stale |
| 1179 | * @p: Task structure to be checked. |
| 1180 | * |
| 1181 | * Test if a process is not yet dead (at most zombie state). |
| 1182 | * If pid_alive fails, then pointers within the task structure |
| 1183 | * can be stale and must not be dereferenced. |
| 1184 | * |
| 1185 | * Return: 1 if the process is alive. 0 otherwise. |
| 1186 | */ |
| 1187 | static inline int pid_alive(const struct task_struct *p) |
| 1188 | { |
| 1189 | return p->pids[PIDTYPE_PID].pid != NULL; |
| 1190 | } |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1191 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1192 | static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1193 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1194 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1195 | } |
| 1196 | |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1197 | static inline pid_t task_pgrp_vnr(struct task_struct *tsk) |
| 1198 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1199 | return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1200 | } |
| 1201 | |
| 1202 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1203 | static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1204 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1205 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1206 | } |
| 1207 | |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1208 | static inline pid_t task_session_vnr(struct task_struct *tsk) |
| 1209 | { |
Oleg Nesterov | 52ee2df | 2009-04-02 16:58:38 -0700 | [diff] [blame] | 1210 | return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL); |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1211 | } |
| 1212 | |
Oleg Nesterov | dd1c1f2 | 2017-08-21 17:35:02 +0200 | [diff] [blame] | 1213 | static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) |
| 1214 | { |
| 1215 | return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns); |
| 1216 | } |
| 1217 | |
| 1218 | static inline pid_t task_tgid_vnr(struct task_struct *tsk) |
| 1219 | { |
| 1220 | return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL); |
| 1221 | } |
| 1222 | |
| 1223 | static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns) |
| 1224 | { |
| 1225 | pid_t pid = 0; |
| 1226 | |
| 1227 | rcu_read_lock(); |
| 1228 | if (pid_alive(tsk)) |
| 1229 | pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); |
| 1230 | rcu_read_unlock(); |
| 1231 | |
| 1232 | return pid; |
| 1233 | } |
| 1234 | |
| 1235 | static inline pid_t task_ppid_nr(const struct task_struct *tsk) |
| 1236 | { |
| 1237 | return task_ppid_nr_ns(tsk, &init_pid_ns); |
| 1238 | } |
| 1239 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1240 | /* Obsolete, do not use: */ |
Oleg Nesterov | 1b0f7ffd | 2009-04-02 16:58:39 -0700 | [diff] [blame] | 1241 | static inline pid_t task_pgrp_nr(struct task_struct *tsk) |
| 1242 | { |
| 1243 | return task_pgrp_nr_ns(tsk, &init_pid_ns); |
| 1244 | } |
Pavel Emelyanov | 7af5729 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1245 | |
Xie XiuQi | 20435d8 | 2017-08-07 16:44:23 +0800 | [diff] [blame] | 1246 | static inline char task_state_to_char(struct task_struct *task) |
| 1247 | { |
| 1248 | const char stat_nam[] = TASK_STATE_TO_CHAR_STR; |
| 1249 | unsigned long state = task->state; |
| 1250 | |
| 1251 | state = state ? __ffs(state) + 1 : 0; |
| 1252 | |
| 1253 | /* Make sure the string lines up properly with the number of task states: */ |
| 1254 | BUILD_BUG_ON(sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1); |
| 1255 | |
| 1256 | return state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'; |
| 1257 | } |
| 1258 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | /** |
Sergey Senozhatsky | 570f524 | 2016-01-01 23:03:01 +0900 | [diff] [blame] | 1260 | * is_global_init - check if a task structure is init. |
| 1261 | * @tsk: Task structure to be checked. |
Henne | 3260259 | 2006-10-06 00:44:01 -0700 | [diff] [blame] | 1262 | * |
| 1263 | * Check if a task structure is the first user space task the kernel |
| 1264 | * created. Since init is free to have sub-threads, we need to check the tgid. |
Yacine Belkadi | e69f618 | 2013-07-12 20:45:47 +0200 | [diff] [blame] | 1265 | * |
| 1266 | * Return: 1 if the task structure is init. 0 otherwise. |
Sukadev Bhattiprolu | f400e19 | 2006-09-29 02:00:07 -0700 | [diff] [blame] | 1267 | */ |
Alexey Dobriyan | e868171 | 2007-10-26 12:17:22 +0400 | [diff] [blame] | 1268 | static inline int is_global_init(struct task_struct *tsk) |
Pavel Emelyanov | b461cc0 | 2007-10-18 23:40:09 -0700 | [diff] [blame] | 1269 | { |
Sergey Senozhatsky | 570f524 | 2016-01-01 23:03:01 +0900 | [diff] [blame] | 1270 | return task_tgid_nr(tsk) == 1; |
Pavel Emelyanov | b461cc0 | 2007-10-18 23:40:09 -0700 | [diff] [blame] | 1271 | } |
Serge E. Hallyn | b460cbc | 2007-10-18 23:39:52 -0700 | [diff] [blame] | 1272 | |
Cedric Le Goater | 9ec5209 | 2006-10-02 02:19:00 -0700 | [diff] [blame] | 1273 | extern struct pid *cad_pid; |
| 1274 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1275 | /* |
| 1276 | * Per process flags |
| 1277 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1278 | #define PF_IDLE 0x00000002 /* I am an IDLE thread */ |
| 1279 | #define PF_EXITING 0x00000004 /* Getting shut down */ |
| 1280 | #define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */ |
| 1281 | #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ |
| 1282 | #define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */ |
| 1283 | #define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */ |
| 1284 | #define PF_MCE_PROCESS 0x00000080 /* Process policy on mce errors */ |
| 1285 | #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */ |
| 1286 | #define PF_DUMPCORE 0x00000200 /* Dumped core */ |
| 1287 | #define PF_SIGNALED 0x00000400 /* Killed by a signal */ |
| 1288 | #define PF_MEMALLOC 0x00000800 /* Allocating memory */ |
| 1289 | #define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */ |
| 1290 | #define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */ |
| 1291 | #define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */ |
| 1292 | #define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */ |
| 1293 | #define PF_FROZEN 0x00010000 /* Frozen for system suspend */ |
Michal Hocko | 7dea19f | 2017-05-03 14:53:15 -0700 | [diff] [blame] | 1294 | #define PF_KSWAPD 0x00020000 /* I am kswapd */ |
| 1295 | #define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */ |
| 1296 | #define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1297 | #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */ |
| 1298 | #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ |
| 1299 | #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ |
| 1300 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
| 1301 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
| 1302 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
| 1303 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
| 1304 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
| 1305 | #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ |
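/*
 * Illustrative sketch: PF_* values are plain bit masks in task->flags,
 * so read-only tests are simple mask checks; example_is_user_task() is
 * hypothetical.
 */
static inline bool example_is_user_task(const struct task_struct *p)
{
	/* Kernel threads carry PF_KTHREAD for their whole lifetime. */
	return !(p->flags & PF_KTHREAD);
}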
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1306 | |
| 1307 | /* |
| 1308 | * Only the _current_ task can read/write to tsk->flags, but other |
| 1309 | * tasks can access tsk->flags in read-only mode, for example |
| 1310 | * with tsk_used_math (like during threaded core dumping). |
| 1311 | * There is, however, an exception to this rule during ptrace |
| 1312 | * or during fork: the ptracer task is allowed to write to the |
| 1313 | * child->flags of its traced child (same goes for fork, the parent |
| 1314 | * can write to the child->flags), because we're guaranteed the |
| 1315 | * child is not running and in turn not changing child->flags |
| 1316 | * at the same time the parent does it. |
| 1317 | */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1318 | #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0) |
| 1319 | #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0) |
| 1320 | #define clear_used_math() clear_stopped_child_used_math(current) |
| 1321 | #define set_used_math() set_stopped_child_used_math(current) |
| 1322 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1323 | #define conditional_stopped_child_used_math(condition, child) \ |
| 1324 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0) |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1325 | |
| 1326 | #define conditional_used_math(condition) conditional_stopped_child_used_math(condition, current) |
| 1327 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 | #define copy_to_stopped_child_used_math(child) \ |
| 1329 | do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0) |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1330 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1331 | /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1332 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
| 1333 | #define used_math() tsk_used_math(current) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1334 | |
Thomas Gleixner | 62ec05dd | 2017-05-24 10:15:41 +0200 | [diff] [blame] | 1335 | static inline bool is_percpu_thread(void) |
| 1336 | { |
| 1337 | #ifdef CONFIG_SMP |
| 1338 | return (current->flags & PF_NO_SETAFFINITY) && |
| 1339 | (current->nr_cpus_allowed == 1); |
| 1340 | #else |
| 1341 | return true; |
| 1342 | #endif |
| 1343 | } |
| 1344 | |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1345 | /* Per-process atomic flags. */ |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1346 | #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */ |
| 1347 | #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ |
| 1348 | #define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */ |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1349 | |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1350 | |
Zefan Li | e0e5070 | 2014-09-25 09:40:40 +0800 | [diff] [blame] | 1351 | #define TASK_PFA_TEST(name, func) \ |
| 1352 | static inline bool task_##func(struct task_struct *p) \ |
| 1353 | { return test_bit(PFA_##name, &p->atomic_flags); } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1354 | |
Zefan Li | e0e5070 | 2014-09-25 09:40:40 +0800 | [diff] [blame] | 1355 | #define TASK_PFA_SET(name, func) \ |
| 1356 | static inline void task_set_##func(struct task_struct *p) \ |
| 1357 | { set_bit(PFA_##name, &p->atomic_flags); } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1358 | |
Zefan Li | e0e5070 | 2014-09-25 09:40:40 +0800 | [diff] [blame] | 1359 | #define TASK_PFA_CLEAR(name, func) \ |
| 1360 | static inline void task_clear_##func(struct task_struct *p) \ |
| 1361 | { clear_bit(PFA_##name, &p->atomic_flags); } |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1362 | |
Zefan Li | e0e5070 | 2014-09-25 09:40:40 +0800 | [diff] [blame] | 1363 | TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) |
| 1364 | TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs) |
Kees Cook | 1d4457f | 2014-05-21 15:23:46 -0700 | [diff] [blame] | 1365 | |
Zefan Li | 2ad654b | 2014-09-25 09:41:02 +0800 | [diff] [blame] | 1366 | TASK_PFA_TEST(SPREAD_PAGE, spread_page) |
| 1367 | TASK_PFA_SET(SPREAD_PAGE, spread_page) |
| 1368 | TASK_PFA_CLEAR(SPREAD_PAGE, spread_page) |
| 1369 | |
| 1370 | TASK_PFA_TEST(SPREAD_SLAB, spread_slab) |
| 1371 | TASK_PFA_SET(SPREAD_SLAB, spread_slab) |
| 1372 | TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab) |
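/*
 * For reference, a sketch of what one generator line above expands to:
 *
 *	TASK_PFA_TEST(SPREAD_PAGE, spread_page)
 *
 * becomes
 *
 *	static inline bool task_spread_page(struct task_struct *p)
 *	{ return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }
 *
 * i.e. the PFA_* values are atomic bit numbers in p->atomic_flags,
 * unlike the PF_* masks in p->flags.
 */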
Tejun Heo | 544b2c9 | 2011-06-14 11:20:18 +0200 | [diff] [blame] | 1373 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1374 | static inline void |
NeilBrown | 717a94b | 2017-04-07 10:03:26 +1000 | [diff] [blame] | 1375 | current_restore_flags(unsigned long orig_flags, unsigned long flags) |
Mel Gorman | 907aed4 | 2012-07-31 16:44:07 -0700 | [diff] [blame] | 1376 | { |
NeilBrown | 717a94b | 2017-04-07 10:03:26 +1000 | [diff] [blame] | 1377 | current->flags &= ~flags; |
| 1378 | current->flags |= orig_flags & flags; |
Mel Gorman | 907aed4 | 2012-07-31 16:44:07 -0700 | [diff] [blame] | 1379 | } |
| 1380 | |
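/*
 * Illustrative sketch of the save/restore pattern current_restore_flags()
 * exists for (it mirrors the memalloc_noio_save()/memalloc_noio_restore()
 * pair; the example_* names are hypothetical):
 */
static inline unsigned long example_noio_save(void)
{
	unsigned long flags = current->flags & PF_MEMALLOC_NOIO;

	current->flags |= PF_MEMALLOC_NOIO;	/* allocations inherit GFP_NOIO */
	return flags;
}

static inline void example_noio_restore(unsigned long flags)
{
	/* Restore only the PF_MEMALLOC_NOIO bit to its saved state: */
	current_restore_flags(flags, PF_MEMALLOC_NOIO);
}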
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1381 | extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); |
| 1382 | extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1383 | #ifdef CONFIG_SMP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1384 | extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); |
| 1385 | extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1386 | #else |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1387 | static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) |
KOSAKI Motohiro | 1e1b6c5 | 2011-05-19 15:08:58 +0900 | [diff] [blame] | 1388 | { |
| 1389 | } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1390 | static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1391 | { |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 1392 | if (!cpumask_test_cpu(0, new_mask)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1393 | return -EINVAL; |
| 1394 | return 0; |
| 1395 | } |
| 1396 | #endif |
Rusty Russell | e0ad955 | 2009-09-24 09:34:38 -0600 | [diff] [blame] | 1397 | |
Christian Borntraeger | 6d0d287 | 2016-11-16 13:23:05 +0100 | [diff] [blame] | 1398 | #ifndef cpu_relax_yield |
| 1399 | #define cpu_relax_yield() cpu_relax() |
| 1400 | #endif |
| 1401 | |
Dan Carpenter | fa93384 | 2014-05-23 13:20:42 +0300 | [diff] [blame] | 1402 | extern int yield_to(struct task_struct *p, bool preempt); |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1403 | extern void set_user_nice(struct task_struct *p, long nice); |
| 1404 | extern int task_prio(const struct task_struct *p); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1405 | |
Dongsheng Yang | d0ea026 | 2014-01-27 22:00:45 -0500 | [diff] [blame] | 1406 | /** |
| 1407 | * task_nice - return the nice value of a given task. |
| 1408 | * @p: the task in question. |
| 1409 | * |
| 1410 | * Return: The nice value [ -20 ... 0 ... 19 ]. |
| 1411 | */ |
| 1412 | static inline int task_nice(const struct task_struct *p) |
| 1413 | { |
| 1414 | return PRIO_TO_NICE((p)->static_prio); |
| 1415 | } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1416 | |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1417 | extern int can_nice(const struct task_struct *p, const int nice); |
| 1418 | extern int task_curr(const struct task_struct *p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1419 | extern int idle_cpu(int cpu); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1420 | extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); |
| 1421 | extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); |
| 1422 | extern int sched_setattr(struct task_struct *, const struct sched_attr *); |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1423 | extern struct task_struct *idle_task(int cpu); |
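/*
 * Illustrative sketch: an in-kernel caller switching a task to SCHED_FIFO
 * with sched_setscheduler_nocheck(), which skips the capability check
 * that the plain sched_setscheduler() performs. Assumes the struct
 * sched_param definition from <uapi/linux/sched/types.h>;
 * example_make_fifo() is hypothetical.
 */
static inline int example_make_fifo(struct task_struct *p)
{
	struct sched_param sp = { .sched_priority = 10 };

	return sched_setscheduler_nocheck(p, SCHED_FIFO, &sp);
}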
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1424 | |
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1425 | /** |
| 1426 | * is_idle_task - is the specified task an idle task? |
Randy Dunlap | fa75728 | 2012-01-21 11:03:13 -0800 | [diff] [blame] | 1427 | * @p: the task in question. |
Yacine Belkadi | e69f618 | 2013-07-12 20:45:47 +0200 | [diff] [blame] | 1428 | * |
| 1429 | * Return: 1 if @p is an idle task. 0 otherwise. |
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1430 | */ |
Paul E. McKenney | 7061ca3 | 2011-12-20 08:20:46 -0800 | [diff] [blame] | 1431 | static inline bool is_idle_task(const struct task_struct *p) |
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1432 | { |
Peter Zijlstra | c1de45c | 2016-11-28 23:03:05 -0800 | [diff] [blame] | 1433 | return !!(p->flags & PF_IDLE); |
Paul E. McKenney | c4f3060 | 2011-11-10 12:41:56 -0800 | [diff] [blame] | 1434 | } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1435 | |
Ingo Molnar | 36c8b58 | 2006-07-03 00:25:41 -0700 | [diff] [blame] | 1436 | extern struct task_struct *curr_task(int cpu); |
Peter Zijlstra | a458ae2 | 2016-09-20 20:29:40 +0200 | [diff] [blame] | 1437 | extern void ia64_set_curr_task(int cpu, struct task_struct *p); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1438 | |
| 1439 | void yield(void); |
| 1440 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1441 | union thread_union { |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1442 | #ifndef CONFIG_THREAD_INFO_IN_TASK |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1443 | struct thread_info thread_info; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1444 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1445 | unsigned long stack[THREAD_SIZE/sizeof(long)]; |
| 1446 | }; |
| 1447 | |
Ingo Molnar | f3ac606 | 2017-02-03 22:59:33 +0100 | [diff] [blame] | 1448 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 1449 | static inline struct thread_info *task_thread_info(struct task_struct *task) |
| 1450 | { |
| 1451 | return &task->thread_info; |
| 1452 | } |
| 1453 | #elif !defined(__HAVE_THREAD_FUNCTIONS) |
| 1454 | # define task_thread_info(task) ((struct thread_info *)(task)->stack) |
| 1455 | #endif |
| 1456 | |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1457 | /* |
| 1458 | * find a task by one of its numerical ids |
| 1459 | * |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1460 | * find_task_by_pid_ns(): |
| 1461 | * finds a task by its pid in the specified namespace |
Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 1462 | * find_task_by_vpid(): |
| 1463 | * finds a task by its virtual pid |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1464 | * |
Pavel Emelyanov | e49859e | 2008-07-25 01:48:36 -0700 | [diff] [blame] | 1465 | * see also find_vpid() etc in include/linux/pid.h |
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1466 | */ |
| 1467 | |
Pavel Emelyanov | 228ebcb | 2007-10-18 23:40:16 -0700 | [diff] [blame] | 1468 | extern struct task_struct *find_task_by_vpid(pid_t nr); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1469 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); |
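/*
 * Illustrative sketch: the lookup helpers above return an unpinned
 * pointer, so callers search under rcu_read_lock() and take a reference
 * (get_task_struct() is from <linux/sched/task.h>) before leaving the
 * RCU section; example_get_task() is hypothetical.
 */
static inline struct task_struct *example_get_task(pid_t vnr)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(vnr);
	if (p)
		get_task_struct(p);	/* pin it across rcu_read_unlock() */
	rcu_read_unlock();

	return p;
}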
Pavel Emelyanov | 198fe21 | 2007-10-18 23:40:06 -0700 | [diff] [blame] | 1470 | |
Harvey Harrison | b3c9752 | 2008-02-13 15:03:15 -0800 | [diff] [blame] | 1471 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
| 1472 | extern int wake_up_process(struct task_struct *tsk); |
Samir Bellabes | 3e51e3e | 2011-05-11 18:18:05 +0200 | [diff] [blame] | 1473 | extern void wake_up_new_task(struct task_struct *tsk); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1474 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1475 | #ifdef CONFIG_SMP |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1476 | extern void kick_process(struct task_struct *tsk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1477 | #else |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1478 | static inline void kick_process(struct task_struct *tsk) { } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1479 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1480 | |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 1481 | extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec); |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1482 | |
Adrian Hunter | 82b8977 | 2014-05-28 11:45:04 +0300 | [diff] [blame] | 1483 | static inline void set_task_comm(struct task_struct *tsk, const char *from) |
| 1484 | { |
| 1485 | __set_task_comm(tsk, from, false); |
| 1486 | } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1487 | |
Andrew Morton | 59714d6 | 2008-02-04 22:27:21 -0800 | [diff] [blame] | 1488 | extern char *get_task_comm(char *to, struct task_struct *tsk); |
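/*
 * Illustrative sketch: get_task_comm() copies under the task lock, so the
 * name cannot be torn by a concurrent prctl(PR_SET_NAME); the destination
 * buffer must be at least TASK_COMM_LEN bytes. example_print_comm() is
 * hypothetical.
 */
static inline void example_print_comm(struct task_struct *tsk)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, tsk);
	pr_info("task name: %s\n", comm);
}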
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1489 | |
| 1490 | #ifdef CONFIG_SMP |
Peter Zijlstra | 317f394 | 2011-04-05 17:23:58 +0200 | [diff] [blame] | 1491 | void scheduler_ipi(void); |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 1492 | extern unsigned long wait_task_inactive(struct task_struct *, long match_state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1493 | #else |
Peter Zijlstra | 184748c | 2011-04-05 17:23:39 +0200 | [diff] [blame] | 1494 | static inline void scheduler_ipi(void) { } |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1495 | static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state) |
Roland McGrath | 85ba2d8 | 2008-07-25 19:45:58 -0700 | [diff] [blame] | 1496 | { |
| 1497 | return 1; |
| 1498 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1499 | #endif |
| 1500 | |
Ingo Molnar | 5eca1c1 | 2017-02-06 22:06:35 +0100 | [diff] [blame] | 1501 | /* |
| 1502 | * Set thread flags in another task's structure. |
| 1503 | * See asm/thread_info.h for TIF_xxxx flags available: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1504 | */ |
| 1505 | static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1506 | { |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1507 | set_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1508 | } |
| 1509 | |
| 1510 | static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1511 | { |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1512 | clear_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1513 | } |
| 1514 | |
| 1515 | static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1516 | { |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1517 | return test_and_set_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1518 | } |
| 1519 | |
| 1520 | static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1521 | { |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1522 | return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1523 | } |
| 1524 | |
| 1525 | static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag) |
| 1526 | { |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1527 | return test_ti_thread_flag(task_thread_info(tsk), flag); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1528 | } |
| 1529 | |
| 1530 | static inline void set_tsk_need_resched(struct task_struct *tsk) |
| 1531 | { |
| 1532 | set_tsk_thread_flag(tsk, TIF_NEED_RESCHED); |
| 1533 | } |
| 1534 | |
| 1535 | static inline void clear_tsk_need_resched(struct task_struct *tsk) |
| 1536 | { |
| 1537 | clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED); |
| 1538 | } |
| 1539 | |
Gregory Haskins | 8ae121a | 2008-04-23 07:13:29 -0400 | [diff] [blame] | 1540 | static inline int test_tsk_need_resched(struct task_struct *tsk) |
| 1541 | { |
| 1542 | return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED)); |
| 1543 | } |
| 1544 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1545 | /* |
| 1546 | * cond_resched() and cond_resched_lock(): latency reduction via |
| 1547 | * explicit rescheduling in places that are safe. The return |
| 1548 | * value indicates whether a reschedule was in fact done. |
| 1549 | * cond_resched_lock() will drop the spinlock before scheduling; |
| 1550 | * cond_resched_softirq() will enable BHs (bottom halves) before scheduling. |
| 1551 | */ |
Peter Zijlstra | 35a773a | 2016-09-19 12:57:53 +0200 | [diff] [blame] | 1552 | #ifndef CONFIG_PREEMPT |
Linus Torvalds | c3921ab | 2008-05-11 16:04:48 -0700 | [diff] [blame] | 1553 | extern int _cond_resched(void); |
Peter Zijlstra | 35a773a | 2016-09-19 12:57:53 +0200 | [diff] [blame] | 1554 | #else |
| 1555 | static inline int _cond_resched(void) { return 0; } |
| 1556 | #endif |
Frederic Weisbecker | 6f80bd9 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1557 | |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1558 | #define cond_resched() ({ \ |
Peter Zijlstra | 3427445 | 2014-09-24 10:18:56 +0200 | [diff] [blame] | 1559 | ___might_sleep(__FILE__, __LINE__, 0); \ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1560 | _cond_resched(); \ |
| 1561 | }) |
Frederic Weisbecker | 6f80bd9 | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1562 | |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1563 | extern int __cond_resched_lock(spinlock_t *lock); |
| 1564 | |
| 1565 | #define cond_resched_lock(lock) ({ \ |
Peter Zijlstra | 3427445 | 2014-09-24 10:18:56 +0200 | [diff] [blame] | 1566 | ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1567 | __cond_resched_lock(lock); \ |
| 1568 | }) |
| 1569 | |
| 1570 | extern int __cond_resched_softirq(void); |
| 1571 | |
Venkatesh Pallipadi | 75e1056 | 2010-10-04 17:03:16 -0700 | [diff] [blame] | 1572 | #define cond_resched_softirq() ({ \ |
Peter Zijlstra | 3427445 | 2014-09-24 10:18:56 +0200 | [diff] [blame] | 1573 | ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ |
Venkatesh Pallipadi | 75e1056 | 2010-10-04 17:03:16 -0700 | [diff] [blame] | 1574 | __cond_resched_softirq(); \ |
Frederic Weisbecker | 613afbf | 2009-07-16 15:44:29 +0200 | [diff] [blame] | 1575 | }) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1576 | |
Simon Horman | f6f3c43 | 2013-05-22 14:50:31 +0900 | [diff] [blame] | 1577 | static inline void cond_resched_rcu(void) |
| 1578 | { |
| 1579 | #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU) |
| 1580 | rcu_read_unlock(); |
| 1581 | cond_resched(); |
| 1582 | rcu_read_lock(); |
| 1583 | #endif |
| 1584 | } |
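/*
 * Illustrative sketch: a long-running loop inserting voluntary preemption
 * points with cond_resched(); example_process() and its work_fn callback
 * are hypothetical.
 */
static inline void example_process(void (*work_fn)(int), int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		work_fn(i);
		cond_resched();	/* safe point to reschedule if needed */
	}
}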
| 1585 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1586 | /* |
| 1587 | * Does a critical section need to be broken due to another |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1588 | * task waiting? (Technically this does not depend on CONFIG_PREEMPT, |
| 1589 | * but reflects a general need for low latency.) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1590 | */ |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1591 | static inline int spin_needbreak(spinlock_t *lock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1592 | { |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1593 | #ifdef CONFIG_PREEMPT |
| 1594 | return spin_is_contended(lock); |
| 1595 | #else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1596 | return 0; |
Nick Piggin | 95c354f | 2008-01-30 13:31:20 +0100 | [diff] [blame] | 1597 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1598 | } |
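/*
 * Illustrative sketch: a lock-break pattern driven by spin_needbreak();
 * __cond_resched_lock() implements a similar drop/reschedule/retake
 * internally. example_drain() and its per-slot callback are hypothetical.
 */
static inline void example_drain(spinlock_t *lock, void (*one)(int), int nr)
{
	int i;

	spin_lock(lock);
	for (i = 0; i < nr; i++) {
		one(i);
		if (spin_needbreak(lock)) {
			spin_unlock(lock);	/* let the contender in */
			cpu_relax();
			spin_lock(lock);
		}
	}
	spin_unlock(lock);
}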
| 1599 | |
Peter Zijlstra | 75f93fe | 2013-09-27 17:30:03 +0200 | [diff] [blame] | 1600 | static __always_inline bool need_resched(void) |
| 1601 | { |
| 1602 | return unlikely(tif_need_resched()); |
| 1603 | } |
| 1604 | |
Thomas Gleixner | ee761f6 | 2013-03-21 22:49:32 +0100 | [diff] [blame] | 1605 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1606 | * Wrappers for p->thread_info->cpu access. No-op on UP. |
| 1607 | */ |
| 1608 | #ifdef CONFIG_SMP |
| 1609 | |
| 1610 | static inline unsigned int task_cpu(const struct task_struct *p) |
| 1611 | { |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1612 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 1613 | return p->cpu; |
| 1614 | #else |
Al Viro | a1261f54 | 2005-11-13 16:06:55 -0800 | [diff] [blame] | 1615 | return task_thread_info(p)->cpu; |
Andy Lutomirski | c65eacb | 2016-09-13 14:29:24 -0700 | [diff] [blame] | 1616 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1617 | } |
| 1618 | |
Ingo Molnar | c65cc87 | 2007-07-09 18:51:58 +0200 | [diff] [blame] | 1619 | extern void set_task_cpu(struct task_struct *p, unsigned int cpu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1620 | |
| 1621 | #else |
| 1622 | |
| 1623 | static inline unsigned int task_cpu(const struct task_struct *p) |
| 1624 | { |
| 1625 | return 0; |
| 1626 | } |
| 1627 | |
| 1628 | static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) |
| 1629 | { |
| 1630 | } |
| 1631 | |
| 1632 | #endif /* CONFIG_SMP */ |
| 1633 | |
Pan Xinhui | d9345c6 | 2016-11-02 05:08:28 -0400 | [diff] [blame] | 1634 | /* |
| 1635 | * In order to reduce various lock holder preemption latencies, provide an |
| 1636 | * interface to see if a vCPU is currently running or not. |
| 1637 | * |
| 1638 | * This allows us to terminate optimistic spin loops and block, analogous to |
| 1639 | * the native optimistic spin heuristic of testing if the lock owner task is |
| 1640 | * running or not. |
| 1641 | */ |
| 1642 | #ifndef vcpu_is_preempted |
| 1643 | # define vcpu_is_preempted(cpu) false |
| 1644 | #endif |
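/*
 * Illustrative sketch: an optimistic spin that stops once the owner's
 * vCPU is preempted, as described above (it mirrors the mutex/rwsem
 * owner-spinning heuristic; example_spin_on_owner() is hypothetical and
 * assumes CONFIG_SMP for the task_struct ->on_cpu field).
 */
static inline bool example_spin_on_owner(struct task_struct *owner)
{
	while (READ_ONCE(owner->on_cpu)) {
		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
			return false;	/* give up and block instead */
		cpu_relax();
	}
	return true;	/* owner is off-CPU; try to acquire */
}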
| 1645 | |
Rusty Russell | 96f874e2 | 2008-11-25 02:35:14 +1030 | [diff] [blame] | 1646 | extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); |
| 1647 | extern long sched_getaffinity(pid_t pid, struct cpumask *mask); |
Siddha, Suresh B | 5c45bf2 | 2006-06-27 02:54:42 -0700 | [diff] [blame] | 1648 | |
Dave Hansen | 8245525 | 2008-02-04 22:28:59 -0800 | [diff] [blame] | 1649 | #ifndef TASK_SIZE_OF |
| 1650 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
| 1651 | #endif |
| 1652 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1653 | #endif |