#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

#include <uapi/linux/sched.h>

#include <linux/sched/prio.h>

#include <linux/capability.h>
#include <linux/mutex.h>
#include <linux/plist.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/signal_types.h>
#include <linux/pid.h>
#include <linux/seccomp.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/resource.h>
#include <linux/hrtimer.h>
#include <linux/kcov.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>

#include <asm/current.h>

struct sched_attr;
struct sched_param;

struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
struct nameidata;

struct signal_struct;
struct sighand_struct;

struct seq_file;
struct cfs_rq;
struct task_group;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING            0
#define TASK_INTERRUPTIBLE      1
#define TASK_UNINTERRUPTIBLE    2
#define __TASK_STOPPED          4
#define __TASK_TRACED           8
/* in tsk->exit_state */
#define EXIT_DEAD               16
#define EXIT_ZOMBIE             32
#define EXIT_TRACE              (EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD               64
#define TASK_WAKEKILL           128
#define TASK_WAKING             256
#define TASK_PARKED             512
#define TASK_NOLOAD             1024
#define TASK_NEW                2048
#define TASK_STATE_MAX          4096

#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPNn"

/* Convenience macros for the sake of set_current_state */
#define TASK_KILLABLE           (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED            (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED             (TASK_WAKEKILL | __TASK_TRACED)

#define TASK_IDLE               (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL             (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL                (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT             (TASK_RUNNING | TASK_INTERRUPTIBLE | \
                                 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
                                 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)

#define task_is_traced(task)    ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)   ((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task) \
                        ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
                                ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
                                 (task->flags & PF_FROZEN) == 0 && \
                                 (task->state & TASK_NOLOAD) == 0)

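/*
 * Example (illustration only, not part of this header): the composite
 * states are plain ORs of the bits above, e.g. TASK_KILLABLE is
 * (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) == (128 | 2) == 0x82, so a
 * killable sleep looks like:
 *
 *      set_current_state(TASK_KILLABLE);
 *      schedule();
 *      if (fatal_signal_pending(current))
 *              return -ERESTARTSYS;
 */
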
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP

#define __set_current_state(state_value)                        \
        do {                                                    \
                current->task_state_change = _THIS_IP_;         \
                current->state = (state_value);                 \
        } while (0)
#define set_current_state(state_value)                          \
        do {                                                    \
                current->task_state_change = _THIS_IP_;         \
                smp_store_mb(current->state, (state_value));    \
        } while (0)

#else
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *   for (;;) {
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *      if (!need_sleep)
 *              break;
 *
 *      schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);
 *
 * If the caller does not need such serialisation (because, for instance, the
 * condition test and condition change and wakeup are under the same lock) then
 * use __set_current_state().
 *
 * The above is typically ordered against the wakeup, which does:
 *
 *      need_sleep = false;
 *      wake_up_state(p, TASK_UNINTERRUPTIBLE);
 *
 * Where wake_up_state() (and all other wakeup primitives) imply enough
 * barriers to order the store of the variable against wakeup.
 *
 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 *
 * This is obviously fine, since they both store the exact same value.
 *
 * Also see the comments of try_to_wake_up().
 */
#define __set_current_state(state_value)                \
        do { current->state = (state_value); } while (0)
#define set_current_state(state_value)                  \
        smp_store_mb(current->state, (state_value))

#endif

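/*
 * Example (illustration only, not part of this header): a kthread that
 * wants to sleep without contributing to the load average uses TASK_IDLE
 * instead of plain TASK_UNINTERRUPTIBLE, since TASK_IDLE also carries
 * TASK_NOLOAD:
 *
 *      set_current_state(TASK_IDLE);
 *      if (!kthread_should_stop())
 *              schedule();
 *      __set_current_state(TASK_RUNNING);
 */
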
/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

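/*
 * Example (illustration only, not part of this header): readers take
 * tasklist_lock around a walk of the task list, e.g. with the
 * for_each_process() iterator provided further down in the full header:
 *
 *      read_lock(&tasklist_lock);
 *      for_each_process(p)
 *              pr_info("%d\t%s\n", p->pid, p->comm);
 *      read_unlock(&tasklist_lock);
 */
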
struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern cpumask_var_t cpu_isolated_map;

extern int runqueue_is_locked(int cpu);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern int sched_cpu_starting(unsigned int cpu);
extern int sched_cpu_activate(unsigned int cpu);
extern int sched_cpu_deactivate(unsigned int cpu);

#ifdef CONFIG_HOTPLUG_CPU
extern int sched_cpu_dying(unsigned int cpu);
#else
# define sched_cpu_dying        NULL
#endif

#define MAX_SCHEDULE_TIMEOUT    LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);

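/*
 * Example (illustration only, not part of this header): callers set the
 * task state first; schedule_timeout() returns the number of jiffies
 * remaining if the sleep ended early because of a wakeup or signal.
 * Sleeping for up to one second:
 *
 *      long remaining;
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      remaining = schedule_timeout(HZ);
 *      if (remaining)
 *              pr_debug("woken early, %ld jiffies left\n", remaining);
 */
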
extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);

void __noreturn do_task_dead(void);

struct nsproxy;

/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        u64 utime;
        u64 stime;
        raw_spinlock_t lock;
#endif
};

static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        prev->utime = prev->stime = 0;
        raw_spin_lock_init(&prev->lock);
#endif
}

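/*
 * Example (illustration only, not part of this header): code embedding a
 * struct prev_cputime initializes it once, before the first accounting
 * update; later updates only ever move prev->utime/prev->stime forward,
 * which is what provides the documented monotonicity guarantee:
 *
 *      struct prev_cputime prev;
 *
 *      prev_cputime_init(&prev);
 */
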
/**
 * struct task_cputime - collected CPU time counts
 * @utime:              time spent in user mode, in nanoseconds
 * @stime:              time spent in kernel mode, in nanoseconds
 * @sum_exec_runtime:   total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups. Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
        u64 utime;
        u64 stime;
        unsigned long long sum_exec_runtime;
};

/* Alternate field names when used to cache expirations. */
#define virt_exp        utime
#define prof_exp        stime
#define sched_exp       sum_exec_runtime

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
        atomic64_t utime;
        atomic64_t stime;
        atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
        (struct task_cputime_atomic) {                          \
                .utime = ATOMIC64_INIT(0),                      \
                .stime = ATOMIC64_INIT(0),                      \
                .sum_exec_runtime = ATOMIC64_INIT(0),           \
        }

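/*
 * Example (illustration only, not part of this header): the atomic
 * variant supports lockless accumulation from several threads:
 *
 *      struct task_cputime_atomic ct = INIT_CPUTIME_ATOMIC;
 *
 *      atomic64_add(delta_ns, &ct.utime);
 *      user_ns = atomic64_read(&ct.utime);
 */
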
#define PREEMPT_DISABLED        (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT      PREEMPT_OFFSET

/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 *    preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT      (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:     atomic thread group interval timers.
 * @running:            true when there are timers running and
 *                      @cputime_atomic receives updates.
 * @checking_timer:     true when a thread in the group is in the
 *                      process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
        struct task_cputime_atomic cputime_atomic;
        bool running;
        bool checking_timer;
};

#include <linux/rwsem.h>
struct autogroup;

struct backing_dev_info;
struct reclaim_state;

#ifdef CONFIG_SCHED_INFO
struct sched_info {
        /* cumulative counters */
        unsigned long pcount;         /* # of times run on this cpu */
        unsigned long long run_delay; /* time spent waiting on a runqueue */

        /* timestamps */
        unsigned long long last_arrival,/* when we last ran on a cpu */
                           last_queued; /* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */

struct task_delay_info;

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
        return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
        extern int delayacct_on;
        return delayacct_on;
#else
        return 0;
#endif
}

#ifdef CONFIG_SCHEDSTATS
void force_schedstat_enabled(void);
#endif

/*
 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 * has a few: load, load_avg, util_avg, freq, and capacity.
 *
 * We define a basic fixed point arithmetic range, and then formalize
 * all these metrics based on that basic range.
 */
# define SCHED_FIXEDPOINT_SHIFT 10
# define SCHED_FIXEDPOINT_SCALE (1L << SCHED_FIXEDPOINT_SHIFT)

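/*
 * Example (illustration only, not part of this header): with a 10-bit
 * shift the fixed point "1.0" is SCHED_FIXEDPOINT_SCALE == 1024, so 50%
 * is represented as 512, and a product of two such values needs one
 * de-scaling shift:
 *
 *      unsigned long half = SCHED_FIXEDPOINT_SCALE / 2;
 *      unsigned long quarter = (half * half) >> SCHED_FIXEDPOINT_SHIFT;
 *
 * (quarter == 256, i.e. 0.25 in this representation)
 */
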
struct io_context;                      /* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;           /* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct load_weight {
        unsigned long weight;
        u32 inv_weight;
};

/*
 * The load_avg/util_avg accumulates an infinite geometric series
 * (see __update_load_avg() in kernel/sched/fair.c).
 *
 * [load_avg definition]
 *
 *   load_avg = runnable% * scale_load_down(load)
 *
 * where runnable% is the time ratio that a sched_entity is runnable.
 * For cfs_rq, it is the aggregated load_avg of all runnable and
 * blocked sched_entities.
 *
 * load_avg may also take frequency scaling into account:
 *
 *   load_avg = runnable% * scale_load_down(load) * freq%
 *
 * where freq% is the CPU frequency normalized to the highest frequency.
 *
 * [util_avg definition]
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE
 *
 * where running% is the time ratio that a sched_entity is running on
 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 * and blocked sched_entities.
 *
 * util_avg may also factor frequency scaling and CPU capacity scaling:
 *
 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 *
 * where freq% is the same as above, and capacity% is the CPU capacity
 * normalized to the greatest capacity (due to uarch differences, etc).
 *
 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 * we therefore scale them to as large a range as necessary. This is for
 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 *
 * [Overflow issue]
 *
 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 * with the highest load (=88761), always runnable on a single cfs_rq,
 * and should not overflow as the number already hits PID_MAX_LIMIT.
 *
 * For all other cases (including 32-bit kernels), struct load_weight's
 * weight will overflow first before we do, because:
 *
 *   Max(load_avg) <= Max(load.weight)
 *
 * Then it is the load_weight's responsibility to consider overflow
 * issues.
 */
struct sched_avg {
        u64 last_update_time, load_sum;
        u32 util_sum, period_contrib;
        unsigned long load_avg, util_avg;
};

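/*
 * Worked example (illustration only, not part of this header): an entity
 * that is running 25% of the time has
 *
 *      util_avg = 0.25 * SCHED_CAPACITY_SCALE = 0.25 * 1024 = 256
 *
 * and the overflow bound quoted above is simply
 * 2^64 / 47742 / 88761 ~= 4.35e9, i.e. more concurrently runnable
 * max-weight entities than PID_MAX_LIMIT allows, so load_sum is safe
 * in practice.
 */
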
#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
        u64 wait_start;
        u64 wait_max;
        u64 wait_count;
        u64 wait_sum;
        u64 iowait_count;
        u64 iowait_sum;

        u64 sleep_start;
        u64 sleep_max;
        s64 sum_sleep_runtime;

        u64 block_start;
        u64 block_max;
        u64 exec_max;
        u64 slice_max;

        u64 nr_migrations_cold;
        u64 nr_failed_migrations_affine;
        u64 nr_failed_migrations_running;
        u64 nr_failed_migrations_hot;
        u64 nr_forced_migrations;

        u64 nr_wakeups;
        u64 nr_wakeups_sync;
        u64 nr_wakeups_migrate;
        u64 nr_wakeups_local;
        u64 nr_wakeups_remote;
        u64 nr_wakeups_affine;
        u64 nr_wakeups_affine_attempts;
        u64 nr_wakeups_passive;
        u64 nr_wakeups_idle;
};
#endif

struct sched_entity {
        struct load_weight load;        /* for load-balancing */
        struct rb_node run_node;
        struct list_head group_node;
        unsigned int on_rq;

        u64 exec_start;
        u64 sum_exec_runtime;
        u64 vruntime;
        u64 prev_sum_exec_runtime;

        u64 nr_migrations;

#ifdef CONFIG_SCHEDSTATS
        struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
        int depth;
        struct sched_entity *parent;
        /* rq on which this entity is (to be) queued: */
        struct cfs_rq *cfs_rq;
        /* rq "owned" by this entity/group: */
        struct cfs_rq *my_q;
#endif

#ifdef CONFIG_SMP
        /*
         * Per entity load average tracking.
         *
         * Put into separate cache line so it does not
         * collide with read-mostly values above.
         */
        struct sched_avg avg ____cacheline_aligned_in_smp;
#endif
};

struct sched_rt_entity {
        struct list_head run_list;
        unsigned long timeout;
        unsigned long watchdog_stamp;
        unsigned int time_slice;
        unsigned short on_rq;
        unsigned short on_list;

        struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity *parent;
        /* rq on which this entity is (to be) queued: */
        struct rt_rq *rt_rq;
        /* rq "owned" by this entity/group: */
        struct rt_rq *my_q;
#endif
};

struct sched_dl_entity {
        struct rb_node rb_node;

        /*
         * Original scheduling parameters. Copied here from sched_attr
         * during sched_setattr(), they will remain the same until
         * the next sched_setattr().
         */
        u64 dl_runtime;         /* maximum runtime for each instance    */
        u64 dl_deadline;        /* relative deadline of each instance   */
        u64 dl_period;          /* separation of two instances (period) */
        u64 dl_bw;              /* dl_runtime / dl_deadline             */

        /*
         * Actual scheduling parameters. Initialized with the values above,
         * they are continuously updated during task execution. Note that
         * the remaining runtime could be < 0 in case we are in overrun.
         */
        s64 runtime;            /* remaining runtime for this instance  */
        u64 deadline;           /* absolute deadline for this instance  */
        unsigned int flags;     /* specifying the scheduler behaviour   */

        /*
         * Some bool flags:
         *
         * @dl_throttled tells if we exhausted the runtime. If so, the
         * task has to wait for a replenishment to be performed at the
         * next firing of dl_timer.
         *
         * @dl_boosted tells if we are boosted due to DI (deadline
         * inheritance). If so we are outside bandwidth enforcement
         * mechanism (but only until we exit the critical section);
         *
         * @dl_yielded tells if task gave up the cpu before consuming
         * all its available runtime during the last job.
         */
        int dl_throttled, dl_boosted, dl_yielded;

        /*
         * Bandwidth enforcement timer. Each -deadline task has its
         * own bandwidth to be enforced, thus we need one timer per task.
         */
        struct hrtimer dl_timer;
};

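/*
 * Worked example (illustration only, not part of this header): a task
 * with dl_runtime = 10ms and dl_deadline = 100ms reserves 10% of a CPU.
 * The kernel keeps dl_bw in fixed point (the to_ratio() helper in
 * kernel/sched/core.c uses a 20-bit shift), so here
 *
 *      dl_bw = (10000000 << 20) / 100000000 ~= 104857   (~0.1 * 2^20)
 */
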
union rcu_special {
        struct {
                u8 blocked;
                u8 need_qs;
                u8 exp_need_qs;
                u8 pad; /* Otherwise the compiler can store garbage here. */
        } b; /* Bits. */
        u32 s; /* Set of bits. */
};
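/*
 * Example (illustration only, not part of this header): the union lets
 * the RCU core test all four byte-sized flags with a single 32-bit load
 * and clear them with a single store:
 *
 *      if (READ_ONCE(t->rcu_read_unlock_special.s)) {
 *              handle the slow path, then:
 *              t->rcu_read_unlock_special.s = 0;
 *      }
 */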
struct rcu_node;

enum perf_event_task_context {
        perf_invalid_context = -1,
        perf_hw_context = 0,
        perf_sw_context,
        perf_nr_task_contexts,
};

struct wake_q_node {
        struct wake_q_node *next;
};

/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
        /*
         * Each bit set is a CPU that potentially has a TLB entry for one of
         * the PFNs being flushed. See set_tlb_ubc_flush_pending().
         */
        struct cpumask cpumask;

        /* True if any bit in cpumask is set */
        bool flush_required;

        /*
         * If true then the PTE was dirty when unmapped. The entry must be
         * flushed before IO is initiated or a stale TLB entry potentially
         * allows an update without redirtying the page.
         */
        bool writable;
};

struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /*
         * For reasons of header soup (see current_thread_info()), this
         * must be the first element of task_struct.
         */
        struct thread_info thread_info;
#endif
        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
        void *stack;
        atomic_t usage;
        unsigned int flags;     /* per process flags, defined below */
        unsigned int ptrace;

#ifdef CONFIG_SMP
        struct llist_node wake_entry;
        int on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
        unsigned int cpu;       /* current CPU */
#endif
        unsigned int wakee_flips;
        unsigned long wakee_flip_decay_ts;
        struct task_struct *last_wakee;

        int wake_cpu;
#endif
        int on_rq;

        int prio, static_prio, normal_prio;
        unsigned int rt_priority;
        const struct sched_class *sched_class;
        struct sched_entity se;
        struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
        struct task_group *sched_task_group;
#endif
        struct sched_dl_entity dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
        /* list of struct preempt_notifier: */
        struct hlist_head preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
        unsigned int btrace_seq;
#endif

        unsigned int policy;
        int nr_cpus_allowed;
        cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
        int rcu_read_lock_nesting;
        union rcu_special rcu_read_unlock_special;
        struct list_head rcu_node_entry;
        struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
        unsigned long rcu_tasks_nvcsw;
        bool rcu_tasks_holdout;
        struct list_head rcu_tasks_holdout_list;
        int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_SCHED_INFO
        struct sched_info sched_info;
#endif

        struct list_head tasks;
#ifdef CONFIG_SMP
        struct plist_node pushable_tasks;
        struct rb_node pushable_dl_tasks;
#endif

        struct mm_struct *mm, *active_mm;

        /* Per-thread vma caching: */
        struct vmacache vmacache;

#if defined(SPLIT_RSS_COUNTING)
        struct task_rss_stat rss_stat;
#endif
/* task state */
        int exit_state;
        int exit_code, exit_signal;
        int pdeath_signal;      /* The signal sent when the parent dies */
        unsigned long jobctl;   /* JOBCTL_*, siglock protected */

        /* Used for emulating ABI behavior of previous Linux versions */
        unsigned int personality;

        /* scheduler bits, serialized by scheduler locks */
        unsigned sched_reset_on_fork:1;
        unsigned sched_contributes_to_load:1;
        unsigned sched_migrated:1;
        unsigned sched_remote_wakeup:1;
        unsigned :0; /* force alignment to the next boundary */

        /* unserialized, strictly 'current' */
        unsigned in_execve:1; /* bit to tell LSMs we're in execve */
        unsigned in_iowait:1;
#if !defined(TIF_RESTORE_SIGMASK)
        unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
        unsigned memcg_may_oom:1;
#ifndef CONFIG_SLOB
        unsigned memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
        unsigned brk_randomized:1;
#endif

        unsigned long atomic_flags; /* Flags needing atomic access. */

        struct restart_block restart_block;

        pid_t pid;
        pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
        /* Canary value for the -fstack-protector gcc feature */
        unsigned long stack_canary;
#endif
        /*
         * pointers to (original) parent process, youngest child, younger sibling,
         * older sibling, respectively. (p->father can be replaced with
         * p->real_parent->pid)
         */
        struct task_struct __rcu *real_parent; /* real parent process */
        struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
        /*
         * children/sibling forms the list of my natural children
         */
        struct list_head children;      /* list of my children */
        struct list_head sibling;       /* linkage in my parent's children list */
        struct task_struct *group_leader;       /* threadgroup leader */

        /*
         * ptraced is the list of tasks this task is using ptrace on.
         * This includes both natural children and PTRACE_ATTACH targets.
         * p->ptrace_entry is p's link on the p->parent->ptraced list.
         */
        struct list_head ptraced;
        struct list_head ptrace_entry;

        /* PID/PID hash table linkage. */
        struct pid_link pids[PIDTYPE_MAX];
        struct list_head thread_group;
        struct list_head thread_node;

        struct completion *vfork_done;          /* for vfork() */
        int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */

        u64 utime, stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        u64 utimescaled, stimescaled;
#endif
        u64 gtime;
        struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
        seqcount_t vtime_seqcount;
        unsigned long long vtime_snap;
        enum {
                /* Task is sleeping or running in a CPU with VTIME inactive */
                VTIME_INACTIVE = 0,
                /* Task runs in userspace in a CPU with VTIME active */
                VTIME_USER,
                /* Task runs in kernelspace in a CPU with VTIME active */
                VTIME_SYS,
        } vtime_snap_whence;
#endif

#ifdef CONFIG_NO_HZ_FULL
        atomic_t tick_dep_mask;
#endif
        unsigned long nvcsw, nivcsw; /* context switch counts */
        u64 start_time;         /* monotonic time in nsec */
        u64 real_start_time;    /* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
        unsigned long min_flt, maj_flt;

#ifdef CONFIG_POSIX_TIMERS
        struct task_cputime cputime_expires;
        struct list_head cpu_timers[3];
#endif

/* process credentials */
        const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
        const struct cred __rcu *real_cred; /* objective and real subjective task
                                             * credentials (COW) */
        const struct cred __rcu *cred;  /* effective (overridable) subjective task
                                         * credentials (COW) */
        char comm[TASK_COMM_LEN]; /* executable name excluding path
                                     - access with [gs]et_task_comm (which lock
                                       it with task_lock())
                                     - initialized normally by setup_new_exec */
/* file system info */
        struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
        struct sysv_sem sysvsem;
        struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
        unsigned long last_switch_count;
#endif
/* filesystem information */
        struct fs_struct *fs;
/* open file information */
        struct files_struct *files;
/* namespaces */
        struct nsproxy *nsproxy;
/* signal handlers */
        struct signal_struct *signal;
        struct sighand_struct *sighand;

        sigset_t blocked, real_blocked;
        sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
        struct sigpending pending;

        unsigned long sas_ss_sp;
        size_t sas_ss_size;
        unsigned sas_ss_flags;

        struct callback_head *task_works;

        struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
        kuid_t loginuid;
        unsigned int sessionid;
#endif
        struct seccomp seccomp;

/* Thread group tracking */
        u32 parent_exec_id;
        u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
        spinlock_t alloc_lock;

        /* Protection of the PI data structures: */
        raw_spinlock_t pi_lock;

        struct wake_q_node wake_q;

#ifdef CONFIG_RT_MUTEXES
        /* PI waiters blocked on a rt_mutex held by this task */
        struct rb_root pi_waiters;
        struct rb_node *pi_waiters_leftmost;
        /* Deadlock detection and priority inheritance handling */
        struct rt_mutex_waiter *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        /* mutex deadlock detection */
        struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        unsigned int irq_events;
        unsigned long hardirq_enable_ip;
        unsigned long hardirq_disable_ip;
        unsigned int hardirq_enable_event;
        unsigned int hardirq_disable_event;
        int hardirqs_enabled;
        int hardirq_context;
        unsigned long softirq_disable_ip;
        unsigned long softirq_enable_ip;
        unsigned int softirq_disable_event;
        unsigned int softirq_enable_event;
        int softirqs_enabled;
        int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
        u64 curr_chain_key;
        int lockdep_depth;
        unsigned int lockdep_recursion;
        struct held_lock held_locks[MAX_LOCK_DEPTH];
        gfp_t lockdep_reclaim_gfp;
#endif
#ifdef CONFIG_UBSAN
        unsigned int in_ubsan;
#endif

/* journalling filesystem info */
        void *journal_info;

/* stacked block device info */
        struct bio_list *bio_list;

#ifdef CONFIG_BLOCK
/* stack plugging */
        struct blk_plug *plug;
#endif

/* VM state */
        struct reclaim_state *reclaim_state;

        struct backing_dev_info *backing_dev_info;

        struct io_context *io_context;

        unsigned long ptrace_message;
        siginfo_t *last_siginfo; /* For ptrace use. */
        struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
        u64 acct_rss_mem1;      /* accumulated rss usage */
        u64 acct_vm_mem1;       /* accumulated virtual memory usage */
        u64 acct_timexpd;       /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
        nodemask_t mems_allowed;        /* Protected by alloc_lock */
        seqcount_t mems_allowed_seq;    /* Sequence no to catch updates */
        int cpuset_mem_spread_rotor;
        int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
        /* Control Group info protected by css_set_lock */
        struct css_set __rcu *cgroups;
        /* cg_list protected by css_set_lock and tsk->alloc_lock */
        struct list_head cg_list;
#endif
#ifdef CONFIG_INTEL_RDT_A
        int closid;
#endif
#ifdef CONFIG_FUTEX
        struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
        struct compat_robust_list_head __user *compat_robust_list;
#endif
        struct list_head pi_state_list;
        struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
        struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
        struct mutex perf_event_mutex;
        struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
        unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *mempolicy;    /* Protected by alloc_lock */
        short il_next;
        short pref_node_fork;
#endif
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200978#ifdef CONFIG_NUMA_BALANCING
979 int numa_scan_seq;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200980 unsigned int numa_scan_period;
Mel Gorman598f0ec2013-10-07 11:28:55 +0100981 unsigned int numa_scan_period_max;
Rik van Rielde1c9ce2013-10-07 11:29:39 +0100982 int numa_preferred_nid;
Mel Gorman6b9a7462013-10-07 11:29:11 +0100983 unsigned long numa_migrate_retry;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200984 u64 node_stamp; /* migration stamp */
Rik van Riel7e2703e2014-01-27 17:03:45 -0500985 u64 last_task_numa_placement;
986 u64 last_sum_exec_runtime;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200987 struct callback_head numa_work;
Mel Gormanf809ca92013-10-07 11:28:57 +0100988
Peter Zijlstra8c8a7432013-10-07 11:29:21 +0100989 struct list_head numa_entry;
990 struct numa_group *numa_group;
991
Mel Gorman745d6142013-10-07 11:28:59 +0100992 /*
Iulia Manda44dba3d2014-10-31 02:13:31 +0200993 * numa_faults is an array split into four regions:
994 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
995 * in this precise order.
996 *
997 * faults_memory: Exponential decaying average of faults on a per-node
998 * basis. Scheduling placement decisions are made based on these
999 * counts. The values remain static for the duration of a PTE scan.
1000 * faults_cpu: Track the nodes the process was running on when a NUMA
1001 * hinting fault was incurred.
1002 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1003 * during the current scan window. When the scan completes, the counts
1004 * in faults_memory and faults_cpu decay and these values are copied.
Mel Gorman745d6142013-10-07 11:28:59 +01001005 */
Iulia Manda44dba3d2014-10-31 02:13:31 +02001006 unsigned long *numa_faults;
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001007 unsigned long total_numa_faults;
Mel Gorman745d6142013-10-07 11:28:59 +01001008
1009 /*
Rik van Riel04bb2f92013-10-07 11:29:36 +01001010 * numa_faults_locality tracks if faults recorded during the last
Mel Gorman074c2382015-03-25 15:55:42 -07001011 * scan window were remote/local or failed to migrate. The task scan
1012 * period is adapted based on the locality of the faults with different
1013 * weights depending on whether they were shared or private faults
Rik van Riel04bb2f92013-10-07 11:29:36 +01001014 */
Mel Gorman074c2382015-03-25 15:55:42 -07001015 unsigned long numa_faults_locality[3];
Rik van Riel04bb2f92013-10-07 11:29:36 +01001016
Ingo Molnarb32e86b2013-10-07 11:29:30 +01001017 unsigned long numa_pages_migrated;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001018#endif /* CONFIG_NUMA_BALANCING */
1019
Mel Gorman72b252a2015-09-04 15:47:32 -07001020#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
1021 struct tlbflush_unmap_batch tlb_ubc;
1022#endif
1023
Ingo Molnare56d0902006-01-08 01:01:37 -08001024 struct rcu_head rcu;
Jens Axboeb92ce552006-04-11 13:52:07 +02001025
1026 /*
1027 * cache last used pipe for splice
1028 */
1029 struct pipe_inode_info *splice_pipe;
Eric Dumazet5640f762012-09-23 23:04:42 +00001030
1031 struct page_frag task_frag;
1032
Ingo Molnar47913d42017-02-01 18:00:26 +01001033#ifdef CONFIG_TASK_DELAY_ACCT
1034 struct task_delay_info *delays;
Shailabh Nagarca74e922006-07-14 00:24:36 -07001035#endif
Ingo Molnar47913d42017-02-01 18:00:26 +01001036
Akinobu Mitaf4f154f2006-12-08 02:39:47 -08001037#ifdef CONFIG_FAULT_INJECTION
1038 int make_it_fail;
1039#endif
Wu Fengguang9d823e82011-06-11 18:10:12 -06001040 /*
1041 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1042 * balance_dirty_pages() for some dirty throttling pause
1043 */
1044 int nr_dirtied;
1045 int nr_dirtied_pause;
Wu Fengguang83712352011-06-11 19:25:42 -06001046 unsigned long dirty_paused_when; /* start of a write-and-pause period */
Wu Fengguang9d823e82011-06-11 18:10:12 -06001047
Arjan van de Ven97455122008-01-25 21:08:34 +01001048#ifdef CONFIG_LATENCYTOP
1049 int latency_record_count;
1050 struct latency_record latency_record[LT_SAVECOUNT];
1051#endif
Arjan van de Ven69766752008-09-01 15:52:40 -07001052 /*
1053 * time slack values; these are used to round up poll() and
1054 * select() etc timeout values. These are in nanoseconds.
1055 */
John Stultzda8b44d2016-03-17 14:20:51 -07001056 u64 timer_slack_ns;
1057 u64 default_timer_slack_ns;
David Millerf8d570a2008-11-06 00:37:40 -08001058
Andrey Ryabinin0b24bec2015-02-13 14:39:17 -08001059#ifdef CONFIG_KASAN
1060 unsigned int kasan_depth;
1061#endif
Frederic Weisbeckerfb526072008-11-25 21:07:04 +01001062#ifdef CONFIG_FUNCTION_GRAPH_TRACER
Daniel Mack3ad2f3f2010-02-03 08:01:28 +08001063 /* Index of current stored address in ret_stack */
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001064 int curr_ret_stack;
1065 /* Stack of return addresses for return function tracing */
1066 struct ftrace_ret_stack *ret_stack;
Steven Rostedt8aef2d22009-03-24 01:10:15 -04001067 /* time stamp for last schedule */
1068 unsigned long long ftrace_timestamp;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001069 /*
1070 * Number of functions that haven't been traced
1071 * because of depth overrun.
1072 */
1073 atomic_t trace_overrun;
Frederic Weisbecker380c4b12008-12-06 03:43:41 +01001074 /* Pause for the tracing */
1075 atomic_t tracing_graph_pause;
Frederic Weisbeckerf201ae22008-11-23 06:22:56 +01001076#endif
Steven Rostedtea4e2bc2008-12-03 15:36:57 -05001077#ifdef CONFIG_TRACING
1078 /* state flags for use by tracers */
1079 unsigned long trace;
Steven Rostedtb1cff0a2011-05-25 14:27:43 -04001080 /* bitmask and counter of trace recursion */
Steven Rostedt261842b2009-04-16 21:41:52 -04001081 unsigned long trace_recursion;
1082#endif /* CONFIG_TRACING */
Dmitry Vyukov5c9a8752016-03-22 14:27:30 -07001083#ifdef CONFIG_KCOV
1084 /* Coverage collection mode enabled for this task (0 if disabled). */
1085 enum kcov_mode kcov_mode;
1086 /* Size of the kcov_area. */
1087 unsigned kcov_size;
1088 /* Buffer for coverage collection. */
1089 void *kcov_area;
1090 /* kcov descriptor wired with this task or NULL. */
1091 struct kcov *kcov;
1092#endif
Vladimir Davydov6f185c22014-12-12 16:55:15 -08001093#ifdef CONFIG_MEMCG
Tejun Heo626ebc42015-11-05 18:46:09 -08001094 struct mem_cgroup *memcg_in_oom;
1095 gfp_t memcg_oom_gfp_mask;
1096 int memcg_oom_order;
Tejun Heob23afb92015-11-05 18:46:11 -08001097
1098 /* number of pages to reclaim on returning to userland */
1099 unsigned int memcg_nr_pages_over_high;
KAMEZAWA Hiroyuki569b8462009-12-15 16:47:03 -08001100#endif
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +05301101#ifdef CONFIG_UPROBES
1102 struct uprobe_task *utask;
Srikar Dronamraju0326f5a2012-03-13 23:30:11 +05301103#endif
Kent Overstreetcafe5632013-03-23 16:11:31 -07001104#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1105 unsigned int sequential_io;
1106 unsigned int sequential_io_avg;
1107#endif
Peter Zijlstra8eb23b92014-09-24 10:18:55 +02001108#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1109 unsigned long task_state_change;
1110#endif
David Hildenbrand8bcbde52015-05-11 17:52:06 +02001111 int pagefault_disabled;
Michal Hocko03049262016-03-25 14:20:33 -07001112#ifdef CONFIG_MMU
Vladimir Davydov29c696e2016-03-25 14:20:39 -07001113 struct task_struct *oom_reaper_list;
Michal Hocko03049262016-03-25 14:20:33 -07001114#endif
Andy Lutomirskiba14a192016-08-11 02:35:21 -07001115#ifdef CONFIG_VMAP_STACK
1116 struct vm_struct *stack_vm_area;
1117#endif
Andy Lutomirski68f24b082016-09-15 22:45:48 -07001118#ifdef CONFIG_THREAD_INFO_IN_TASK
1119 /* A live task holds one reference. */
1120 atomic_t stack_refcount;
1121#endif
Dave Hansen0c8c0f02015-07-17 12:28:11 +02001122/* CPU-specific state of this task */
1123 struct thread_struct thread;
1124/*
1125 * WARNING: on x86, 'thread_struct' contains a variable-sized
1126 * structure. It *MUST* be at the end of 'task_struct'.
1127 *
1128 * Do not put anything below here!
1129 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130};
1131
Ingo Molnar5aaeb5c2015-07-17 12:28:12 +02001132#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
1133extern int arch_task_struct_size __read_mostly;
1134#else
1135# define arch_task_struct_size (sizeof(struct task_struct))
1136#endif
Dave Hansen0c8c0f02015-07-17 12:28:11 +02001137
Andy Lutomirskiba14a192016-08-11 02:35:21 -07001138#ifdef CONFIG_VMAP_STACK
1139static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
1140{
1141 return t->stack_vm_area;
1142}
1143#else
1144static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t)
1145{
1146 return NULL;
1147}
1148#endif
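
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * with the accessor above, a caller can test whether a task's stack is
 * virtually mapped without caring about CONFIG_VMAP_STACK directly.
 */
static inline bool example_stack_is_vmapped(const struct task_struct *t)
{
	return task_stack_vm_area(t) != NULL;
}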
1149
Alexey Dobriyane8681712007-10-26 12:17:22 +04001150static inline struct pid *task_pid(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -07001151{
1152 return task->pids[PIDTYPE_PID].pid;
1153}
1154
Alexey Dobriyane8681712007-10-26 12:17:22 +04001155static inline struct pid *task_tgid(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -07001156{
1157 return task->group_leader->pids[PIDTYPE_PID].pid;
1158}
1159
Oleg Nesterov6dda81f2009-04-02 16:58:35 -07001160/*
1161 * Without tasklist or rcu lock it is not safe to dereference
1162 * the result of task_pgrp/task_session even if task == current;
1163 * we can race with another thread doing sys_setsid/sys_setpgid.
1164 */
Alexey Dobriyane8681712007-10-26 12:17:22 +04001165static inline struct pid *task_pgrp(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -07001166{
1167 return task->group_leader->pids[PIDTYPE_PGID].pid;
1168}
1169
Alexey Dobriyane8681712007-10-26 12:17:22 +04001170static inline struct pid *task_session(struct task_struct *task)
Eric W. Biederman22c935f2006-10-02 02:17:09 -07001171{
1172 return task->group_leader->pids[PIDTYPE_SID].pid;
1173}
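
/*
 * Illustrative sketch (hypothetical helper): per the locking comment
 * above, hold rcu_read_lock() across the dereference and convert the
 * struct pid to a stable numeric id before dropping the lock.
 */
static inline pid_t example_read_pgrp_nr(struct task_struct *task)
{
	pid_t nr;

	rcu_read_lock();
	nr = pid_nr(task_pgrp(task));
	rcu_read_unlock();
	return nr;
}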
1174
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001175struct pid_namespace;
1176
1177/*
1178 * the helpers to get the task's different pids as they are seen
1179 * from various namespaces
1180 *
1181 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
Eric W. Biederman44c4e1b2008-02-08 04:19:15 -08001182 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
1183 * current.
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001184 * task_xid_nr_ns() : id seen from the ns specified;
1185 *
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001188 * see also pid_nr() etc in include/linux/pid.h
1189 */
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001190pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1191 struct pid_namespace *ns);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001192
Alexey Dobriyane8681712007-10-26 12:17:22 +04001193static inline pid_t task_pid_nr(struct task_struct *tsk)
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001194{
1195 return tsk->pid;
1196}
1197
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001198static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1199 struct pid_namespace *ns)
1200{
1201 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1202}
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001203
1204static inline pid_t task_pid_vnr(struct task_struct *tsk)
1205{
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001206 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001207}
1208
1209
Alexey Dobriyane8681712007-10-26 12:17:22 +04001210static inline pid_t task_tgid_nr(struct task_struct *tsk)
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001211{
1212 return tsk->tgid;
1213}
1214
Pavel Emelyanov2f2a3a42007-10-18 23:40:19 -07001215pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001216
1217static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1218{
1219 return pid_vnr(task_tgid(tsk));
1220}
1221
1222
Richard Guy Briggs80e0b6e2014-03-16 14:00:19 -04001223static inline int pid_alive(const struct task_struct *p);
Richard Guy Briggsad36d282013-08-15 18:05:12 -04001224static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1225{
1226 pid_t pid = 0;
1227
1228 rcu_read_lock();
1229 if (pid_alive(tsk))
1230 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1231 rcu_read_unlock();
1232
1233 return pid;
1234}
1235
1236static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1237{
1238 return task_ppid_nr_ns(tsk, &init_pid_ns);
1239}
1240
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001241static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1242 struct pid_namespace *ns)
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001243{
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001244 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001245}
1246
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001247static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1248{
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001249 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001250}
1251
1252
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001253static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1254 struct pid_namespace *ns)
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001255{
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001256 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001257}
1258
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001259static inline pid_t task_session_vnr(struct task_struct *tsk)
1260{
Oleg Nesterov52ee2df2009-04-02 16:58:38 -07001261 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001262}
1263
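/*
 * Illustrative sketch (hypothetical helper): the three id flavours
 * documented above, side by side. For a task inside a child pid
 * namespace, the global and virtual ids will generally differ.
 */
static inline void example_pid_views(struct task_struct *tsk,
				     struct pid_namespace *ns,
				     pid_t *global_id, pid_t *virt_id,
				     pid_t *ns_id)
{
	*global_id = task_pid_nr(tsk);		/* seen from the init ns */
	*virt_id   = task_pid_vnr(tsk);		/* seen from current's ns */
	*ns_id     = task_pid_nr_ns(tsk, ns);	/* seen from @ns */
}
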
Oleg Nesterov1b0f7ffd2009-04-02 16:58:39 -07001264/* obsolete, do not use */
1265static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1266{
1267 return task_pgrp_nr_ns(tsk, &init_pid_ns);
1268}
Pavel Emelyanov7af57292007-10-18 23:40:06 -07001269
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270/**
1271 * pid_alive - check that a task structure is not stale
1272 * @p: Task structure to be checked.
1273 *
1274 * Test if a process is not yet dead (at most zombie state).
1275 * If pid_alive fails, then pointers within the task structure
1276 * can be stale and must not be dereferenced.
Yacine Belkadie69f6182013-07-12 20:45:47 +02001277 *
1278 * Return: 1 if the process is alive. 0 otherwise.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279 */
Richard Guy Briggsad36d282013-08-15 18:05:12 -04001280static inline int pid_alive(const struct task_struct *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281{
Eric W. Biederman92476d72006-03-31 02:31:42 -08001282 return p->pids[PIDTYPE_PID].pid != NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283}
1284
Sukadev Bhattiproluf400e192006-09-29 02:00:07 -07001285/**
Sergey Senozhatsky570f5242016-01-01 23:03:01 +09001286 * is_global_init - check if a task structure is init. Since init
1287 * is free to have sub-threads, we need to check tgid.
Henne32602592006-10-06 00:44:01 -07001288 * @tsk: Task structure to be checked.
1289 *
1290 * Check if a task structure is the first user space task the kernel created.
Yacine Belkadie69f6182013-07-12 20:45:47 +02001291 *
1292 * Return: 1 if the task structure is init. 0 otherwise.
Sukadev Bhattiproluf400e192006-09-29 02:00:07 -07001293 */
Alexey Dobriyane8681712007-10-26 12:17:22 +04001294static inline int is_global_init(struct task_struct *tsk)
Pavel Emelyanovb461cc02007-10-18 23:40:09 -07001295{
Sergey Senozhatsky570f5242016-01-01 23:03:01 +09001296 return task_tgid_nr(tsk) == 1;
Pavel Emelyanovb461cc02007-10-18 23:40:09 -07001297}
Serge E. Hallynb460cbc2007-10-18 23:39:52 -07001298
Cedric Le Goater9ec52092006-10-02 02:19:00 -07001299extern struct pid *cad_pid;
1300
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301extern void free_task(struct task_struct *tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
Ingo Molnare56d0902006-01-08 01:01:37 -08001303
Andrew Morton158d9eb2006-03-31 02:31:34 -08001304extern void __put_task_struct(struct task_struct *t);
Ingo Molnare56d0902006-01-08 01:01:37 -08001305
1306static inline void put_task_struct(struct task_struct *t)
1307{
1308 if (atomic_dec_and_test(&t->usage))
Eric W. Biederman8c7904a2006-03-31 02:31:37 -08001309 __put_task_struct(t);
Ingo Molnare56d0902006-01-08 01:01:37 -08001310}
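
/*
 * Illustrative sketch (hypothetical helper): the usual pin/use/unpin
 * pattern around the refcount helpers above. The caller must already
 * hold a reference (or RCU protection) that keeps @t valid across
 * get_task_struct().
 */
static inline pid_t example_pinned_tgid(struct task_struct *t)
{
	pid_t tgid;

	get_task_struct(t);
	tgid = task_tgid_nr(t);
	put_task_struct(t);
	return tgid;
}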
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311
Oleg Nesterov150593b2016-05-18 19:02:18 +02001312struct task_struct *task_rcu_dereference(struct task_struct **ptask);
1313struct task_struct *try_get_task_struct(struct task_struct **ptask);
1314
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001315#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1316extern void task_cputime(struct task_struct *t,
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001317 u64 *utime, u64 *stime);
Frederic Weisbecker16a6d9b2017-01-31 04:09:21 +01001318extern u64 task_gtime(struct task_struct *t);
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001319#else
Frederic Weisbecker6fac4822012-11-13 14:20:55 +01001320static inline void task_cputime(struct task_struct *t,
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001321 u64 *utime, u64 *stime)
Frederic Weisbecker6fac4822012-11-13 14:20:55 +01001322{
Stanislaw Gruszka353c50e2016-11-15 03:06:52 +01001323 *utime = t->utime;
1324 *stime = t->stime;
Frederic Weisbecker6fac4822012-11-13 14:20:55 +01001325}
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001326
Frederic Weisbecker16a6d9b2017-01-31 04:09:21 +01001327static inline u64 task_gtime(struct task_struct *t)
Frederic Weisbecker6a616712012-12-16 20:00:34 +01001328{
1329 return t->gtime;
1330}
1331#endif
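
/*
 * Illustrative sketch (hypothetical helper): task_cputime() has the
 * same signature in both configurations above, so callers can snapshot
 * a task's user+system time without caring which variant is active.
 */
static inline u64 example_total_cputime(struct task_struct *t)
{
	u64 utime, stime;

	task_cputime(t, &utime, &stime);
	return utime + stime;
}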
Stanislaw Gruszka40565b52016-11-15 03:06:51 +01001332
1333#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
Jens Axboeb31dc662006-06-13 08:26:10 +02001334static inline void task_cputime_scaled(struct task_struct *t,
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001335 u64 *utimescaled,
1336 u64 *stimescaled)
Jens Axboeb31dc662006-06-13 08:26:10 +02001337{
Stanislaw Gruszka353c50e2016-11-15 03:06:52 +01001338 *utimescaled = t->utimescaled;
1339 *stimescaled = t->stimescaled;
Tejun Heo58a69cb2011-02-16 09:25:31 +01001340}
Stanislaw Gruszka40565b52016-11-15 03:06:51 +01001341#else
1342static inline void task_cputime_scaled(struct task_struct *t,
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001343 u64 *utimescaled,
1344 u64 *stimescaled)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345{
Stanislaw Gruszka40565b52016-11-15 03:06:51 +01001346 task_cputime(t, utimescaled, stimescaled);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347}
1348#endif
Stanislaw Gruszka40565b52016-11-15 03:06:51 +01001349
Frederic Weisbecker5613fda2017-01-31 04:09:23 +01001350extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
1351extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352
1353/*
1354 * Per process flags
1355 */
Peter Zijlstrac1de45c2016-11-28 23:03:05 -08001356#define PF_IDLE 0x00000002 /* I am an IDLE thread */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357#define PF_EXITING 0x00000004 /* getting shut down */
1358#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
1359#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
1360#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
1361#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
1362#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
1363#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
1364#define PF_DUMPCORE 0x00000200 /* dumped core */
1365#define PF_SIGNALED 0x00000400 /* killed by a signal */
1366#define PF_MEMALLOC 0x00000800 /* Allocating memory */
1367#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
1368#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
1369#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
1370#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
1371#define PF_FROZEN 0x00010000 /* frozen for system suspend */
1372#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
1373#define PF_KSWAPD 0x00040000 /* I am kswapd */
Ming Lei21caf2f2013-02-22 16:34:08 -08001374#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
1376#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
1377#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
1378#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
Tejun Heo14a40ff2013-03-19 13:45:20 -07001379#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1382#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
Colin Cross2b44c4d2013-07-24 17:41:33 -07001383#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384
1385/*
1386 * Only the _current_ task can read/write to tsk->flags, but other
1387 * tasks can access tsk->flags in readonly mode for example
1388 * with tsk_used_math (like during threaded core dumping).
1389 * There is however an exception to this rule during ptrace
1390 * or during fork: the ptracer task is allowed to write to the
1391 * child->flags of its traced child (same goes for fork, the parent
1392 * can write to the child->flags), because we're guaranteed the
1393 * child is not running and in turn not changing child->flags
1394 * at the same time the parent does it.
1395 */
1396#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1397#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1398#define clear_used_math() clear_stopped_child_used_math(current)
1399#define set_used_math() set_stopped_child_used_math(current)
1400#define conditional_stopped_child_used_math(condition, child) \
1401 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1402#define conditional_used_math(condition) \
1403 conditional_stopped_child_used_math(condition, current)
1404#define copy_to_stopped_child_used_math(child) \
1405 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1406/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1407#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1408#define used_math() tsk_used_math(current)
1409
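/*
 * Illustrative sketch (hypothetical helper): a typical consumer of the
 * PF_USED_MATH accessors above, e.g. deciding whether FPU state must
 * be initialized before first use.
 */
static inline bool example_needs_fpu_init(struct task_struct *p)
{
	return !tsk_used_math(p);
}
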
Kees Cook1d4457f2014-05-21 15:23:46 -07001410/* Per-process atomic flags. */
Zefan Lia2b86f72014-09-25 09:40:17 +08001411#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
Zefan Li2ad654b2014-09-25 09:41:02 +08001412#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
1413#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
Tetsuo Handa77ed2c52016-03-08 20:01:32 +09001414#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
Kees Cook1d4457f2014-05-21 15:23:46 -07001415
Kees Cook1d4457f2014-05-21 15:23:46 -07001416
Zefan Lie0e50702014-09-25 09:40:40 +08001417#define TASK_PFA_TEST(name, func) \
1418 static inline bool task_##func(struct task_struct *p) \
1419 { return test_bit(PFA_##name, &p->atomic_flags); }
1420#define TASK_PFA_SET(name, func) \
1421 static inline void task_set_##func(struct task_struct *p) \
1422 { set_bit(PFA_##name, &p->atomic_flags); }
1423#define TASK_PFA_CLEAR(name, func) \
1424 static inline void task_clear_##func(struct task_struct *p) \
1425 { clear_bit(PFA_##name, &p->atomic_flags); }
Kees Cook1d4457f2014-05-21 15:23:46 -07001426
Zefan Lie0e50702014-09-25 09:40:40 +08001427TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1428TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
Kees Cook1d4457f2014-05-21 15:23:46 -07001429
Zefan Li2ad654b2014-09-25 09:41:02 +08001430TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1431TASK_PFA_SET(SPREAD_PAGE, spread_page)
1432TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1433
1434TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1435TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1436TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
Tejun Heo544b2c92011-06-14 11:20:18 +02001437
Tetsuo Handa77ed2c52016-03-08 20:01:32 +09001438TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
1439TASK_PFA_SET(LMK_WAITING, lmk_waiting)
1440
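/*
 * Illustrative sketch: TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) above
 * expands to roughly
 *
 *	static inline bool task_no_new_privs(struct task_struct *p)
 *	{ return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * so callers simply write task_no_new_privs(current). Note that
 * NO_NEW_PRIVS deliberately has no TASK_PFA_CLEAR() instance: once
 * set, the flag is one-way.
 */
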
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001441static inline void rcu_copy_process(struct task_struct *p)
1442{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443#ifdef CONFIG_PREEMPT_RCU
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001444 p->rcu_read_lock_nesting = 0;
Paul E. McKenney1d082fd2014-08-14 16:01:53 -07001445 p->rcu_read_unlock_special.s = 0;
Paul E. McKenneydd5d19b2009-08-27 14:58:16 -07001446 p->rcu_blocked_node = NULL;
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001447 INIT_LIST_HEAD(&p->rcu_node_entry);
Paul E. McKenney8315f422014-06-27 13:42:20 -07001448#endif /* #ifdef CONFIG_PREEMPT_RCU */
1449#ifdef CONFIG_TASKS_RCU
1450 p->rcu_tasks_holdout = false;
1451 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
Paul E. McKenney176f8f72014-08-04 17:43:50 -07001452 p->rcu_tasks_idle_cpu = -1;
Paul E. McKenney8315f422014-06-27 13:42:20 -07001453#endif /* #ifdef CONFIG_TASKS_RCU */
Paul E. McKenneyf41d9112009-08-22 13:56:52 -07001454}
1455
Mel Gorman907aed42012-07-31 16:44:07 -07001456static inline void tsk_restore_flags(struct task_struct *task,
1457 unsigned long orig_flags, unsigned long flags)
1458{
1459 task->flags &= ~flags;
1460 task->flags |= orig_flags & flags;
1461}
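
/*
 * Illustrative sketch (hypothetical helpers): the save/restore pattern
 * tsk_restore_flags() supports, here scoping PF_MEMALLOC_NOIO around a
 * region that must not recurse into I/O.
 */
static inline unsigned long example_noio_save(void)
{
	unsigned long old = current->flags & PF_MEMALLOC_NOIO;

	current->flags |= PF_MEMALLOC_NOIO;
	return old;
}

static inline void example_noio_restore(unsigned long old)
{
	tsk_restore_flags(current, old, PF_MEMALLOC_NOIO);
}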
1462
Juri Lellif82f8042014-10-07 09:52:11 +01001463extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
1464 const struct cpumask *trial);
Juri Lelli7f514122014-09-19 10:22:40 +01001465extern int task_can_attach(struct task_struct *p,
1466 const struct cpumask *cs_cpus_allowed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467#ifdef CONFIG_SMP
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09001468extern void do_set_cpus_allowed(struct task_struct *p,
1469 const struct cpumask *new_mask);
1470
Mike Traviscd8ba7c2008-03-26 14:23:49 -07001471extern int set_cpus_allowed_ptr(struct task_struct *p,
Rusty Russell96f874e22008-11-25 02:35:14 +10301472 const struct cpumask *new_mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473#else
KOSAKI Motohiro1e1b6c52011-05-19 15:08:58 +09001474static inline void do_set_cpus_allowed(struct task_struct *p,
1475 const struct cpumask *new_mask)
1476{
1477}
Mike Traviscd8ba7c2008-03-26 14:23:49 -07001478static inline int set_cpus_allowed_ptr(struct task_struct *p,
Rusty Russell96f874e22008-11-25 02:35:14 +10301479 const struct cpumask *new_mask)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480{
Rusty Russell96f874e22008-11-25 02:35:14 +10301481 if (!cpumask_test_cpu(0, new_mask))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 return -EINVAL;
1483 return 0;
1484}
1485#endif
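
/*
 * Illustrative sketch (hypothetical helper): restricting a task to a
 * single CPU via the interface above; the UP stub keeps this valid
 * when CONFIG_SMP is off. Assumes the caller is allowed to change
 * @p's affinity.
 */
static inline int example_pin_to_cpu(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}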
Rusty Russelle0ad9552009-09-24 09:34:38 -06001486
Christian Borntraeger6d0d2872016-11-16 13:23:05 +01001487#ifndef cpu_relax_yield
1488#define cpu_relax_yield() cpu_relax()
1489#endif
1490
Ingo Molnar36c8b582006-07-03 00:25:41 -07001491extern unsigned long long
Ingo Molnar41b86e92007-07-09 18:51:58 +02001492task_sched_runtime(struct task_struct *task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493
1494/* sched_exec is called by processes performing an exec */
1495#ifdef CONFIG_SMP
1496extern void sched_exec(void);
1497#else
1498#define sched_exec() {}
1499#endif
1500
1501#ifdef CONFIG_HOTPLUG_CPU
1502extern void idle_task_exit(void);
1503#else
1504static inline void idle_task_exit(void) {}
1505#endif
1506
Dan Carpenterfa933842014-05-23 13:20:42 +03001507extern int yield_to(struct task_struct *p, bool preempt);
Ingo Molnar36c8b582006-07-03 00:25:41 -07001508extern void set_user_nice(struct task_struct *p, long nice);
1509extern int task_prio(const struct task_struct *p);
Dongsheng Yangd0ea0262014-01-27 22:00:45 -05001510/**
1511 * task_nice - return the nice value of a given task.
1512 * @p: the task in question.
1513 *
1514 * Return: The nice value [ -20 ... 0 ... 19 ].
1515 */
1516static inline int task_nice(const struct task_struct *p)
1517{
1518 return PRIO_TO_NICE((p)->static_prio);
1519}
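/*
 * Illustrative sketch of the mapping used above: PRIO_TO_NICE() and
 * NICE_TO_PRIO() come from <linux/sched/prio.h>, where e.g.
 * NICE_TO_PRIO(0) == 120, so task_nice() simply inverts the offset
 * applied to ->static_prio.
 */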
Ingo Molnar36c8b582006-07-03 00:25:41 -07001520extern int can_nice(const struct task_struct *p, const int nice);
1521extern int task_curr(const struct task_struct *p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522extern int idle_cpu(int cpu);
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07001523extern int sched_setscheduler(struct task_struct *, int,
1524 const struct sched_param *);
Rusty Russell961ccdd2008-06-23 13:55:38 +10001525extern int sched_setscheduler_nocheck(struct task_struct *, int,
KOSAKI Motohirofe7de492010-10-20 16:01:12 -07001526 const struct sched_param *);
Dario Faggiolid50dde52013-11-07 14:43:36 +01001527extern int sched_setattr(struct task_struct *,
1528 const struct sched_attr *);
Ingo Molnar36c8b582006-07-03 00:25:41 -07001529extern struct task_struct *idle_task(int cpu);
Paul E. McKenneyc4f30602011-11-10 12:41:56 -08001530/**
1531 * is_idle_task - is the specified task an idle task?
Randy Dunlapfa757282012-01-21 11:03:13 -08001532 * @p: the task in question.
Yacine Belkadie69f6182013-07-12 20:45:47 +02001533 *
1534 * Return: 1 if @p is an idle task. 0 otherwise.
Paul E. McKenneyc4f30602011-11-10 12:41:56 -08001535 */
Paul E. McKenney7061ca32011-12-20 08:20:46 -08001536static inline bool is_idle_task(const struct task_struct *p)
Paul E. McKenneyc4f30602011-11-10 12:41:56 -08001537{
Peter Zijlstrac1de45c2016-11-28 23:03:05 -08001538 return !!(p->flags & PF_IDLE);
Paul E. McKenneyc4f30602011-11-10 12:41:56 -08001539}
Ingo Molnar36c8b582006-07-03 00:25:41 -07001540extern struct task_struct *curr_task(int cpu);
Peter Zijlstraa458ae22016-09-20 20:29:40 +02001541extern void ia64_set_curr_task(int cpu, struct task_struct *p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542
1543void yield(void);
1544
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545union thread_union {
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001546#ifndef CONFIG_THREAD_INFO_IN_TASK
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 struct thread_info thread_info;
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001548#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 unsigned long stack[THREAD_SIZE/sizeof(long)];
1550};
1551
1552#ifndef __HAVE_ARCH_KSTACK_END
1553static inline int kstack_end(void *addr)
1554{
1555 /* Reliable end of stack detection:
1556 * Some APM BIOS versions misalign the stack
1557 */
1558 return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
1559}
1560#endif
1561
1562extern union thread_union init_thread_union;
1563extern struct task_struct init_task;
1564
Pavel Emelyanov198fe212007-10-18 23:40:06 -07001565extern struct pid_namespace init_pid_ns;
1566
1567/*
1568 * find a task by one of its numerical ids
1569 *
Pavel Emelyanov198fe212007-10-18 23:40:06 -07001570 * find_task_by_pid_ns():
1571 * finds a task by its pid in the specified namespace
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001572 * find_task_by_vpid():
1573 * finds a task by its virtual pid
Pavel Emelyanov198fe212007-10-18 23:40:06 -07001574 *
Pavel Emelyanove49859e2008-07-25 01:48:36 -07001575 * see also find_vpid() etc in include/linux/pid.h
Pavel Emelyanov198fe212007-10-18 23:40:06 -07001576 */
1577
Pavel Emelyanov228ebcb2007-10-18 23:40:16 -07001578extern struct task_struct *find_task_by_vpid(pid_t nr);
1579extern struct task_struct *find_task_by_pid_ns(pid_t nr,
1580 struct pid_namespace *ns);
Pavel Emelyanov198fe212007-10-18 23:40:06 -07001581
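/*
 * Illustrative sketch (hypothetical helper): the canonical lookup
 * pattern for the declarations above. find_task_by_vpid() must be
 * called under rcu_read_lock(), and the result has to be pinned
 * before the lock is dropped.
 */
static inline struct task_struct *example_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	return task;
}
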
Harvey Harrisonb3c97522008-02-13 15:03:15 -08001582extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1583extern int wake_up_process(struct task_struct *tsk);
Samir Bellabes3e51e3e2011-05-11 18:18:05 +02001584extern void wake_up_new_task(struct task_struct *tsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585#ifdef CONFIG_SMP
1586 extern void kick_process(struct task_struct *tsk);
1587#else
1588 static inline void kick_process(struct task_struct *tsk) { }
1589#endif
Dario Faggioliaab03e02013-11-28 11:14:43 +01001590extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
Ingo Molnarad46c2c2007-07-09 18:52:00 +02001591extern void sched_dead(struct task_struct *p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593extern void proc_caches_init(void);
Oleg Nesterov9a130492015-11-06 16:32:25 -08001594
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595extern void release_task(struct task_struct * p);
Al Viro5a1b98d2012-11-06 13:28:21 -05001596
Josh Triplett3033f14a2015-06-25 15:01:19 -07001597#ifdef CONFIG_HAVE_COPY_THREAD_TLS
1598extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
1599 struct task_struct *, unsigned long);
1600#else
Alexey Dobriyan6f2c55b2009-04-02 16:56:59 -07001601extern int copy_thread(unsigned long, unsigned long, unsigned long,
Al Viroafa86fc2012-10-22 22:51:14 -04001602 struct task_struct *);
Josh Triplett3033f14a2015-06-25 15:01:19 -07001603
1604/* Architectures that haven't opted into copy_thread_tls get the tls argument
1605 * via pt_regs, so ignore the tls argument passed via C. */
1606static inline int copy_thread_tls(
1607 unsigned long clone_flags, unsigned long sp, unsigned long arg,
1608 struct task_struct *p, unsigned long tls)
1609{
1610 return copy_thread(clone_flags, sp, arg, p);
1611}
1612#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613extern void flush_thread(void);
Jiri Slaby5f56a5d2016-05-20 17:00:16 -07001614
1615#ifdef CONFIG_HAVE_EXIT_THREAD
Jiri Slabye6464692016-05-20 17:00:20 -07001616extern void exit_thread(struct task_struct *tsk);
Jiri Slaby5f56a5d2016-05-20 17:00:16 -07001617#else
Jiri Slabye6464692016-05-20 17:00:20 -07001618static inline void exit_thread(struct task_struct *tsk)
Jiri Slaby5f56a5d2016-05-20 17:00:16 -07001619{
1620}
1621#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623extern void exit_files(struct task_struct *);
Oleg Nesterovcbaffba2008-05-26 20:55:42 +04001624
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625extern void exit_itimers(struct signal_struct *);
1626
Joe Perches9402c952012-01-12 17:17:17 -08001627extern void do_group_exit(int);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
Linus Torvaldsc4ad8f92014-02-05 12:54:53 -08001629extern int do_execve(struct filename *,
David Howellsd7627462010-08-17 23:52:56 +01001630 const char __user * const __user *,
Al Viroda3d4c52012-10-20 21:49:33 -04001631 const char __user * const __user *);
David Drysdale51f39a12014-12-12 16:57:29 -08001632extern int do_execveat(int, struct filename *,
1633 const char __user * const __user *,
1634 const char __user * const __user *,
1635 int);
Josh Triplett3033f14a2015-06-25 15:01:19 -07001636extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
Al Viroe80d6662012-10-22 23:10:08 -04001637extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
Ingo Molnar36c8b582006-07-03 00:25:41 -07001638struct task_struct *fork_idle(int);
Al Viro2aa3a7f2012-09-21 19:55:31 -04001639extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640
Adrian Hunter82b89772014-05-28 11:45:04 +03001641extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1642static inline void set_task_comm(struct task_struct *tsk, const char *from)
1643{
1644 __set_task_comm(tsk, from, false);
1645}
Andrew Morton59714d62008-02-04 22:27:21 -08001646extern char *get_task_comm(char *to, struct task_struct *tsk);
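
/*
 * Illustrative sketch (hypothetical helper): ->comm must be copied
 * into a TASK_COMM_LEN buffer; get_task_comm() takes task_lock()
 * internally so the copy cannot tear against set_task_comm().
 */
static inline void example_copy_comm(struct task_struct *tsk,
				     char buf[TASK_COMM_LEN])
{
	get_task_comm(buf, tsk);
}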
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647
1648#ifdef CONFIG_SMP
Peter Zijlstra317f3942011-04-05 17:23:58 +02001649void scheduler_ipi(void);
Roland McGrath85ba2d82008-07-25 19:45:58 -07001650extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651#else
Peter Zijlstra184748c2011-04-05 17:23:39 +02001652static inline void scheduler_ipi(void) { }
Roland McGrath85ba2d82008-07-25 19:45:58 -07001653static inline unsigned long wait_task_inactive(struct task_struct *p,
1654 long match_state)
1655{
1656 return 1;
1657}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658#endif
1659
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660/*
Eric W. Biederman260ea102006-06-23 02:05:18 -07001661 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
Jens Axboe22e2c502005-06-27 10:55:12 +02001662 * subscriptions and synchronises with wait4(). Also used in procfs. Also
Paul Menageddbcc7e2007-10-18 23:39:30 -07001663 * pins the final release of task.io_context. Also protects ->cpuset and
Oleg Nesterovd68b46f2012-03-05 14:59:13 -08001664 * ->cgroup.subsys[]. And ->vfork_done.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 *
1666 * Nests both inside and outside of read_lock(&tasklist_lock).
1667 * It must not be nested with write_lock_irq(&tasklist_lock),
1668 * neither inside nor outside.
1669 */
1670static inline void task_lock(struct task_struct *p)
1671{
1672 spin_lock(&p->alloc_lock);
1673}
1674
1675static inline void task_unlock(struct task_struct *p)
1676{
1677 spin_unlock(&p->alloc_lock);
1678}
1679
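/*
 * Illustrative sketch (hypothetical helper, simplified from the real
 * get_task_mm() in kernel/fork.c): task_lock() stabilizes ->mm per the
 * comment above, and the extra reference keeps the mm alive after the
 * lock is dropped. The real helper also rejects kernel threads.
 */
static inline struct mm_struct *example_get_task_mm(struct task_struct *p)
{
	struct mm_struct *mm;

	task_lock(p);
	mm = p->mm;
	if (mm)
		atomic_inc(&mm->mm_users);
	task_unlock(p);
	return mm;
}
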
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001680#ifdef CONFIG_THREAD_INFO_IN_TASK
1681
1682static inline struct thread_info *task_thread_info(struct task_struct *task)
1683{
1684 return &task->thread_info;
1685}
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001686
1687/*
1688 * When accessing the stack of a non-current task that might exit, use
1689 * try_get_task_stack() instead. task_stack_page will return a pointer
1690 * that could get freed out from under you.
1691 */
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001692static inline void *task_stack_page(const struct task_struct *task)
1693{
1694 return task->stack;
1695}
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001696
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001697#define setup_thread_stack(new,old) do { } while(0)
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001698
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001699static inline unsigned long *end_of_stack(const struct task_struct *task)
1700{
1701 return task->stack;
1702}
1703
1704#elif !defined(__HAVE_THREAD_FUNCTIONS)
Al Virof0373602005-11-13 16:06:57 -08001705
Roman Zippelf7e42172007-05-09 02:35:17 -07001706#define task_thread_info(task) ((struct thread_info *)(task)->stack)
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001707#define task_stack_page(task) ((void *)(task)->stack)
Al Viroa1261f542005-11-13 16:06:55 -08001708
Al Viro10ebffd2005-11-13 16:06:56 -08001709static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
1710{
1711 *task_thread_info(p) = *task_thread_info(org);
1712 task_thread_info(p)->task = p;
1713}
1714
Chuck Ebbert6a402812014-09-20 10:17:51 -05001715/*
1716 * Return the address of the last usable long on the stack.
1717 *
1718 * When the stack grows down, this is just above the thread
1719 * info struct. Going any lower will corrupt the thread_info.
1720 *
1721 * When the stack grows up, this is the highest address.
1722 * Beyond that position, we corrupt data on the next page.
1723 */
Al Viro10ebffd2005-11-13 16:06:56 -08001724static inline unsigned long *end_of_stack(struct task_struct *p)
1725{
Chuck Ebbert6a402812014-09-20 10:17:51 -05001726#ifdef CONFIG_STACK_GROWSUP
1727 return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
1728#else
Roman Zippelf7e42172007-05-09 02:35:17 -07001729 return (unsigned long *)(task_thread_info(p) + 1);
Chuck Ebbert6a402812014-09-20 10:17:51 -05001730#endif
Al Viro10ebffd2005-11-13 16:06:56 -08001731}
1732
Al Virof0373602005-11-13 16:06:57 -08001733#endif
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001734
Andy Lutomirski68f24b082016-09-15 22:45:48 -07001735#ifdef CONFIG_THREAD_INFO_IN_TASK
1736static inline void *try_get_task_stack(struct task_struct *tsk)
1737{
1738 return atomic_inc_not_zero(&tsk->stack_refcount) ?
1739 task_stack_page(tsk) : NULL;
1740}
1741
1742extern void put_task_stack(struct task_struct *tsk);
1743#else
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001744static inline void *try_get_task_stack(struct task_struct *tsk)
1745{
1746 return task_stack_page(tsk);
1747}
1748
1749static inline void put_task_stack(struct task_struct *tsk) {}
Andy Lutomirski68f24b082016-09-15 22:45:48 -07001750#endif
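
/*
 * Illustrative sketch (hypothetical helper): inspecting another task's
 * stack safely, per the comment near task_stack_page(). The stack is
 * pinned for the duration of the access and released afterwards.
 */
static inline unsigned long example_peek_stack_word(struct task_struct *tsk)
{
	unsigned long word = 0;
	void *stack = try_get_task_stack(tsk);

	if (stack) {
		word = *(unsigned long *)stack;
		put_task_stack(tsk);
	}
	return word;
}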
Andy Lutomirskic6c314a2016-09-15 22:45:43 -07001751
Aaron Tomlina70857e2014-09-12 14:16:18 +01001752#define task_stack_end_corrupted(task) \
1753 (*(end_of_stack(task)) != STACK_END_MAGIC)
Al Virof0373602005-11-13 16:06:57 -08001754
FUJITA Tomonori8b05c7e2008-07-23 21:26:53 -07001755static inline int object_is_on_stack(void *obj)
1756{
1757 void *stack = task_stack_page(current);
1758
1759 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
1760}
1761
Linus Torvaldsb235bee2016-06-24 15:09:37 -07001762extern void thread_stack_cache_init(void);
Benjamin Herrenschmidt8c9843e2008-04-18 16:56:15 +10001763
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001764#ifdef CONFIG_DEBUG_STACK_USAGE
1765static inline unsigned long stack_not_used(struct task_struct *p)
1766{
1767 unsigned long *n = end_of_stack(p);
1768
1769 do { /* Skip over canary */
Helge Deller6c31da32016-03-19 17:54:10 +01001770# ifdef CONFIG_STACK_GROWSUP
1771 n--;
1772# else
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001773 n++;
Helge Deller6c31da32016-03-19 17:54:10 +01001774# endif
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001775 } while (!*n);
1776
Helge Deller6c31da32016-03-19 17:54:10 +01001777# ifdef CONFIG_STACK_GROWSUP
1778 return (unsigned long)end_of_stack(p) - (unsigned long)n;
1779# else
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001780 return (unsigned long)n - (unsigned long)end_of_stack(p);
Helge Deller6c31da32016-03-19 17:54:10 +01001781# endif
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001782}
1783#endif
Aaron Tomlind4311ff2014-09-12 14:16:17 +01001784extern void set_task_stack_end_magic(struct task_struct *tsk);
Eric Sandeen7c9f8862008-04-22 16:38:23 -05001785
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786/* set thread flags in other task's structures
1787 * - see asm/thread_info.h for TIF_xxxx flags available
1788 */
1789static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1790{
Al Viroa1261f542005-11-13 16:06:55 -08001791 set_ti_thread_flag(task_thread_info(tsk), flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792}
1793
1794static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1795{
Al Viroa1261f542005-11-13 16:06:55 -08001796 clear_ti_thread_flag(task_thread_info(tsk), flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797}
1798
1799static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1800{
Al Viroa1261f542005-11-13 16:06:55 -08001801 return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802}
1803
1804static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1805{
Al Viroa1261f542005-11-13 16:06:55 -08001806 return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807}
1808
1809static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1810{
Al Viroa1261f542005-11-13 16:06:55 -08001811 return test_ti_thread_flag(task_thread_info(tsk), flag);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812}
1813
1814static inline void set_tsk_need_resched(struct task_struct *tsk)
1815{
1816 set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1817}
1818
1819static inline void clear_tsk_need_resched(struct task_struct *tsk)
1820{
1821 clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1822}
1823
Gregory Haskins8ae121a2008-04-23 07:13:29 -04001824static inline int test_tsk_need_resched(struct task_struct *tsk)
1825{
1826 return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1827}
1828
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829/*
1830 * cond_resched() and cond_resched_lock(): latency reduction via
1831 * explicit rescheduling in places that are safe. The return
1832 * value indicates whether a reschedule was actually done.
1833 * cond_resched_lock() will drop the spinlock before scheduling,
1834 * cond_resched_softirq() will enable bhs before scheduling.
1835 */
Peter Zijlstra35a773a2016-09-19 12:57:53 +02001836#ifndef CONFIG_PREEMPT
Linus Torvaldsc3921ab2008-05-11 16:04:48 -07001837extern int _cond_resched(void);
Peter Zijlstra35a773a2016-09-19 12:57:53 +02001838#else
1839static inline int _cond_resched(void) { return 0; }
1840#endif
Frederic Weisbecker6f80bd92009-07-16 15:44:29 +02001841
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02001842#define cond_resched() ({ \
Peter Zijlstra34274452014-09-24 10:18:56 +02001843 ___might_sleep(__FILE__, __LINE__, 0); \
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02001844 _cond_resched(); \
1845})
Frederic Weisbecker6f80bd92009-07-16 15:44:29 +02001846
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02001847extern int __cond_resched_lock(spinlock_t *lock);
1848
1849#define cond_resched_lock(lock) ({ \
Peter Zijlstra34274452014-09-24 10:18:56 +02001850 ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02001851 __cond_resched_lock(lock); \
1852})
1853
1854extern int __cond_resched_softirq(void);
1855
Venkatesh Pallipadi75e10562010-10-04 17:03:16 -07001856#define cond_resched_softirq() ({ \
Peter Zijlstra34274452014-09-24 10:18:56 +02001857 ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
Venkatesh Pallipadi75e10562010-10-04 17:03:16 -07001858 __cond_resched_softirq(); \
Frederic Weisbecker613afbf2009-07-16 15:44:29 +02001859})
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860
Simon Hormanf6f3c432013-05-22 14:50:31 +09001861static inline void cond_resched_rcu(void)
1862{
1863#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1864 rcu_read_unlock();
1865 cond_resched();
1866 rcu_read_lock();
1867#endif
1868}
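
/*
 * Illustrative sketch (hypothetical helper): breaking up a long-running
 * loop with cond_resched() so that, on non-preemptible kernels, other
 * runnable tasks are not starved.
 */
static inline void example_long_loop(int count)
{
	int i;

	for (i = 0; i < count; i++) {
		/* ... do one unit of work here ... */
		cond_resched();
	}
}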
1869
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870/*
1871 * Does a critical section need to be broken due to another
Nick Piggin95c354f2008-01-30 13:31:20 +01001872 * task waiting? (This technically does not depend on CONFIG_PREEMPT,
1873 * but reflects a general need for low latency.)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 */
Nick Piggin95c354f2008-01-30 13:31:20 +01001875static inline int spin_needbreak(spinlock_t *lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876{
Nick Piggin95c354f2008-01-30 13:31:20 +01001877#ifdef CONFIG_PREEMPT
1878 return spin_is_contended(lock);
1879#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 return 0;
Nick Piggin95c354f2008-01-30 13:31:20 +01001881#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882}
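
/*
 * Illustrative sketch (hypothetical helper): the lock-break pattern
 * spin_needbreak() enables; cond_resched_lock() above implements the
 * same idea with a reschedule instead of a plain relax.
 */
static inline void example_lock_break(spinlock_t *lock)
{
	if (spin_needbreak(lock)) {
		spin_unlock(lock);
		cpu_relax();
		spin_lock(lock);
	}
}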
1883
Peter Zijlstra75f93fe2013-09-27 17:30:03 +02001884static __always_inline bool need_resched(void)
1885{
1886 return unlikely(tif_need_resched());
1887}
1888
Thomas Gleixneree761f62013-03-21 22:49:32 +01001889/*
Frank Mayharf06febc2008-09-12 09:54:39 -07001890 * Thread group CPU time accounting.
1891 */
Peter Zijlstra4cd4c1b2009-02-05 12:24:16 +01001892void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
Peter Zijlstra4da94d492009-02-11 11:30:27 +01001893void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
Frank Mayharf06febc2008-09-12 09:54:39 -07001894
Frank Mayharf06febc2008-09-12 09:54:39 -07001895/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896 * Wrappers for p->thread_info->cpu access. No-op on UP.
1897 */
1898#ifdef CONFIG_SMP
1899
1900static inline unsigned int task_cpu(const struct task_struct *p)
1901{
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001902#ifdef CONFIG_THREAD_INFO_IN_TASK
1903 return p->cpu;
1904#else
Al Viroa1261f542005-11-13 16:06:55 -08001905 return task_thread_info(p)->cpu;
Andy Lutomirskic65eacb2016-09-13 14:29:24 -07001906#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907}
1908
Ingo Molnarb32e86b2013-10-07 11:29:30 +01001909static inline int task_node(const struct task_struct *p)
1910{
1911 return cpu_to_node(task_cpu(p));
1912}
1913
Ingo Molnarc65cc872007-07-09 18:51:58 +02001914extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
1916#else
1917
1918static inline unsigned int task_cpu(const struct task_struct *p)
1919{
1920 return 0;
1921}
1922
1923static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1924{
1925}
1926
1927#endif /* CONFIG_SMP */
1928
Pan Xinhuid9345c62016-11-02 05:08:28 -04001929/*
1930 * In order to reduce various lock-holder preemption latencies, provide an
1931 * interface to see if a vCPU is currently running or not.
1932 *
1933 * This allows us to terminate optimistic spin loops and block, analogous to
1934 * the native optimistic spin heuristic of testing if the lock owner task is
1935 * running or not.
1936 */
1937#ifndef vcpu_is_preempted
1938# define vcpu_is_preempted(cpu) false
1939#endif
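
/*
 * Illustrative sketch (hypothetical helper): an optimistic spin that
 * bails out once the lock holder's vCPU is preempted, as described
 * above. @locked and @owner_cpu are assumed to come from the caller's
 * lock implementation.
 */
static inline bool example_spin_on_owner(atomic_t *locked, int owner_cpu)
{
	while (atomic_read(locked)) {
		if (vcpu_is_preempted(owner_cpu))
			return false;	/* stop spinning and block */
		cpu_relax();
	}
	return true;
}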
1940
Rusty Russell96f874e22008-11-25 02:35:14 +10301941extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1942extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
Siddha, Suresh B5c45bf22006-06-27 02:54:42 -07001943
Dhaval Giani7c941432010-01-20 13:26:18 +01001944#ifdef CONFIG_CGROUP_SCHED
Yong Zhang07e06b02011-01-07 15:17:36 +08001945extern struct task_group root_task_group;
Peter Zijlstra8323f262012-06-22 13:36:05 +02001946#endif /* CONFIG_CGROUP_SCHED */
Srivatsa Vaddagiri9b5b7752007-10-15 17:00:09 +02001947
Dhaval Giani54e99122009-02-27 15:13:54 +05301948extern int task_can_switch_user(struct user_struct *up,
1949 struct task_struct *tsk);
1950
Dave Hansen82455252008-02-04 22:28:59 -08001951#ifndef TASK_SIZE_OF
1952#define TASK_SIZE_OF(tsk) TASK_SIZE
1953#endif
1954
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955#endif