Peter Zijlstra029632f2011-10-25 10:00:11 +02001
2#include <linux/sched.h>
Clark Williamscf4aebc22013-02-07 09:46:59 -06003#include <linux/sched/sysctl.h>
Clark Williams8bd75c72013-02-07 09:47:07 -06004#include <linux/sched/rt.h>
Dario Faggioliaab03e02013-11-28 11:14:43 +01005#include <linux/sched/deadline.h>
Peter Zijlstra029632f2011-10-25 10:00:11 +02006#include <linux/mutex.h>
7#include <linux/spinlock.h>
8#include <linux/stop_machine.h>
Steven Rostedtb6366f02015-03-18 14:49:46 -04009#include <linux/irq_work.h>
Frederic Weisbecker9f3660c2013-04-20 14:35:09 +020010#include <linux/tick.h>
Mel Gormanf809ca92013-10-07 11:28:57 +010011#include <linux/slab.h>
Peter Zijlstra029632f2011-10-25 10:00:11 +020012
Peter Zijlstra391e43d2011-11-15 17:14:39 +010013#include "cpupri.h"
Juri Lelli6bfd6d72013-11-07 14:43:47 +010014#include "cpudeadline.h"
Li Zefan60fed782013-03-29 14:36:43 +080015#include "cpuacct.h"
Peter Zijlstra029632f2011-10-25 10:00:11 +020016
Paul Gortmaker45ceebf2013-04-19 15:10:49 -040017struct rq;
Daniel Lezcano442bf3a2014-09-04 11:32:09 -040018struct cpuidle_state;
Paul Gortmaker45ceebf2013-04-19 15:10:49 -040019
Kirill Tkhaida0c1e62014-08-20 13:47:32 +040020/* task_struct::on_rq states: */
21#define TASK_ON_RQ_QUEUED 1
Kirill Tkhaicca26e82014-08-20 13:47:42 +040022#define TASK_ON_RQ_MIGRATING 2
Kirill Tkhaida0c1e62014-08-20 13:47:32 +040023
Peter Zijlstra029632f2011-10-25 10:00:11 +020024extern __read_mostly int scheduler_running;
25
Paul Gortmaker45ceebf2013-04-19 15:10:49 -040026extern unsigned long calc_load_update;
27extern atomic_long_t calc_load_tasks;
28
Peter Zijlstra3289bdb2015-04-14 13:19:42 +020029extern void calc_global_load_tick(struct rq *this_rq);
Paul Gortmaker45ceebf2013-04-19 15:10:49 -040030extern long calc_load_fold_active(struct rq *this_rq);
Peter Zijlstra3289bdb2015-04-14 13:19:42 +020031
32#ifdef CONFIG_SMP
Paul Gortmaker45ceebf2013-04-19 15:10:49 -040033extern void update_cpu_load_active(struct rq *this_rq);
Peter Zijlstra3289bdb2015-04-14 13:19:42 +020034#else
35static inline void update_cpu_load_active(struct rq *this_rq) { }
36#endif
Paul Gortmaker45ceebf2013-04-19 15:10:49 -040037
/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
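/*
 * Illustrative sketch (not part of the kernel API): with HZ == 1000 a jiffy
 * is 1ms, so NSEC_PER_SEC / HZ == 1,000,000 and a 4ms interval converts as:
 *
 *	u64 slice_ns = 4 * NSEC_PER_MSEC;		// 4,000,000 ns
 *	unsigned long j = NS_TO_JIFFIES(slice_ns);	// 4,000,000 / 1,000,000 == 4 jiffies
 *
 * The division truncates, so any sub-jiffy remainder is simply dropped.
 */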
42
/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing for
 * low-weight task groups (e.g. nice +19 on an autogroup) and deeper
 * task-group hierarchies, especially on larger systems. This is not a
 * user-visible change and does not change the user interface for setting
 * shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
55#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
56# define SCHED_LOAD_RESOLUTION 10
57# define scale_load(w) ((w) << SCHED_LOAD_RESOLUTION)
58# define scale_load_down(w) ((w) >> SCHED_LOAD_RESOLUTION)
59#else
60# define SCHED_LOAD_RESOLUTION 0
61# define scale_load(w) (w)
62# define scale_load_down(w) (w)
63#endif
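/*
 * Worked example (illustrative only): with SCHED_LOAD_RESOLUTION == 0 the
 * scale_load()/scale_load_down() pair is a no-op, so the nice-0 weight of
 * 1024 is stored as 1024. If the extra resolution above were re-enabled
 * (SCHED_LOAD_RESOLUTION == 10), the same weight would be stored with ten
 * extra fractional bits and scaled back down before being reported:
 *
 *	scale_load(1024)	 == 1024 << 10	   == 1048576
 *	scale_load_down(1048576) == 1048576 >> 10  == 1024
 */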
64
65#define SCHED_LOAD_SHIFT (10 + SCHED_LOAD_RESOLUTION)
66#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
67
Peter Zijlstra029632f2011-10-25 10:00:11 +020068#define NICE_0_LOAD SCHED_LOAD_SCALE
69#define NICE_0_SHIFT SCHED_LOAD_SHIFT
70
71/*
Dario Faggioli332ac172013-11-07 14:43:45 +010072 * Single value that decides SCHED_DEADLINE internal math precision.
73 * 10 -> just above 1us
74 * 9 -> just above 0.5us
75 */
76#define DL_SCALE (10)
77
78/*
Peter Zijlstra029632f2011-10-25 10:00:11 +020079 * These are the 'tuning knobs' of the scheduler:
Peter Zijlstra029632f2011-10-25 10:00:11 +020080 */
Peter Zijlstra029632f2011-10-25 10:00:11 +020081
/*
 * Single value that denotes runtime == period, i.e. unlimited time.
 */
85#define RUNTIME_INF ((u64)~0ULL)
86
Henrik Austad20f9cd22015-09-09 17:00:41 +020087static inline int idle_policy(int policy)
88{
89 return policy == SCHED_IDLE;
90}
Dario Faggiolid50dde52013-11-07 14:43:36 +010091static inline int fair_policy(int policy)
92{
93 return policy == SCHED_NORMAL || policy == SCHED_BATCH;
94}
95
Peter Zijlstra029632f2011-10-25 10:00:11 +020096static inline int rt_policy(int policy)
97{
Dario Faggiolid50dde52013-11-07 14:43:36 +010098 return policy == SCHED_FIFO || policy == SCHED_RR;
Peter Zijlstra029632f2011-10-25 10:00:11 +020099}
100
Dario Faggioliaab03e02013-11-28 11:14:43 +0100101static inline int dl_policy(int policy)
102{
103 return policy == SCHED_DEADLINE;
104}
Henrik Austad20f9cd22015-09-09 17:00:41 +0200105static inline bool valid_policy(int policy)
106{
107 return idle_policy(policy) || fair_policy(policy) ||
108 rt_policy(policy) || dl_policy(policy);
109}
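/*
 * Illustrative use of the policy predicates above (sketch only): each policy
 * maps to exactly one predicate, and valid_policy() accepts their union, e.g.
 *
 *	valid_policy(SCHED_FIFO)	-> true  (rt_policy())
 *	valid_policy(SCHED_DEADLINE)	-> true  (dl_policy())
 *	valid_policy(SCHED_BATCH)	-> true  (fair_policy())
 *	valid_policy(7)			-> false (rejected, e.g. by sched_setscheduler())
 */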
Dario Faggioliaab03e02013-11-28 11:14:43 +0100110
Peter Zijlstra029632f2011-10-25 10:00:11 +0200111static inline int task_has_rt_policy(struct task_struct *p)
112{
113 return rt_policy(p->policy);
114}
115
Dario Faggioliaab03e02013-11-28 11:14:43 +0100116static inline int task_has_dl_policy(struct task_struct *p)
117{
118 return dl_policy(p->policy);
119}
120
/*
 * Tells if entity @a should preempt entity @b.
 */
static inline bool
dl_entity_preempt(struct sched_dl_entity *a, struct sched_dl_entity *b)
{
	return dl_time_before(a->deadline, b->deadline);
}
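/*
 * Example (sketch with hypothetical values): dl_entity_preempt() implements
 * the EDF rule "earlier absolute deadline wins". If entity A has
 * a->deadline == 150ms worth of rq-clock time and entity B has
 * b->deadline == 200ms, then dl_entity_preempt(A, B) is true and A should
 * run first; dl_time_before() does the comparison with wrap-around-safe
 * signed 64-bit arithmetic.
 */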
129
Peter Zijlstra029632f2011-10-25 10:00:11 +0200130/*
131 * This is the priority-queue data structure of the RT scheduling class:
132 */
133struct rt_prio_array {
134 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
135 struct list_head queue[MAX_RT_PRIO];
136};
137
138struct rt_bandwidth {
139 /* nests inside the rq lock: */
140 raw_spinlock_t rt_runtime_lock;
141 ktime_t rt_period;
142 u64 rt_runtime;
143 struct hrtimer rt_period_timer;
Peter Zijlstra4cfafd32015-05-14 12:23:11 +0200144 unsigned int rt_period_active;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200145};
Juri Lellia5e7be32014-09-19 10:22:39 +0100146
147void __dl_clear_params(struct task_struct *p);
148
/*
 * To keep the bandwidth of -deadline tasks and groups under control
 * we need some place where we:
 *  - store the maximum -deadline bandwidth of the system (the group);
 *  - cache the fraction of that bandwidth that is currently allocated.
 *
 * This is all done in the data structure below. It is similar to the
 * one used for RT-throttling (rt_bandwidth), with the main difference
 * that, since here we are only interested in admission control, we
 * do not decrease any runtime while the group "executes", nor do we
 * need a timer to replenish it.
 *
 * With respect to SMP, the bandwidth is given on a per-CPU basis,
 * meaning that:
 *  - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
 *  - the dl_total_bw array contains, in the i-th element, the currently
 *    allocated bandwidth on the i-th CPU.
 * Moreover, groups consume bandwidth on each CPU, while tasks only
 * consume bandwidth on the CPU they're running on.
 * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
 * that will be shown the next time the proc or cgroup controls are
 * read. It can, in turn, be changed by writing to its own control file.
 */
173struct dl_bandwidth {
174 raw_spinlock_t dl_runtime_lock;
175 u64 dl_runtime;
176 u64 dl_period;
177};
178
179static inline int dl_bandwidth_enabled(void)
180{
Peter Zijlstra17248132013-12-17 12:44:49 +0100181 return sysctl_sched_rt_runtime >= 0;
Dario Faggioli332ac172013-11-07 14:43:45 +0100182}
183
184extern struct dl_bw *dl_bw_of(int i);
185
186struct dl_bw {
187 raw_spinlock_t lock;
188 u64 bw, total_bw;
189};
190
Juri Lelli7f514122014-09-19 10:22:40 +0100191static inline
192void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
193{
194 dl_b->total_bw -= tsk_bw;
195}
196
197static inline
198void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
199{
200 dl_b->total_bw += tsk_bw;
201}
202
static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}
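/*
 * Worked admission-control example (illustrative numbers): suppose a root
 * domain spans cpus == 4 and dl_b->bw encodes 95% of a CPU (which is what
 * the default sched_rt_runtime/sched_rt_period sysctls give). The budget for
 * the whole domain is then dl_b->bw * cpus, i.e. four times 95% in the same
 * fixed-point scale, and a task asking for new_bw is admitted only if
 *
 *	dl_b->total_bw - old_bw + new_bw <= dl_b->bw * cpus
 *
 * where old_bw is whatever that task already had reserved (0 for a fresh
 * admission). dl_b->bw == -1 means "no limit" and always admits.
 */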
209
Peter Zijlstra029632f2011-10-25 10:00:11 +0200210extern struct mutex sched_domains_mutex;
211
212#ifdef CONFIG_CGROUP_SCHED
213
214#include <linux/cgroup.h>
215
216struct cfs_rq;
217struct rt_rq;
218
Mike Galbraith35cf4e52012-08-07 05:00:13 +0200219extern struct list_head task_groups;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200220
221struct cfs_bandwidth {
222#ifdef CONFIG_CFS_BANDWIDTH
223 raw_spinlock_t lock;
224 ktime_t period;
225 u64 quota, runtime;
Zhihui Zhang9c58c792014-09-20 21:24:36 -0400226 s64 hierarchical_quota;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200227 u64 runtime_expires;
228
Peter Zijlstra4cfafd32015-05-14 12:23:11 +0200229 int idle, period_active;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200230 struct hrtimer period_timer, slack_timer;
231 struct list_head throttled_cfs_rq;
232
233 /* statistics */
234 int nr_periods, nr_throttled;
235 u64 throttled_time;
236#endif
237};
238
239/* task group related information */
240struct task_group {
241 struct cgroup_subsys_state css;
242
243#ifdef CONFIG_FAIR_GROUP_SCHED
244 /* schedulable entities of this group on each cpu */
245 struct sched_entity **se;
246 /* runqueue "owned" by this group on each cpu */
247 struct cfs_rq **cfs_rq;
248 unsigned long shares;
249
Alex Shifa6bdde2013-06-20 10:18:46 +0800250#ifdef CONFIG_SMP
Alex Shibf5b9862013-06-20 10:18:54 +0800251 atomic_long_t load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200252#endif
Alex Shifa6bdde2013-06-20 10:18:46 +0800253#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +0200254
255#ifdef CONFIG_RT_GROUP_SCHED
256 struct sched_rt_entity **rt_se;
257 struct rt_rq **rt_rq;
258
259 struct rt_bandwidth rt_bandwidth;
260#endif
261
262 struct rcu_head rcu;
263 struct list_head list;
264
265 struct task_group *parent;
266 struct list_head siblings;
267 struct list_head children;
268
269#ifdef CONFIG_SCHED_AUTOGROUP
270 struct autogroup *autogroup;
271#endif
272
273 struct cfs_bandwidth cfs_bandwidth;
274};
275
276#ifdef CONFIG_FAIR_GROUP_SCHED
277#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
278
/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity should not be too large,
 * and neither should the shares value of a task group.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
287#define MIN_SHARES (1UL << 1)
288#define MAX_SHARES (1UL << 18)
289#endif
290
Peter Zijlstra029632f2011-10-25 10:00:11 +0200291typedef int (*tg_visitor)(struct task_group *, void *);
292
293extern int walk_tg_tree_from(struct task_group *from,
294 tg_visitor down, tg_visitor up, void *data);
295
296/*
297 * Iterate the full tree, calling @down when first entering a node and @up when
298 * leaving it for the final time.
299 *
300 * Caller must hold rcu_lock or sufficient equivalent.
301 */
302static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
303{
304 return walk_tg_tree_from(&root_task_group, down, up, data);
305}
306
307extern int tg_nop(struct task_group *tg, void *data);
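/*
 * Usage sketch (hypothetical visitor, for illustration only): walk_tg_tree()
 * does a depth-first walk of the task_group hierarchy, calling @down on the
 * way down and @up on the way back up; tg_nop() can be used for whichever
 * direction the caller does not care about, and a non-zero return aborts the
 * walk. A caller that only needs the "down" pass could look like:
 *
 *	static int tg_count_one(struct task_group *tg, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr_groups = 0;
 *	rcu_read_lock();
 *	walk_tg_tree(tg_count_one, tg_nop, &nr_groups);
 *	rcu_read_unlock();
 */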
308
309extern void free_fair_sched_group(struct task_group *tg);
310extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
311extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
312extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
313 struct sched_entity *se, int cpu,
314 struct sched_entity *parent);
315extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
316extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
317
318extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +0200319extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
Peter Zijlstra029632f2011-10-25 10:00:11 +0200320extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
321
322extern void free_rt_sched_group(struct task_group *tg);
323extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
324extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
325 struct sched_rt_entity *rt_se, int cpu,
326 struct sched_rt_entity *parent);
327
Li Zefan25cc7da2013-03-05 16:07:33 +0800328extern struct task_group *sched_create_group(struct task_group *parent);
329extern void sched_online_group(struct task_group *tg,
330 struct task_group *parent);
331extern void sched_destroy_group(struct task_group *tg);
332extern void sched_offline_group(struct task_group *tg);
333
334extern void sched_move_task(struct task_struct *tsk);
335
336#ifdef CONFIG_FAIR_GROUP_SCHED
337extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
Byungchul Parkad936d82015-10-24 01:16:19 +0900338
339#ifdef CONFIG_SMP
340extern void set_task_rq_fair(struct sched_entity *se,
341 struct cfs_rq *prev, struct cfs_rq *next);
342#else /* !CONFIG_SMP */
343static inline void set_task_rq_fair(struct sched_entity *se,
344 struct cfs_rq *prev, struct cfs_rq *next) { }
345#endif /* CONFIG_SMP */
346#endif /* CONFIG_FAIR_GROUP_SCHED */
Li Zefan25cc7da2013-03-05 16:07:33 +0800347
Peter Zijlstra029632f2011-10-25 10:00:11 +0200348#else /* CONFIG_CGROUP_SCHED */
349
350struct cfs_bandwidth { };
351
352#endif /* CONFIG_CGROUP_SCHED */
353
354/* CFS-related fields in a runqueue */
355struct cfs_rq {
356 struct load_weight load;
Peter Zijlstrac82513e2012-04-26 13:12:27 +0200357 unsigned int nr_running, h_nr_running;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200358
359 u64 exec_clock;
360 u64 min_vruntime;
361#ifndef CONFIG_64BIT
362 u64 min_vruntime_copy;
363#endif
364
365 struct rb_root tasks_timeline;
366 struct rb_node *rb_leftmost;
367
Peter Zijlstra029632f2011-10-25 10:00:11 +0200368 /*
369 * 'curr' points to currently running entity on this cfs_rq.
370 * It is set to NULL otherwise (i.e when none are currently running).
371 */
372 struct sched_entity *curr, *next, *last, *skip;
373
374#ifdef CONFIG_SCHED_DEBUG
375 unsigned int nr_spread_over;
376#endif
377
Paul Turner2dac7542012-10-04 13:18:30 +0200378#ifdef CONFIG_SMP
379 /*
Yuyang Du9d89c252015-07-15 08:04:37 +0800380 * CFS load tracking
Paul Turner2dac7542012-10-04 13:18:30 +0200381 */
Yuyang Du9d89c252015-07-15 08:04:37 +0800382 struct sched_avg avg;
Yuyang Du13962232015-07-15 08:04:41 +0800383 u64 runnable_load_sum;
384 unsigned long runnable_load_avg;
Yuyang Du9d89c252015-07-15 08:04:37 +0800385#ifdef CONFIG_FAIR_GROUP_SCHED
386 unsigned long tg_load_avg_contrib;
387#endif
388 atomic_long_t removed_load_avg, removed_util_avg;
389#ifndef CONFIG_64BIT
390 u64 load_last_update_time_copy;
391#endif
Alex Shi141965c2013-06-26 13:05:39 +0800392
Paul Turnerc566e8e2012-10-04 13:18:30 +0200393#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Turner82958362012-10-04 13:18:31 +0200394 /*
395 * h_load = weight * f(tg)
396 *
397 * Where f(tg) is the recursive weight fraction assigned to
398 * this group.
399 */
400 unsigned long h_load;
Vladimir Davydov68520792013-07-15 17:49:19 +0400401 u64 last_h_load_update;
402 struct sched_entity *h_load_next;
403#endif /* CONFIG_FAIR_GROUP_SCHED */
Paul Turner82958362012-10-04 13:18:31 +0200404#endif /* CONFIG_SMP */
405
Peter Zijlstra029632f2011-10-25 10:00:11 +0200406#ifdef CONFIG_FAIR_GROUP_SCHED
407 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
408
	/*
	 * leaf cfs_rqs are those that hold tasks (the lowest schedulable
	 * entities in a hierarchy). Non-leaf cfs_rqs hold other, higher
	 * schedulable entities (like users, containers etc.).
	 *
	 * leaf_cfs_rq_list ties together the list of leaf cfs_rqs on a CPU.
	 * This list is used during load balance.
	 */
417 int on_list;
418 struct list_head leaf_cfs_rq_list;
419 struct task_group *tg; /* group that "owns" this runqueue */
420
Peter Zijlstra029632f2011-10-25 10:00:11 +0200421#ifdef CONFIG_CFS_BANDWIDTH
422 int runtime_enabled;
423 u64 runtime_expires;
424 s64 runtime_remaining;
425
Paul Turnerf1b17282012-10-04 13:18:31 +0200426 u64 throttled_clock, throttled_clock_task;
427 u64 throttled_clock_task_time;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200428 int throttled, throttle_count;
429 struct list_head throttled_list;
430#endif /* CONFIG_CFS_BANDWIDTH */
431#endif /* CONFIG_FAIR_GROUP_SCHED */
432};
433
434static inline int rt_bandwidth_enabled(void)
435{
436 return sysctl_sched_rt_runtime >= 0;
437}
438
Steven Rostedtb6366f02015-03-18 14:49:46 -0400439/* RT IPI pull logic requires IRQ_WORK */
440#ifdef CONFIG_IRQ_WORK
441# define HAVE_RT_PUSH_IPI
442#endif
443
/* Real-Time classes' related fields in a runqueue: */
445struct rt_rq {
446 struct rt_prio_array active;
Peter Zijlstrac82513e2012-04-26 13:12:27 +0200447 unsigned int rt_nr_running;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200448#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
449 struct {
450 int curr; /* highest queued rt task prio */
451#ifdef CONFIG_SMP
452 int next; /* next highest */
453#endif
454 } highest_prio;
455#endif
456#ifdef CONFIG_SMP
457 unsigned long rt_nr_migratory;
458 unsigned long rt_nr_total;
459 int overloaded;
460 struct plist_head pushable_tasks;
Steven Rostedtb6366f02015-03-18 14:49:46 -0400461#ifdef HAVE_RT_PUSH_IPI
462 int push_flags;
463 int push_cpu;
464 struct irq_work push_work;
465 raw_spinlock_t push_lock;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200466#endif
Steven Rostedtb6366f02015-03-18 14:49:46 -0400467#endif /* CONFIG_SMP */
Kirill Tkhaif4ebcbc2014-03-15 02:15:00 +0400468 int rt_queued;
469
Peter Zijlstra029632f2011-10-25 10:00:11 +0200470 int rt_throttled;
471 u64 rt_time;
472 u64 rt_runtime;
473 /* Nests inside the rq lock: */
474 raw_spinlock_t rt_runtime_lock;
475
476#ifdef CONFIG_RT_GROUP_SCHED
477 unsigned long rt_nr_boosted;
478
479 struct rq *rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200480 struct task_group *tg;
481#endif
482};
483
Dario Faggioliaab03e02013-11-28 11:14:43 +0100484/* Deadline class' related fields in a runqueue */
485struct dl_rq {
486 /* runqueue is an rbtree, ordered by deadline */
487 struct rb_root rb_root;
488 struct rb_node *rb_leftmost;
489
490 unsigned long dl_nr_running;
Juri Lelli1baca4c2013-11-07 14:43:38 +0100491
492#ifdef CONFIG_SMP
	/*
	 * Deadline values of the currently executing and the
	 * earliest ready task on this rq. Caching these facilitates
	 * the decision whether or not a ready but not running task
	 * should migrate somewhere else.
	 */
499 struct {
500 u64 curr;
501 u64 next;
502 } earliest_dl;
503
504 unsigned long dl_nr_migratory;
Juri Lelli1baca4c2013-11-07 14:43:38 +0100505 int overloaded;
506
507 /*
508 * Tasks on this rq that can be pushed away. They are kept in
509 * an rb-tree, ordered by tasks' deadlines, with caching
510 * of the leftmost (earliest deadline) element.
511 */
512 struct rb_root pushable_dl_tasks_root;
513 struct rb_node *pushable_dl_tasks_leftmost;
Dario Faggioli332ac172013-11-07 14:43:45 +0100514#else
515 struct dl_bw dl_bw;
Juri Lelli1baca4c2013-11-07 14:43:38 +0100516#endif
Dario Faggioliaab03e02013-11-28 11:14:43 +0100517};
518
Peter Zijlstra029632f2011-10-25 10:00:11 +0200519#ifdef CONFIG_SMP
520
521/*
522 * We add the notion of a root-domain which will be used to define per-domain
523 * variables. Each exclusive cpuset essentially defines an island domain by
524 * fully partitioning the member cpus from any other cpuset. Whenever a new
525 * exclusive cpuset is created, we also create and attach a new root-domain
526 * object.
527 *
528 */
529struct root_domain {
530 atomic_t refcount;
531 atomic_t rto_count;
532 struct rcu_head rcu;
533 cpumask_var_t span;
534 cpumask_var_t online;
535
Tim Chen4486edd2014-06-23 12:16:49 -0700536 /* Indicate more than one runnable task for any CPU */
537 bool overload;
538
Peter Zijlstra029632f2011-10-25 10:00:11 +0200539 /*
Juri Lelli1baca4c2013-11-07 14:43:38 +0100540 * The bit corresponding to a CPU gets set here if such CPU has more
541 * than one runnable -deadline task (as it is below for RT tasks).
542 */
543 cpumask_var_t dlo_mask;
544 atomic_t dlo_count;
Dario Faggioli332ac172013-11-07 14:43:45 +0100545 struct dl_bw dl_bw;
Juri Lelli6bfd6d72013-11-07 14:43:47 +0100546 struct cpudl cpudl;
Juri Lelli1baca4c2013-11-07 14:43:38 +0100547
548 /*
Peter Zijlstra029632f2011-10-25 10:00:11 +0200549 * The "RT overload" flag: it gets set if a CPU has more than
550 * one runnable RT task.
551 */
552 cpumask_var_t rto_mask;
553 struct cpupri cpupri;
554};
555
556extern struct root_domain def_root_domain;
557
558#endif /* CONFIG_SMP */
559
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that needs to lock multiple runqueues (such as the
 * load balancing or the thread migration code) must acquire the locks in
 * ascending runqueue-address order.
 */
567struct rq {
568 /* runqueue lock: */
569 raw_spinlock_t lock;
570
571 /*
572 * nr_running and cpu_load should be in the same cacheline because
573 * remote CPUs use both these fields when doing load calculation.
574 */
Peter Zijlstrac82513e2012-04-26 13:12:27 +0200575 unsigned int nr_running;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +0100576#ifdef CONFIG_NUMA_BALANCING
577 unsigned int nr_numa_running;
578 unsigned int nr_preferred_running;
579#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +0200580 #define CPU_LOAD_IDX_MAX 5
581 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
582 unsigned long last_load_update_tick;
Frederic Weisbecker3451d022011-08-10 23:21:01 +0200583#ifdef CONFIG_NO_HZ_COMMON
Peter Zijlstra029632f2011-10-25 10:00:11 +0200584 u64 nohz_stamp;
Suresh Siddha1c792db2011-12-01 17:07:32 -0800585 unsigned long nohz_flags;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200586#endif
Frederic Weisbecker265f22a2013-05-03 03:39:05 +0200587#ifdef CONFIG_NO_HZ_FULL
588 unsigned long last_sched_tick;
589#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +0200590 /* capture load from *all* tasks on this cpu: */
591 struct load_weight load;
592 unsigned long nr_load_updates;
593 u64 nr_switches;
594
595 struct cfs_rq cfs;
596 struct rt_rq rt;
Dario Faggioliaab03e02013-11-28 11:14:43 +0100597 struct dl_rq dl;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200598
599#ifdef CONFIG_FAIR_GROUP_SCHED
600 /* list of leaf cfs_rq on this cpu: */
601 struct list_head leaf_cfs_rq_list;
Peter Zijlstraa35b6462012-08-08 21:46:40 +0200602#endif /* CONFIG_FAIR_GROUP_SCHED */
603
Peter Zijlstra029632f2011-10-25 10:00:11 +0200604 /*
605 * This is part of a global counter where only the total sum
606 * over all CPUs matters. A task can increase this counter on
607 * one CPU and if it got migrated afterwards it may decrease
608 * it on another CPU. Always updated under the runqueue lock:
609 */
610 unsigned long nr_uninterruptible;
611
612 struct task_struct *curr, *idle, *stop;
613 unsigned long next_balance;
614 struct mm_struct *prev_mm;
615
Peter Zijlstra9edfbfe2015-01-05 11:18:11 +0100616 unsigned int clock_skip_update;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200617 u64 clock;
618 u64 clock_task;
619
620 atomic_t nr_iowait;
621
622#ifdef CONFIG_SMP
623 struct root_domain *rd;
624 struct sched_domain *sd;
625
Nicolas Pitreced549f2014-05-26 18:19:38 -0400626 unsigned long cpu_capacity;
Vincent Guittotca6d75e2015-02-27 16:54:09 +0100627 unsigned long cpu_capacity_orig;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200628
Peter Zijlstrae3fca9e2015-06-11 14:46:37 +0200629 struct callback_head *balance_callback;
630
Peter Zijlstra029632f2011-10-25 10:00:11 +0200631 unsigned char idle_balance;
632 /* For active balancing */
Peter Zijlstra029632f2011-10-25 10:00:11 +0200633 int active_balance;
634 int push_cpu;
635 struct cpu_stop_work active_balance_work;
636 /* cpu of this runqueue: */
637 int cpu;
638 int online;
639
Peter Zijlstra367456c2012-02-20 21:49:09 +0100640 struct list_head cfs_tasks;
641
Peter Zijlstra029632f2011-10-25 10:00:11 +0200642 u64 rt_avg;
643 u64 age_stamp;
644 u64 idle_stamp;
645 u64 avg_idle;
Jason Low9bd721c2013-09-13 11:26:52 -0700646
647 /* This is used to determine avg_idle's max value */
648 u64 max_idle_balance_cost;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200649#endif
650
651#ifdef CONFIG_IRQ_TIME_ACCOUNTING
652 u64 prev_irq_time;
653#endif
654#ifdef CONFIG_PARAVIRT
655 u64 prev_steal_time;
656#endif
657#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
658 u64 prev_steal_time_rq;
659#endif
660
661 /* calc_load related fields */
662 unsigned long calc_load_update;
663 long calc_load_active;
664
665#ifdef CONFIG_SCHED_HRTICK
666#ifdef CONFIG_SMP
667 int hrtick_csd_pending;
668 struct call_single_data hrtick_csd;
669#endif
670 struct hrtimer hrtick_timer;
671#endif
672
673#ifdef CONFIG_SCHEDSTATS
674 /* latency stats */
675 struct sched_info rq_sched_info;
676 unsigned long long rq_cpu_time;
677 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
678
679 /* sys_sched_yield() stats */
680 unsigned int yld_count;
681
682 /* schedule() stats */
Peter Zijlstra029632f2011-10-25 10:00:11 +0200683 unsigned int sched_count;
684 unsigned int sched_goidle;
685
686 /* try_to_wake_up() stats */
687 unsigned int ttwu_count;
688 unsigned int ttwu_local;
689#endif
690
691#ifdef CONFIG_SMP
692 struct llist_head wake_list;
693#endif
Daniel Lezcano442bf3a2014-09-04 11:32:09 -0400694
695#ifdef CONFIG_CPU_IDLE
696 /* Must be inspected within a rcu lock section */
697 struct cpuidle_state *idle_state;
698#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +0200699};
700
701static inline int cpu_of(struct rq *rq)
702{
703#ifdef CONFIG_SMP
704 return rq->cpu;
705#else
706 return 0;
707#endif
708}
709
Pranith Kumar8b06c552014-08-13 13:28:12 -0400710DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
Peter Zijlstra029632f2011-10-25 10:00:11 +0200711
Peter Zijlstra518cd622011-12-07 15:07:31 +0100712#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
Christoph Lameter4a32fea2014-08-17 12:30:27 -0500713#define this_rq() this_cpu_ptr(&runqueues)
Peter Zijlstra518cd622011-12-07 15:07:31 +0100714#define task_rq(p) cpu_rq(task_cpu(p))
715#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
Christoph Lameter4a32fea2014-08-17 12:30:27 -0500716#define raw_rq() raw_cpu_ptr(&runqueues)
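/*
 * Illustrative sketch of how these accessors are typically used (not a new
 * API): each CPU owns one struct rq in the per-cpu "runqueues" area, so
 *
 *	struct rq *rq   = cpu_rq(3);	// runqueue of CPU 3
 *	struct rq *here = this_rq();	// runqueue of the local CPU, normally
 *					// used with preemption disabled
 *	struct rq *trq  = task_rq(p);	// runqueue @p is currently assigned to
 *
 * task_rq(p) can change under you unless p->pi_lock and/or the relevant
 * rq->lock is held; see task_rq_lock() further down.
 */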
Peter Zijlstra518cd622011-12-07 15:07:31 +0100717
Peter Zijlstracebde6d2015-01-05 11:18:10 +0100718static inline u64 __rq_clock_broken(struct rq *rq)
719{
Jason Low316c1608d2015-04-28 13:00:20 -0700720 return READ_ONCE(rq->clock);
Peter Zijlstracebde6d2015-01-05 11:18:10 +0100721}
722
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200723static inline u64 rq_clock(struct rq *rq)
724{
Peter Zijlstracebde6d2015-01-05 11:18:10 +0100725 lockdep_assert_held(&rq->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200726 return rq->clock;
727}
728
729static inline u64 rq_clock_task(struct rq *rq)
730{
Peter Zijlstracebde6d2015-01-05 11:18:10 +0100731 lockdep_assert_held(&rq->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200732 return rq->clock_task;
733}
734
Peter Zijlstra9edfbfe2015-01-05 11:18:11 +0100735#define RQCF_REQ_SKIP 0x01
736#define RQCF_ACT_SKIP 0x02
737
738static inline void rq_clock_skip_update(struct rq *rq, bool skip)
739{
740 lockdep_assert_held(&rq->lock);
741 if (skip)
742 rq->clock_skip_update |= RQCF_REQ_SKIP;
743 else
744 rq->clock_skip_update &= ~RQCF_REQ_SKIP;
745}
746
Rik van Riel9942f792014-10-17 03:29:49 -0400747#ifdef CONFIG_NUMA
Rik van Riele3fe70b2014-10-17 03:29:50 -0400748enum numa_topology_type {
749 NUMA_DIRECT,
750 NUMA_GLUELESS_MESH,
751 NUMA_BACKPLANE,
752};
753extern enum numa_topology_type sched_numa_topology_type;
Rik van Riel9942f792014-10-17 03:29:49 -0400754extern int sched_max_numa_distance;
755extern bool find_numa_distance(int distance);
756#endif
757
Mel Gormanf809ca92013-10-07 11:28:57 +0100758#ifdef CONFIG_NUMA_BALANCING
Iulia Manda44dba3d2014-10-31 02:13:31 +0200759/* The regions in numa_faults array from task_struct */
760enum numa_faults_stats {
761 NUMA_MEM = 0,
762 NUMA_CPU,
763 NUMA_MEMBUF,
764 NUMA_CPUBUF
765};
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +0100766extern void sched_setnuma(struct task_struct *p, int node);
Mel Gormane6628d52013-10-07 11:29:02 +0100767extern int migrate_task_to(struct task_struct *p, int cpu);
Peter Zijlstraac66f542013-10-07 11:29:16 +0100768extern int migrate_swap(struct task_struct *, struct task_struct *);
Mel Gormanf809ca92013-10-07 11:28:57 +0100769#endif /* CONFIG_NUMA_BALANCING */
770
Peter Zijlstra518cd622011-12-07 15:07:31 +0100771#ifdef CONFIG_SMP
772
Peter Zijlstrae3fca9e2015-06-11 14:46:37 +0200773static inline void
774queue_balance_callback(struct rq *rq,
775 struct callback_head *head,
776 void (*func)(struct rq *rq))
777{
778 lockdep_assert_held(&rq->lock);
779
780 if (unlikely(head->next))
781 return;
782
783 head->func = (void (*)(struct callback_head *))func;
784 head->next = rq->balance_callback;
785 rq->balance_callback = head;
786}
787
Peter Zijlstrae3baac42014-06-04 10:31:18 -0700788extern void sched_ttwu_pending(void);
789
Peter Zijlstra029632f2011-10-25 10:00:11 +0200790#define rcu_dereference_check_sched_domain(p) \
791 rcu_dereference_check((p), \
792 lockdep_is_held(&sched_domains_mutex))
793
794/*
795 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
796 * See detach_destroy_domains: synchronize_sched for details.
797 *
798 * The domain tree of any CPU may only be accessed from within
799 * preempt-disabled sections.
800 */
801#define for_each_domain(cpu, __sd) \
Peter Zijlstra518cd622011-12-07 15:07:31 +0100802 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); \
803 __sd; __sd = __sd->parent)
Peter Zijlstra029632f2011-10-25 10:00:11 +0200804
Suresh Siddha77e81362011-11-17 11:08:23 -0800805#define for_each_lower_domain(sd) for (; sd; sd = sd->child)
806
Peter Zijlstra518cd622011-12-07 15:07:31 +0100807/**
808 * highest_flag_domain - Return highest sched_domain containing flag.
809 * @cpu: The cpu whose highest level of sched domain is to
810 * be returned.
811 * @flag: The flag to check for the highest sched_domain
812 * for the given cpu.
813 *
814 * Returns the highest sched_domain of a cpu which contains the given flag.
815 */
816static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
817{
818 struct sched_domain *sd, *hsd = NULL;
819
820 for_each_domain(cpu, sd) {
821 if (!(sd->flags & flag))
822 break;
823 hsd = sd;
824 }
825
826 return hsd;
827}
828
Mel Gormanfb13c7e2013-10-07 11:29:17 +0100829static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
830{
831 struct sched_domain *sd;
832
833 for_each_domain(cpu, sd) {
834 if (sd->flags & flag)
835 break;
836 }
837
838 return sd;
839}
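/*
 * Example (sketch): the per-cpu sd_llc pointer below is derived by looking
 * for the highest domain that still shares a last-level cache, roughly:
 *
 *	struct sched_domain *sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
 *
 * while lowest_flag_domain() is used the other way around, e.g. to find the
 * smallest domain spanning a NUMA level (SD_NUMA) for sd_numa.
 */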
840
Peter Zijlstra518cd622011-12-07 15:07:31 +0100841DECLARE_PER_CPU(struct sched_domain *, sd_llc);
Peter Zijlstra7d9ffa82013-07-04 12:56:46 +0800842DECLARE_PER_CPU(int, sd_llc_size);
Peter Zijlstra518cd622011-12-07 15:07:31 +0100843DECLARE_PER_CPU(int, sd_llc_id);
Mel Gormanfb13c7e2013-10-07 11:29:17 +0100844DECLARE_PER_CPU(struct sched_domain *, sd_numa);
Preeti U Murthy37dc6b52013-10-30 08:42:52 +0530845DECLARE_PER_CPU(struct sched_domain *, sd_busy);
846DECLARE_PER_CPU(struct sched_domain *, sd_asym);
Peter Zijlstra518cd622011-12-07 15:07:31 +0100847
Nicolas Pitre63b2ca32014-05-26 18:19:37 -0400848struct sched_group_capacity {
Li Zefan5e6521e2013-03-05 16:06:23 +0800849 atomic_t ref;
850 /*
Nicolas Pitre63b2ca32014-05-26 18:19:37 -0400851 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
852 * for a single CPU.
Li Zefan5e6521e2013-03-05 16:06:23 +0800853 */
Vincent Guittotdc7ff762015-03-03 11:35:03 +0100854 unsigned int capacity;
Li Zefan5e6521e2013-03-05 16:06:23 +0800855 unsigned long next_update;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -0400856 int imbalance; /* XXX unrelated to capacity but shared group state */
Li Zefan5e6521e2013-03-05 16:06:23 +0800857 /*
858 * Number of busy cpus in this group.
859 */
860 atomic_t nr_busy_cpus;
861
862 unsigned long cpumask[0]; /* iteration mask */
863};
864
865struct sched_group {
866 struct sched_group *next; /* Must be a circular list */
867 atomic_t ref;
868
869 unsigned int group_weight;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -0400870 struct sched_group_capacity *sgc;
Li Zefan5e6521e2013-03-05 16:06:23 +0800871
872 /*
873 * The CPUs this group covers.
874 *
875 * NOTE: this field is variable length. (Allocated dynamically
876 * by attaching extra space to the end of the structure,
877 * depending on how many CPUs the kernel has booted up with)
878 */
879 unsigned long cpumask[0];
880};
881
882static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
883{
884 return to_cpumask(sg->cpumask);
885}
886
887/*
888 * cpumask masking which cpus in the group are allowed to iterate up the domain
889 * tree.
890 */
891static inline struct cpumask *sched_group_mask(struct sched_group *sg)
892{
Nicolas Pitre63b2ca32014-05-26 18:19:37 -0400893 return to_cpumask(sg->sgc->cpumask);
Li Zefan5e6521e2013-03-05 16:06:23 +0800894}
895
896/**
897 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
898 * @group: The group whose first cpu is to be returned.
899 */
900static inline unsigned int group_first_cpu(struct sched_group *group)
901{
902 return cpumask_first(sched_group_cpus(group));
903}
904
Peter Zijlstrac1174872012-05-31 14:47:33 +0200905extern int group_balance_cpu(struct sched_group *sg);
906
Peter Zijlstrae3baac42014-06-04 10:31:18 -0700907#else
908
909static inline void sched_ttwu_pending(void) { }
910
Peter Zijlstra518cd622011-12-07 15:07:31 +0100911#endif /* CONFIG_SMP */
Peter Zijlstra029632f2011-10-25 10:00:11 +0200912
Peter Zijlstra391e43d2011-11-15 17:14:39 +0100913#include "stats.h"
914#include "auto_group.h"
Peter Zijlstra029632f2011-10-25 10:00:11 +0200915
916#ifdef CONFIG_CGROUP_SCHED
917
/*
 * Return the group to which this task belongs.
 *
 * We cannot use task_css() and friends because the cgroup subsystem
 * changes that value before the cgroup_subsys::attach() method is called,
 * therefore we cannot pin it and might observe the wrong value.
 *
 * The same is true for autogroup's p->signal->autogroup->tg: the autogroup
 * core changes this before calling sched_move_task().
 *
 * Instead we use a 'copy' which is updated from sched_move_task() while
 * holding both task_struct::pi_lock and rq::lock.
 */
931static inline struct task_group *task_group(struct task_struct *p)
932{
Peter Zijlstra8323f262012-06-22 13:36:05 +0200933 return p->sched_task_group;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200934}
935
936/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
937static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
938{
939#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
940 struct task_group *tg = task_group(p);
941#endif
942
943#ifdef CONFIG_FAIR_GROUP_SCHED
Byungchul Parkad936d82015-10-24 01:16:19 +0900944 set_task_rq_fair(&p->se, p->se.cfs_rq, tg->cfs_rq[cpu]);
Peter Zijlstra029632f2011-10-25 10:00:11 +0200945 p->se.cfs_rq = tg->cfs_rq[cpu];
946 p->se.parent = tg->se[cpu];
947#endif
948
949#ifdef CONFIG_RT_GROUP_SCHED
950 p->rt.rt_rq = tg->rt_rq[cpu];
951 p->rt.parent = tg->rt_se[cpu];
952#endif
953}
954
955#else /* CONFIG_CGROUP_SCHED */
956
957static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
958static inline struct task_group *task_group(struct task_struct *p)
959{
960 return NULL;
961}
962
963#endif /* CONFIG_CGROUP_SCHED */
964
965static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
966{
967 set_task_rq(p, cpu);
968#ifdef CONFIG_SMP
	/*
	 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
	 * successfully executed on another CPU. We must ensure that updates of
	 * per-task data have been completed by this moment.
	 */
974 smp_wmb();
975 task_thread_info(p)->cpu = cpu;
Peter Zijlstraac66f542013-10-07 11:29:16 +0100976 p->wake_cpu = cpu;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200977#endif
978}
979
980/*
981 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
982 */
983#ifdef CONFIG_SCHED_DEBUG
Ingo Molnarc5905af2012-02-24 08:31:31 +0100984# include <linux/static_key.h>
Peter Zijlstra029632f2011-10-25 10:00:11 +0200985# define const_debug __read_mostly
986#else
987# define const_debug const
988#endif
989
990extern const_debug unsigned int sysctl_sched_features;
991
992#define SCHED_FEAT(name, enabled) \
993 __SCHED_FEAT_##name ,
994
995enum {
Peter Zijlstra391e43d2011-11-15 17:14:39 +0100996#include "features.h"
Peter Zijlstraf8b6d1c2011-07-06 14:20:14 +0200997 __SCHED_FEAT_NR,
Peter Zijlstra029632f2011-10-25 10:00:11 +0200998};
999
1000#undef SCHED_FEAT
1001
Peter Zijlstraf8b6d1c2011-07-06 14:20:14 +02001002#if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
Peter Zijlstraf8b6d1c2011-07-06 14:20:14 +02001003#define SCHED_FEAT(name, enabled) \
Ingo Molnarc5905af2012-02-24 08:31:31 +01001004static __always_inline bool static_branch_##name(struct static_key *key) \
Peter Zijlstraf8b6d1c2011-07-06 14:20:14 +02001005{ \
Jason Baron6e76ea82014-07-02 15:52:41 +00001006 return static_key_##enabled(key); \
Peter Zijlstraf8b6d1c2011-07-06 14:20:14 +02001007}
1008
1009#include "features.h"
1010
1011#undef SCHED_FEAT
1012
Ingo Molnarc5905af2012-02-24 08:31:31 +01001013extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
Peter Zijlstraf8b6d1c2011-07-06 14:20:14 +02001014#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
1015#else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
Peter Zijlstra029632f2011-10-25 10:00:11 +02001016#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
Peter Zijlstraf8b6d1c2011-07-06 14:20:14 +02001017#endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
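/*
 * Illustrative expansion (sketch): for a feature declared in features.h as
 * SCHED_FEAT(HRTICK, false), a test such as
 *
 *	if (sched_feat(HRTICK))
 *		...
 *
 * compiles either to a jump-label (static key) branch that can be flipped at
 * runtime through the sched_features debugfs file (SCHED_DEBUG plus
 * HAVE_JUMP_LABEL), or to a plain bit test against sysctl_sched_features
 * otherwise.
 */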
Peter Zijlstra029632f2011-10-25 10:00:11 +02001018
Srikar Dronamraju2a595722015-08-11 21:54:21 +05301019extern struct static_key_false sched_numa_balancing;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001020
Peter Zijlstra029632f2011-10-25 10:00:11 +02001021static inline u64 global_rt_period(void)
1022{
1023 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
1024}
1025
1026static inline u64 global_rt_runtime(void)
1027{
1028 if (sysctl_sched_rt_runtime < 0)
1029 return RUNTIME_INF;
1030
1031 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
1032}
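/*
 * Worked example (with the usual defaults, illustrative only): if
 * sysctl_sched_rt_period is 1,000,000 us and sysctl_sched_rt_runtime is
 * 950,000 us, these helpers return 1,000,000,000 ns and 950,000,000 ns
 * respectively, i.e. RT (and, via admission control, deadline) tasks may
 * consume at most 95% of each CPU per period. Setting the runtime sysctl
 * to a negative value turns the limit into RUNTIME_INF.
 */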
1033
Peter Zijlstra029632f2011-10-25 10:00:11 +02001034static inline int task_current(struct rq *rq, struct task_struct *p)
1035{
1036 return rq->curr == p;
1037}
1038
1039static inline int task_running(struct rq *rq, struct task_struct *p)
1040{
1041#ifdef CONFIG_SMP
1042 return p->on_cpu;
1043#else
1044 return task_current(rq, p);
1045#endif
1046}
1047
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04001048static inline int task_on_rq_queued(struct task_struct *p)
1049{
1050 return p->on_rq == TASK_ON_RQ_QUEUED;
1051}
Peter Zijlstra029632f2011-10-25 10:00:11 +02001052
Kirill Tkhaicca26e82014-08-20 13:47:42 +04001053static inline int task_on_rq_migrating(struct task_struct *p)
1054{
1055 return p->on_rq == TASK_ON_RQ_MIGRATING;
1056}
1057
Peter Zijlstra029632f2011-10-25 10:00:11 +02001058#ifndef prepare_arch_switch
1059# define prepare_arch_switch(next) do { } while (0)
1060#endif
Catalin Marinas01f23e12011-11-27 21:43:10 +00001061#ifndef finish_arch_post_lock_switch
1062# define finish_arch_post_lock_switch() do { } while (0)
1063#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02001064
Peter Zijlstra029632f2011-10-25 10:00:11 +02001065static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
1066{
1067#ifdef CONFIG_SMP
1068 /*
1069 * We can optimise this out completely for !SMP, because the
1070 * SMP rebalancing from interrupt is the only thing that cares
1071 * here.
1072 */
1073 next->on_cpu = 1;
1074#endif
1075}
1076
1077static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
1078{
1079#ifdef CONFIG_SMP
1080 /*
1081 * After ->on_cpu is cleared, the task can be moved to a different CPU.
1082 * We must ensure this doesn't happen until the switch is completely
1083 * finished.
Peter Zijlstra95913d92015-09-29 14:45:09 +02001084 *
Peter Zijlstrab75a2252015-10-06 14:36:17 +02001085 * In particular, the load of prev->state in finish_task_switch() must
1086 * happen before this.
1087 *
Peter Zijlstra95913d92015-09-29 14:45:09 +02001088 * Pairs with the control dependency and rmb in try_to_wake_up().
Peter Zijlstra029632f2011-10-25 10:00:11 +02001089 */
Peter Zijlstra95913d92015-09-29 14:45:09 +02001090 smp_store_release(&prev->on_cpu, 0);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001091#endif
1092#ifdef CONFIG_DEBUG_SPINLOCK
1093 /* this is a valid case when another task releases the spinlock */
1094 rq->lock.owner = current;
1095#endif
1096 /*
1097 * If we are tracking spinlock dependencies then we have to
1098 * fix up the runqueue lock - which gets 'carried over' from
1099 * prev into current:
1100 */
1101 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
1102
1103 raw_spin_unlock_irq(&rq->lock);
1104}
1105
Li Zefanb13095f2013-03-05 16:06:38 +08001106/*
1107 * wake flags
1108 */
1109#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
1110#define WF_FORK 0x02 /* child wakeup after fork */
1111#define WF_MIGRATED 0x4 /* internal use, task got migrated */
1112
Peter Zijlstra029632f2011-10-25 10:00:11 +02001113/*
1114 * To aid in avoiding the subversion of "niceness" due to uneven distribution
1115 * of tasks with abnormal "nice" values across CPUs the contribution that
1116 * each task makes to its run queue's load is weighted according to its
1117 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
1118 * scaled version of the new time slice allocation that they receive on time
1119 * slice expiry etc.
1120 */
1121
1122#define WEIGHT_IDLEPRIO 3
1123#define WMULT_IDLEPRIO 1431655765
1124
Andi Kleened82b8a2015-11-29 20:59:43 -08001125extern const int sched_prio_to_weight[40];
1126extern const u32 sched_prio_to_wmult[40];
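/*
 * Worked example (table values live in kernel/sched/core.c):
 * sched_prio_to_weight[] maps nice -20..+19 to load weights, with nice 0
 * equal to 1024 and roughly a 1.25x step per nice level, e.g.
 *
 *	sched_prio_to_weight[0]  == 88761	// nice -20
 *	sched_prio_to_weight[20] == 1024	// nice   0
 *	sched_prio_to_weight[39] == 15		// nice +19
 *
 * sched_prio_to_wmult[] caches 2^32 / weight so the hot path can replace a
 * division by a multiply and shift.
 */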
Peter Zijlstra029632f2011-10-25 10:00:11 +02001127
Peter Zijlstra1de64442015-09-30 17:44:13 +02001128#define ENQUEUE_WAKEUP 0x01
1129#define ENQUEUE_HEAD 0x02
Li Zefanc82ba9f2013-03-05 16:06:55 +08001130#ifdef CONFIG_SMP
Peter Zijlstra1de64442015-09-30 17:44:13 +02001131#define ENQUEUE_WAKING 0x04 /* sched_class::task_waking was called */
Li Zefanc82ba9f2013-03-05 16:06:55 +08001132#else
Peter Zijlstra1de64442015-09-30 17:44:13 +02001133#define ENQUEUE_WAKING 0x00
Li Zefanc82ba9f2013-03-05 16:06:55 +08001134#endif
Peter Zijlstra1de64442015-09-30 17:44:13 +02001135#define ENQUEUE_REPLENISH 0x08
1136#define ENQUEUE_RESTORE 0x10
Li Zefanc82ba9f2013-03-05 16:06:55 +08001137
Peter Zijlstra1de64442015-09-30 17:44:13 +02001138#define DEQUEUE_SLEEP 0x01
1139#define DEQUEUE_SAVE 0x02
Li Zefanc82ba9f2013-03-05 16:06:55 +08001140
Peter Zijlstra37e117c2014-02-14 12:25:08 +01001141#define RETRY_TASK ((void *)-1UL)
1142
Li Zefanc82ba9f2013-03-05 16:06:55 +08001143struct sched_class {
1144 const struct sched_class *next;
1145
1146 void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
1147 void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
1148 void (*yield_task) (struct rq *rq);
1149 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
1150
1151 void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
1152
Peter Zijlstra606dba22012-02-11 06:05:00 +01001153 /*
1154 * It is the responsibility of the pick_next_task() method that will
1155 * return the next task to call put_prev_task() on the @prev task or
1156 * something equivalent.
Peter Zijlstra37e117c2014-02-14 12:25:08 +01001157 *
1158 * May return RETRY_TASK when it finds a higher prio class has runnable
1159 * tasks.
Peter Zijlstra606dba22012-02-11 06:05:00 +01001160 */
1161 struct task_struct * (*pick_next_task) (struct rq *rq,
1162 struct task_struct *prev);
Li Zefanc82ba9f2013-03-05 16:06:55 +08001163 void (*put_prev_task) (struct rq *rq, struct task_struct *p);
1164
1165#ifdef CONFIG_SMP
Peter Zijlstraac66f542013-10-07 11:29:16 +01001166 int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
xiaofeng.yan5a4fd032015-09-23 14:55:59 +08001167 void (*migrate_task_rq)(struct task_struct *p);
Li Zefanc82ba9f2013-03-05 16:06:55 +08001168
Li Zefanc82ba9f2013-03-05 16:06:55 +08001169 void (*task_waking) (struct task_struct *task);
1170 void (*task_woken) (struct rq *this_rq, struct task_struct *task);
1171
1172 void (*set_cpus_allowed)(struct task_struct *p,
1173 const struct cpumask *newmask);
1174
1175 void (*rq_online)(struct rq *rq);
1176 void (*rq_offline)(struct rq *rq);
1177#endif
1178
1179 void (*set_curr_task) (struct rq *rq);
1180 void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
1181 void (*task_fork) (struct task_struct *p);
Dario Faggiolie6c390f2013-11-07 14:43:35 +01001182 void (*task_dead) (struct task_struct *p);
Li Zefanc82ba9f2013-03-05 16:06:55 +08001183
	/*
	 * The switched_from() call is allowed to drop rq->lock, therefore we
	 * cannot assume the switched_from/switched_to pair is serialized by
	 * rq->lock. They are, however, serialized by p->pi_lock.
	 */
Li Zefanc82ba9f2013-03-05 16:06:55 +08001189 void (*switched_from) (struct rq *this_rq, struct task_struct *task);
1190 void (*switched_to) (struct rq *this_rq, struct task_struct *task);
1191 void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
1192 int oldprio);
1193
1194 unsigned int (*get_rr_interval) (struct rq *rq,
1195 struct task_struct *task);
1196
Stanislaw Gruszka6e998912014-11-12 16:58:44 +01001197 void (*update_curr) (struct rq *rq);
1198
Li Zefanc82ba9f2013-03-05 16:06:55 +08001199#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrabc54da22015-08-31 17:13:55 +02001200 void (*task_move_group) (struct task_struct *p);
Li Zefanc82ba9f2013-03-05 16:06:55 +08001201#endif
1202};
Peter Zijlstra029632f2011-10-25 10:00:11 +02001203
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +01001204static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
1205{
1206 prev->sched_class->put_prev_task(rq, prev);
1207}
1208
Peter Zijlstra029632f2011-10-25 10:00:11 +02001209#define sched_class_highest (&stop_sched_class)
1210#define for_each_class(class) \
1211 for (class = sched_class_highest; class; class = class->next)
1212
1213extern const struct sched_class stop_sched_class;
Dario Faggioliaab03e02013-11-28 11:14:43 +01001214extern const struct sched_class dl_sched_class;
Peter Zijlstra029632f2011-10-25 10:00:11 +02001215extern const struct sched_class rt_sched_class;
1216extern const struct sched_class fair_sched_class;
1217extern const struct sched_class idle_sched_class;
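/*
 * Usage sketch (simplified from the core pick path, not a verbatim copy):
 * the classes are walked in strict priority order, stop -> dl -> rt ->
 * fair -> idle, and the first class that returns a task wins:
 *
 *	const struct sched_class *class;
 *	struct task_struct *p;
 *
 *	for_each_class(class) {
 *		p = class->pick_next_task(rq, prev);
 *		if (p == RETRY_TASK)
 *			goto again;	// a higher class became runnable, restart
 *		if (p)
 *			return p;
 *	}
 *
 * The idle class always returns a task, so the loop cannot fall through.
 */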
1218
1219
1220#ifdef CONFIG_SMP
1221
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04001222extern void update_group_capacity(struct sched_domain *sd, int cpu);
Li Zefanb7192032013-03-07 10:00:26 +08001223
Daniel Lezcano7caff662014-01-06 12:34:38 +01001224extern void trigger_load_balance(struct rq *rq);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001225
Peter Zijlstrac5b28032015-05-15 17:43:35 +02001226extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask);
1227
Peter Zijlstra029632f2011-10-25 10:00:11 +02001228#endif
1229
Daniel Lezcano442bf3a2014-09-04 11:32:09 -04001230#ifdef CONFIG_CPU_IDLE
1231static inline void idle_set_state(struct rq *rq,
1232 struct cpuidle_state *idle_state)
1233{
1234 rq->idle_state = idle_state;
1235}
1236
1237static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1238{
1239 WARN_ON(!rcu_read_lock_held());
1240 return rq->idle_state;
1241}
1242#else
1243static inline void idle_set_state(struct rq *rq,
1244 struct cpuidle_state *idle_state)
1245{
1246}
1247
1248static inline struct cpuidle_state *idle_get_state(struct rq *rq)
1249{
1250 return NULL;
1251}
1252#endif
1253
Peter Zijlstra029632f2011-10-25 10:00:11 +02001254extern void sysrq_sched_debug_show(void);
1255extern void sched_init_granularity(void);
1256extern void update_max_interval(void);
Juri Lelli1baca4c2013-11-07 14:43:38 +01001257
1258extern void init_sched_dl_class(void);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001259extern void init_sched_rt_class(void);
1260extern void init_sched_fair_class(void);
1261
Kirill Tkhai88751252014-06-29 00:03:57 +04001262extern void resched_curr(struct rq *rq);
Peter Zijlstra029632f2011-10-25 10:00:11 +02001263extern void resched_cpu(int cpu);
1264
1265extern struct rt_bandwidth def_rt_bandwidth;
1266extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
1267
Dario Faggioli332ac172013-11-07 14:43:45 +01001268extern struct dl_bandwidth def_dl_bandwidth;
1269extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
Dario Faggioliaab03e02013-11-28 11:14:43 +01001270extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
1271
Dario Faggioli332ac172013-11-07 14:43:45 +01001272unsigned long to_ratio(u64 period, u64 runtime);
1273
Yuyang Du540247f2015-07-15 08:04:39 +08001274extern void init_entity_runnable_average(struct sched_entity *se);
Alex Shia75cdaa2013-06-20 10:18:47 +08001275
Kirill Tkhai72465442014-05-09 03:00:14 +04001276static inline void add_nr_running(struct rq *rq, unsigned count)
Peter Zijlstra029632f2011-10-25 10:00:11 +02001277{
Kirill Tkhai72465442014-05-09 03:00:14 +04001278 unsigned prev_nr = rq->nr_running;
1279
1280 rq->nr_running = prev_nr + count;
Frederic Weisbecker9f3660c2013-04-20 14:35:09 +02001281
Kirill Tkhai72465442014-05-09 03:00:14 +04001282 if (prev_nr < 2 && rq->nr_running >= 2) {
Tim Chen4486edd2014-06-23 12:16:49 -07001283#ifdef CONFIG_SMP
1284 if (!rq->rd->overload)
1285 rq->rd->overload = true;
1286#endif
1287
1288#ifdef CONFIG_NO_HZ_FULL
Frederic Weisbecker9f3660c2013-04-20 14:35:09 +02001289 if (tick_nohz_full_cpu(rq->cpu)) {
			/*
			 * The tick is needed if more than one task runs on a CPU.
			 * Send the target an IPI to kick it out of nohz mode.
			 *
			 * We assume that the IPI implies a full memory barrier and
			 * that the new value of rq->nr_running is visible on
			 * reception by the target.
			 */
Frederic Weisbeckerfd2ac4f2014-03-18 21:12:53 +01001298 tick_nohz_full_kick_cpu(rq->cpu);
Frederic Weisbecker9f3660c2013-04-20 14:35:09 +02001299 }
Frederic Weisbecker9f3660c2013-04-20 14:35:09 +02001300#endif
Tim Chen4486edd2014-06-23 12:16:49 -07001301 }
Peter Zijlstra029632f2011-10-25 10:00:11 +02001302}
1303
Kirill Tkhai72465442014-05-09 03:00:14 +04001304static inline void sub_nr_running(struct rq *rq, unsigned count)
Peter Zijlstra029632f2011-10-25 10:00:11 +02001305{
Kirill Tkhai72465442014-05-09 03:00:14 +04001306 rq->nr_running -= count;
Peter Zijlstra029632f2011-10-25 10:00:11 +02001307}
1308
Frederic Weisbecker265f22a2013-05-03 03:39:05 +02001309static inline void rq_last_tick_reset(struct rq *rq)
1310{
1311#ifdef CONFIG_NO_HZ_FULL
1312 rq->last_sched_tick = jiffies;
1313#endif
1314}
1315
Peter Zijlstra029632f2011-10-25 10:00:11 +02001316extern void update_rq_clock(struct rq *rq);
1317
1318extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
1319extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
1320
1321extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
1322
1323extern const_debug unsigned int sysctl_sched_time_avg;
1324extern const_debug unsigned int sysctl_sched_nr_migrate;
1325extern const_debug unsigned int sysctl_sched_migration_cost;
1326
1327static inline u64 sched_avg_period(void)
1328{
1329 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
1330}
1331
Peter Zijlstra029632f2011-10-25 10:00:11 +02001332#ifdef CONFIG_SCHED_HRTICK
1333
1334/*
1335 * Use hrtick when:
1336 * - enabled by features
1337 * - hrtimer is actually high res
1338 */
1339static inline int hrtick_enabled(struct rq *rq)
1340{
1341 if (!sched_feat(HRTICK))
1342 return 0;
1343 if (!cpu_active(cpu_of(rq)))
1344 return 0;
1345 return hrtimer_is_hres_active(&rq->hrtick_timer);
1346}
1347
1348void hrtick_start(struct rq *rq, u64 delay);
1349
Mike Galbraithb39e66e2011-11-22 15:20:07 +01001350#else
1351
1352static inline int hrtick_enabled(struct rq *rq)
1353{
1354 return 0;
1355}
1356
Peter Zijlstra029632f2011-10-25 10:00:11 +02001357#endif /* CONFIG_SCHED_HRTICK */
1358
1359#ifdef CONFIG_SMP
1360extern void sched_avg_update(struct rq *rq);
Peter Zijlstradfbca412015-03-23 14:19:05 +01001361
1362#ifndef arch_scale_freq_capacity
1363static __always_inline
1364unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
1365{
1366 return SCHED_CAPACITY_SCALE;
1367}
1368#endif
Vincent Guittotb5b48602015-02-27 16:54:08 +01001369
Morten Rasmussen8cd56012015-08-14 17:23:10 +01001370#ifndef arch_scale_cpu_capacity
1371static __always_inline
1372unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
1373{
Dietmar Eggemanne3279a22015-08-15 00:04:41 +01001374 if (sd && (sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
Morten Rasmussen8cd56012015-08-14 17:23:10 +01001375 return sd->smt_gain / sd->span_weight;
1376
1377 return SCHED_CAPACITY_SCALE;
1378}
1379#endif
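/*
 * Worked example (illustrative, assuming the default smt_gain of 1178): on a
 * 2-way SMT core the SMT domain has span_weight == 2 and SD_SHARE_CPUCAPACITY
 * set, so each hardware thread reports 1178 / 2 == 589 out of
 * SCHED_CAPACITY_SCALE (1024), reflecting that two sibling threads share one
 * core. Without such a domain the fallback is the full SCHED_CAPACITY_SCALE.
 */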
1380
Peter Zijlstra029632f2011-10-25 10:00:11 +02001381static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
1382{
Vincent Guittotb5b48602015-02-27 16:54:08 +01001383 rq->rt_avg += rt_delta * arch_scale_freq_capacity(NULL, cpu_of(rq));
Peter Zijlstra029632f2011-10-25 10:00:11 +02001384 sched_avg_update(rq);
1385}
1386#else
1387static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
1388static inline void sched_avg_update(struct rq *rq) { }
1389#endif
1390
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001391/*
1392 * __task_rq_lock - lock the rq @p resides on.
1393 */
1394static inline struct rq *__task_rq_lock(struct task_struct *p)
1395 __acquires(rq->lock)
1396{
1397 struct rq *rq;
1398
1399 lockdep_assert_held(&p->pi_lock);
1400
1401 for (;;) {
1402 rq = task_rq(p);
1403 raw_spin_lock(&rq->lock);
Peter Zijlstracbce1a62015-06-11 14:46:54 +02001404 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
1405 lockdep_pin_lock(&rq->lock);
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001406 return rq;
Peter Zijlstracbce1a62015-06-11 14:46:54 +02001407 }
Peter Zijlstra3960c8c2015-02-17 13:22:25 +01001408 raw_spin_unlock(&rq->lock);
1409
1410 while (unlikely(task_on_rq_migrating(p)))
1411 cpu_relax();
1412 }
1413}

/*
 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
 */
static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(p->pi_lock)
	__acquires(rq->lock)
{
	struct rq *rq;

	for (;;) {
		raw_spin_lock_irqsave(&p->pi_lock, *flags);
		rq = task_rq(p);
		raw_spin_lock(&rq->lock);
		/*
		 *	move_queued_task()		task_rq_lock()
		 *
		 *	ACQUIRE (rq->lock)
		 *	[S] ->on_rq = MIGRATING		[L] rq = task_rq()
		 *	WMB (__set_task_cpu())		ACQUIRE (rq->lock);
		 *	[S] ->cpu = new_cpu		[L] task_rq()
		 *					[L] ->on_rq
		 *	RELEASE (rq->lock)
		 *
		 * If we observe the old cpu in task_rq_lock, the acquire of
		 * the old rq->lock will fully serialize against the stores.
		 *
		 * If we observe the new cpu in task_rq_lock, the acquire will
		 * pair with the WMB to ensure we must then also see migrating.
		 */
		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
			lockdep_pin_lock(&rq->lock);
			return rq;
		}
		raw_spin_unlock(&rq->lock);
		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);

		while (unlikely(task_on_rq_migrating(p)))
			cpu_relax();
	}
}

static inline void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock(&rq->lock);
}

static inline void
task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
	__releases(rq->lock)
	__releases(p->pi_lock)
{
	lockdep_unpin_lock(&rq->lock);
	raw_spin_unlock(&rq->lock);
	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
}
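
/*
 * A typical caller pairs these as follows (illustrative sketch only, not
 * code from this file):
 *
 *	unsigned long flags;
 *	struct rq *rq;
 *
 *	rq = task_rq_lock(p, &flags);
 *	... p cannot change runqueue or queued/migrating state here ...
 *	task_rq_unlock(rq, p, &flags);
 *
 * __task_rq_lock()/__task_rq_unlock() are the variants for callers that
 * already hold p->pi_lock with interrupts disabled.
 */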

#ifdef CONFIG_SMP
#ifdef CONFIG_PREEMPT

static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);

/*
 * fair double_lock_balance: Safely acquires both rq->locks in a fair
 * way at the expense of forcing extra atomic operations in all
 * invocations.  This assures that the double_lock is acquired using the
 * same underlying policy as the spinlock_t on this architecture, which
 * reduces latency compared to the unfair variant below.  However, it
 * also adds more overhead and therefore may reduce throughput.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	raw_spin_unlock(&this_rq->lock);
	double_rq_lock(this_rq, busiest);

	return 1;
}

#else
/*
 * Unfair double_lock_balance: Optimizes throughput at the expense of
 * latency by eliminating extra atomic operations when the locks are
 * already in proper order on entry.  This favors lower cpu-ids and will
 * grant the double lock to lower cpus over higher ids under contention,
 * regardless of entry order into the function.
 */
static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(this_rq->lock)
	__acquires(busiest->lock)
	__acquires(this_rq->lock)
{
	int ret = 0;

	if (unlikely(!raw_spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			raw_spin_unlock(&this_rq->lock);
			raw_spin_lock(&busiest->lock);
			raw_spin_lock_nested(&this_rq->lock,
					      SINGLE_DEPTH_NESTING);
			ret = 1;
		} else
			raw_spin_lock_nested(&busiest->lock,
					      SINGLE_DEPTH_NESTING);
	}
	return ret;
}

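/*
 * The "busiest < this_rq" pointer comparison gives every runqueue pair a
 * single global lock order (lower address first), matching the order used
 * by double_rq_lock() below; the return value tells the caller whether
 * this_rq->lock was dropped and re-acquired along the way.
 */
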
#endif /* CONFIG_PREEMPT */

/*
 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
 */
static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (unlikely(!irqs_disabled())) {
		/* printk() doesn't work well under rq->lock */
		raw_spin_unlock(&this_rq->lock);
		BUG_ON(1);
	}

	return _double_lock_balance(this_rq, busiest);
}

static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
	__releases(busiest->lock)
{
	raw_spin_unlock(&busiest->lock);
	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
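
/*
 * Illustrative balancing pattern (a sketch, not code from this file),
 * entered with this_rq->lock held and interrupts disabled:
 *
 *	if (double_lock_balance(this_rq, busiest))
 *		;	// this_rq->lock was dropped; revalidate cached state
 *	... pull tasks from busiest over to this_rq ...
 *	double_unlock_balance(this_rq, busiest);
 */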

static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_lock_irq(spinlock_t *l1, spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	spin_lock_irq(l1);
	spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}

static inline void double_raw_lock(raw_spinlock_t *l1, raw_spinlock_t *l2)
{
	if (l1 > l2)
		swap(l1, l2);

	raw_spin_lock(l1);
	raw_spin_lock_nested(l2, SINGLE_DEPTH_NESTING);
}
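
/*
 * All three helpers above take the two locks in ascending address order,
 * so any two CPUs locking the same pair always nest them the same way and
 * an ABBA deadlock is impossible; the _nested() call tells lockdep that
 * taking a second lock of the same class here is intentional.
 */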

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock;
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	if (rq1 == rq2) {
		raw_spin_lock(&rq1->lock);
		__acquire(rq2->lock);	/* Fake it out ;) */
	} else {
		if (rq1 < rq2) {
			raw_spin_lock(&rq1->lock);
			raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
		} else {
			raw_spin_lock(&rq2->lock);
			raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
		}
	}
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	raw_spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		raw_spin_unlock(&rq2->lock);
	else
		__release(rq2->lock);
}

#else /* CONFIG_SMP */

/*
 * double_rq_lock - safely lock two runqueues
 *
 * Note this does not disable interrupts like task_rq_lock;
 * you need to do so manually before calling.
 */
static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
	__acquires(rq1->lock)
	__acquires(rq2->lock)
{
	BUG_ON(!irqs_disabled());
	BUG_ON(rq1 != rq2);
	raw_spin_lock(&rq1->lock);
	__acquire(rq2->lock);	/* Fake it out ;) */
}

/*
 * double_rq_unlock - safely unlock two runqueues
 *
 * Note this does not restore interrupts like task_rq_unlock;
 * you need to do so manually after calling.
 */
static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
	__releases(rq1->lock)
	__releases(rq2->lock)
{
	BUG_ON(rq1 != rq2);
	raw_spin_unlock(&rq1->lock);
	__release(rq2->lock);
}

#endif
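
/*
 * On !SMP there is only one runqueue, so the UP variants above insist that
 * both arguments name the same rq; the __acquire()/__release() pair merely
 * keeps the sparse lock annotations balanced.
 */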

extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);

#ifdef CONFIG_SCHED_DEBUG
extern void print_cfs_stats(struct seq_file *m, int cpu);
extern void print_rt_stats(struct seq_file *m, int cpu);
extern void print_dl_stats(struct seq_file *m, int cpu);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);

#ifdef CONFIG_NUMA_BALANCING
extern void
show_numa_stats(struct task_struct *p, struct seq_file *m);
extern void
print_numa_stats(struct seq_file *m, int node, unsigned long tsf,
		 unsigned long tpf, unsigned long gsf, unsigned long gpf);
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */

extern void init_cfs_rq(struct cfs_rq *cfs_rq);
extern void init_rt_rq(struct rt_rq *rt_rq);
extern void init_dl_rq(struct dl_rq *dl_rq);

extern void cfs_bandwidth_usage_inc(void);
extern void cfs_bandwidth_usage_dec(void);

#ifdef CONFIG_NO_HZ_COMMON
enum rq_nohz_flag_bits {
	NOHZ_TICK_STOPPED,
	NOHZ_BALANCE_KICK,
};

#define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
#endif
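
/*
 * Roughly: NOHZ_TICK_STOPPED records that this cpu has stopped its tick
 * while idle, and NOHZ_BALANCE_KICK marks it as having been asked to run
 * idle load balancing on behalf of the other, busy cpus.
 */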

#ifdef CONFIG_IRQ_TIME_ACCOUNTING

DECLARE_PER_CPU(u64, cpu_hardirq_time);
DECLARE_PER_CPU(u64, cpu_softirq_time);

#ifndef CONFIG_64BIT
DECLARE_PER_CPU(seqcount_t, irq_time_seq);

static inline void irq_time_write_begin(void)
{
	__this_cpu_inc(irq_time_seq.sequence);
	smp_wmb();
}

static inline void irq_time_write_end(void)
{
	smp_wmb();
	__this_cpu_inc(irq_time_seq.sequence);
}

static inline u64 irq_time_read(int cpu)
{
	u64 irq_time;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		irq_time = per_cpu(cpu_softirq_time, cpu) +
			   per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return irq_time;
}
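
/*
 * On 32-bit, a reader on another cpu could otherwise observe a torn 64-bit
 * counter, so writers bump the seqcount around their updates and
 * irq_time_read() retries until it gets a consistent snapshot of
 * softirq + hardirq time. On 64-bit the reads are naturally atomic, which
 * is why the variants below are empty or lock-free.
 */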
#else /* CONFIG_64BIT */
static inline void irq_time_write_begin(void)
{
}

static inline void irq_time_write_end(void)
{
}

static inline u64 irq_time_read(int cpu)
{
	return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
}
#endif /* CONFIG_64BIT */
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */