blob: 06db94bf47a8b33ea1d9549ea204ffa31ea8869d [file] [log] [blame]
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001/*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
Peter Zijlstra21805082007-08-25 18:41:53 +020018 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020021 */
22
Arjan van de Ven97455122008-01-25 21:08:34 +010023#include <linux/latencytop.h>
Christian Ehrhardt1983a922009-11-30 12:16:47 +010024#include <linux/sched.h>
Sisir Koppaka3436ae12011-03-26 18:22:55 +053025#include <linux/cpumask.h>
Peter Zijlstra029632f2011-10-25 10:00:11 +020026#include <linux/slab.h>
27#include <linux/profile.h>
28#include <linux/interrupt.h>
Peter Zijlstracbee9f82012-10-25 14:16:43 +020029#include <linux/mempolicy.h>
Mel Gormane14808b2012-11-19 10:59:15 +000030#include <linux/migrate.h>
Peter Zijlstracbee9f82012-10-25 14:16:43 +020031#include <linux/task_work.h>
Peter Zijlstra029632f2011-10-25 10:00:11 +020032
33#include <trace/events/sched.h>
34
35#include "sched.h"
Arjan van de Ven97455122008-01-25 21:08:34 +010036
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020037/*
Peter Zijlstra21805082007-08-25 18:41:53 +020038 * Targeted preemption latency for CPU-bound tasks:
Takuya Yoshikawa864616e2010-10-14 16:09:13 +090039 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020040 *
Peter Zijlstra21805082007-08-25 18:41:53 +020041 * NOTE: this latency value is not the same as the concept of
Ingo Molnard274a4c2007-10-15 17:00:14 +020042 * 'timeslice length' - timeslices in CFS are of variable length
43 * and have no persistent notion like in traditional, time-slice
44 * based scheduling concepts.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020045 *
Ingo Molnard274a4c2007-10-15 17:00:14 +020046 * (to see the precise effective timeslice length of your workload,
47 * run vmstat and monitor the context-switches (cs) field)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020048 */
Mike Galbraith21406922010-03-11 17:17:15 +010049unsigned int sysctl_sched_latency = 6000000ULL;
50unsigned int normalized_sysctl_sched_latency = 6000000ULL;
Ingo Molnar2bd8e6d2007-10-15 17:00:02 +020051
52/*
Christian Ehrhardt1983a922009-11-30 12:16:47 +010053 * The initial- and re-scaling of tunables is configurable
54 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
55 *
56 * Options are:
57 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
58 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
59 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
60 */
61enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 = SCHED_TUNABLESCALING_LOG;
63
64/*
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010065 * Minimal preemption granularity for CPU-bound tasks:
Takuya Yoshikawa864616e2010-10-14 16:09:13 +090066 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010067 */
Ingo Molnar0bf377b2010-09-12 08:14:52 +020068unsigned int sysctl_sched_min_granularity = 750000ULL;
69unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010070
71/*
72 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73 */
Ingo Molnar0bf377b2010-09-12 08:14:52 +020074static unsigned int sched_nr_latency = 8;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010075
76/*
Mike Galbraith2bba22c2009-09-09 15:41:37 +020077 * After fork, child runs first. If set to 0 (default) then
Ingo Molnar2bd8e6d2007-10-15 17:00:02 +020078 * parent will (try to) run first.
79 */
Mike Galbraith2bba22c2009-09-09 15:41:37 +020080unsigned int sysctl_sched_child_runs_first __read_mostly;
Peter Zijlstra21805082007-08-25 18:41:53 +020081
82/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020083 * SCHED_OTHER wake-up granularity.
Mike Galbraith172e0822009-09-09 15:41:37 +020084 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020085 *
86 * This option delays the preemption effects of decoupled workloads
87 * and reduces their over-scheduling. Synchronous workloads will still
88 * have immediate wakeup/sleep latencies.
89 */
Mike Galbraith172e0822009-09-09 15:41:37 +020090unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +010091unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020092
Ingo Molnarda84d962007-10-15 17:00:18 +020093const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
94
Paul Turnera7a4f8a2010-11-15 15:47:06 -080095/*
96 * The exponential sliding window over which load is averaged for shares
97 * distribution.
98 * (default: 10msec)
99 */
100unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101
Paul Turnerec12cb72011-07-21 09:43:30 -0700102#ifdef CONFIG_CFS_BANDWIDTH
103/*
104 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105 * each time a cfs_rq requests quota.
106 *
107 * Note: in the case that the slice exceeds the runtime remaining (either due
108 * to consumption or the quota being specified to be smaller than the slice)
109 * we will always only issue the remaining available time.
110 *
111 * default: 5 msec, units: microseconds
112 */
113unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
114#endif
115
Paul Gortmaker85276322013-04-19 15:10:50 -0400116static inline void update_load_add(struct load_weight *lw, unsigned long inc)
117{
118 lw->weight += inc;
119 lw->inv_weight = 0;
120}
121
122static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
123{
124 lw->weight -= dec;
125 lw->inv_weight = 0;
126}
127
128static inline void update_load_set(struct load_weight *lw, unsigned long w)
129{
130 lw->weight = w;
131 lw->inv_weight = 0;
132}
133
Peter Zijlstra029632f2011-10-25 10:00:11 +0200134/*
135 * Increase the granularity value when there are more CPUs,
136 * because with more CPUs the 'effective latency' as visible
137 * to users decreases. But the relationship is not linear,
138 * so pick a second-best guess by going with the log2 of the
139 * number of CPUs.
140 *
141 * This idea comes from the SD scheduler of Con Kolivas:
142 */
143static int get_update_sysctl_factor(void)
144{
145 unsigned int cpus = min_t(int, num_online_cpus(), 8);
146 unsigned int factor;
147
148 switch (sysctl_sched_tunable_scaling) {
149 case SCHED_TUNABLESCALING_NONE:
150 factor = 1;
151 break;
152 case SCHED_TUNABLESCALING_LINEAR:
153 factor = cpus;
154 break;
155 case SCHED_TUNABLESCALING_LOG:
156 default:
157 factor = 1 + ilog2(cpus);
158 break;
159 }
160
161 return factor;
162}
163
164static void update_sysctl(void)
165{
166 unsigned int factor = get_update_sysctl_factor();
167
168#define SET_SYSCTL(name) \
169 (sysctl_##name = (factor) * normalized_sysctl_##name)
170 SET_SYSCTL(sched_min_granularity);
171 SET_SYSCTL(sched_latency);
172 SET_SYSCTL(sched_wakeup_granularity);
173#undef SET_SYSCTL
174}
175
176void sched_init_granularity(void)
177{
178 update_sysctl();
179}
180
181#if BITS_PER_LONG == 32
182# define WMULT_CONST (~0UL)
183#else
184# define WMULT_CONST (1UL << 32)
185#endif
186
187#define WMULT_SHIFT 32
188
189/*
190 * Shift right and round:
191 */
192#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
193
194/*
195 * delta *= weight / lw
196 */
197static unsigned long
198calc_delta_mine(unsigned long delta_exec, unsigned long weight,
199 struct load_weight *lw)
200{
201 u64 tmp;
202
203 /*
204 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
205 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
206 * 2^SCHED_LOAD_RESOLUTION.
207 */
208 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
209 tmp = (u64)delta_exec * scale_load_down(weight);
210 else
211 tmp = (u64)delta_exec;
212
213 if (!lw->inv_weight) {
214 unsigned long w = scale_load_down(lw->weight);
215
216 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
217 lw->inv_weight = 1;
218 else if (unlikely(!w))
219 lw->inv_weight = WMULT_CONST;
220 else
221 lw->inv_weight = WMULT_CONST / w;
222 }
223
224 /*
225 * Check whether we'd overflow the 64-bit multiplication:
226 */
227 if (unlikely(tmp > WMULT_CONST))
228 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
229 WMULT_SHIFT/2);
230 else
231 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
232
233 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
234}
235
236
237const struct sched_class fair_sched_class;
Peter Zijlstraa4c2f002008-10-17 19:27:03 +0200238
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200239/**************************************************************
240 * CFS operations on generic schedulable entities:
241 */
242
243#ifdef CONFIG_FAIR_GROUP_SCHED
244
245/* cpu runqueue to which this cfs_rq is attached */
246static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
247{
248 return cfs_rq->rq;
249}
250
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200251/* An entity is a task if it doesn't "own" a runqueue */
252#define entity_is_task(se) (!se->my_q)
253
Peter Zijlstra8f488942009-07-24 12:25:30 +0200254static inline struct task_struct *task_of(struct sched_entity *se)
255{
256#ifdef CONFIG_SCHED_DEBUG
257 WARN_ON_ONCE(!entity_is_task(se));
258#endif
259 return container_of(se, struct task_struct, se);
260}
261
Peter Zijlstrab7581492008-04-19 19:45:00 +0200262/* Walk up scheduling entities hierarchy */
263#define for_each_sched_entity(se) \
264 for (; se; se = se->parent)
265
266static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
267{
268 return p->se.cfs_rq;
269}
270
271/* runqueue on which this entity is (to be) queued */
272static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
273{
274 return se->cfs_rq;
275}
276
277/* runqueue "owned" by this group */
278static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
279{
280 return grp->my_q;
281}
282
Paul Turneraff3e492012-10-04 13:18:30 +0200283static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
284 int force_update);
Paul Turner9ee474f2012-10-04 13:18:30 +0200285
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800286static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
287{
288 if (!cfs_rq->on_list) {
Paul Turner67e86252010-11-15 15:47:05 -0800289 /*
290 * Ensure we either appear before our parent (if already
291 * enqueued) or force our parent to appear after us when it is
292 * enqueued. The fact that we always enqueue bottom-up
293 * reduces this to two cases.
294 */
295 if (cfs_rq->tg->parent &&
296 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
297 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800298 &rq_of(cfs_rq)->leaf_cfs_rq_list);
Paul Turner67e86252010-11-15 15:47:05 -0800299 } else {
300 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
301 &rq_of(cfs_rq)->leaf_cfs_rq_list);
302 }
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800303
304 cfs_rq->on_list = 1;
Paul Turner9ee474f2012-10-04 13:18:30 +0200305 /* We should have no load, but we need to update last_decay. */
Paul Turneraff3e492012-10-04 13:18:30 +0200306 update_cfs_rq_blocked_load(cfs_rq, 0);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800307 }
308}
309
310static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
311{
312 if (cfs_rq->on_list) {
313 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
314 cfs_rq->on_list = 0;
315 }
316}
317
Peter Zijlstrab7581492008-04-19 19:45:00 +0200318/* Iterate thr' all leaf cfs_rq's on a runqueue */
319#define for_each_leaf_cfs_rq(rq, cfs_rq) \
320 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
321
322/* Do the two (enqueued) entities belong to the same group ? */
323static inline int
324is_same_group(struct sched_entity *se, struct sched_entity *pse)
325{
326 if (se->cfs_rq == pse->cfs_rq)
327 return 1;
328
329 return 0;
330}
331
332static inline struct sched_entity *parent_entity(struct sched_entity *se)
333{
334 return se->parent;
335}
336
Peter Zijlstra464b7522008-10-24 11:06:15 +0200337/* return depth at which a sched entity is present in the hierarchy */
338static inline int depth_se(struct sched_entity *se)
339{
340 int depth = 0;
341
342 for_each_sched_entity(se)
343 depth++;
344
345 return depth;
346}
347
348static void
349find_matching_se(struct sched_entity **se, struct sched_entity **pse)
350{
351 int se_depth, pse_depth;
352
353 /*
354 * preemption test can be made between sibling entities who are in the
355 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
356 * both tasks until we find their ancestors who are siblings of common
357 * parent.
358 */
359
360 /* First walk up until both entities are at same depth */
361 se_depth = depth_se(*se);
362 pse_depth = depth_se(*pse);
363
364 while (se_depth > pse_depth) {
365 se_depth--;
366 *se = parent_entity(*se);
367 }
368
369 while (pse_depth > se_depth) {
370 pse_depth--;
371 *pse = parent_entity(*pse);
372 }
373
374 while (!is_same_group(*se, *pse)) {
375 *se = parent_entity(*se);
376 *pse = parent_entity(*pse);
377 }
378}
379
Peter Zijlstra8f488942009-07-24 12:25:30 +0200380#else /* !CONFIG_FAIR_GROUP_SCHED */
381
382static inline struct task_struct *task_of(struct sched_entity *se)
383{
384 return container_of(se, struct task_struct, se);
385}
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200386
387static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
388{
389 return container_of(cfs_rq, struct rq, cfs);
390}
391
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200392#define entity_is_task(se) 1
393
Peter Zijlstrab7581492008-04-19 19:45:00 +0200394#define for_each_sched_entity(se) \
395 for (; se; se = NULL)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200396
Peter Zijlstrab7581492008-04-19 19:45:00 +0200397static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200398{
Peter Zijlstrab7581492008-04-19 19:45:00 +0200399 return &task_rq(p)->cfs;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200400}
401
Peter Zijlstrab7581492008-04-19 19:45:00 +0200402static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
403{
404 struct task_struct *p = task_of(se);
405 struct rq *rq = task_rq(p);
406
407 return &rq->cfs;
408}
409
410/* runqueue "owned" by this group */
411static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
412{
413 return NULL;
414}
415
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800416static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
417{
418}
419
420static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
421{
422}
423
Peter Zijlstrab7581492008-04-19 19:45:00 +0200424#define for_each_leaf_cfs_rq(rq, cfs_rq) \
425 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
426
427static inline int
428is_same_group(struct sched_entity *se, struct sched_entity *pse)
429{
430 return 1;
431}
432
433static inline struct sched_entity *parent_entity(struct sched_entity *se)
434{
435 return NULL;
436}
437
Peter Zijlstra464b7522008-10-24 11:06:15 +0200438static inline void
439find_matching_se(struct sched_entity **se, struct sched_entity **pse)
440{
441}
442
Peter Zijlstrab7581492008-04-19 19:45:00 +0200443#endif /* CONFIG_FAIR_GROUP_SCHED */
444
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -0700445static __always_inline
446void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200447
448/**************************************************************
449 * Scheduling class tree data structure manipulation methods:
450 */
451
Andrei Epure1bf08232013-03-12 21:12:24 +0200452static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
Peter Zijlstra02e04312007-10-15 17:00:07 +0200453{
Andrei Epure1bf08232013-03-12 21:12:24 +0200454 s64 delta = (s64)(vruntime - max_vruntime);
Peter Zijlstra368059a2007-10-15 17:00:11 +0200455 if (delta > 0)
Andrei Epure1bf08232013-03-12 21:12:24 +0200456 max_vruntime = vruntime;
Peter Zijlstra02e04312007-10-15 17:00:07 +0200457
Andrei Epure1bf08232013-03-12 21:12:24 +0200458 return max_vruntime;
Peter Zijlstra02e04312007-10-15 17:00:07 +0200459}
460
Ingo Molnar0702e3e2007-10-15 17:00:14 +0200461static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
Peter Zijlstrab0ffd242007-10-15 17:00:12 +0200462{
463 s64 delta = (s64)(vruntime - min_vruntime);
464 if (delta < 0)
465 min_vruntime = vruntime;
466
467 return min_vruntime;
468}
469
Fabio Checconi54fdc582009-07-16 12:32:27 +0200470static inline int entity_before(struct sched_entity *a,
471 struct sched_entity *b)
472{
473 return (s64)(a->vruntime - b->vruntime) < 0;
474}
475
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200476static void update_min_vruntime(struct cfs_rq *cfs_rq)
477{
478 u64 vruntime = cfs_rq->min_vruntime;
479
480 if (cfs_rq->curr)
481 vruntime = cfs_rq->curr->vruntime;
482
483 if (cfs_rq->rb_leftmost) {
484 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
485 struct sched_entity,
486 run_node);
487
Peter Zijlstrae17036d2009-01-15 14:53:39 +0100488 if (!cfs_rq->curr)
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200489 vruntime = se->vruntime;
490 else
491 vruntime = min_vruntime(vruntime, se->vruntime);
492 }
493
Andrei Epure1bf08232013-03-12 21:12:24 +0200494 /* ensure we never gain time by being placed backwards. */
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200495 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
Peter Zijlstra3fe16982011-04-05 17:23:48 +0200496#ifndef CONFIG_64BIT
497 smp_wmb();
498 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
499#endif
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200500}
501
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200502/*
503 * Enqueue an entity into the rb-tree:
504 */
Ingo Molnar0702e3e2007-10-15 17:00:14 +0200505static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200506{
507 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
508 struct rb_node *parent = NULL;
509 struct sched_entity *entry;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200510 int leftmost = 1;
511
512 /*
513 * Find the right place in the rbtree:
514 */
515 while (*link) {
516 parent = *link;
517 entry = rb_entry(parent, struct sched_entity, run_node);
518 /*
519 * We dont care about collisions. Nodes with
520 * the same key stay together.
521 */
Stephan Baerwolf2bd2d6f2011-07-20 14:46:59 +0200522 if (entity_before(se, entry)) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200523 link = &parent->rb_left;
524 } else {
525 link = &parent->rb_right;
526 leftmost = 0;
527 }
528 }
529
530 /*
531 * Maintain a cache of leftmost tree entries (it is frequently
532 * used):
533 */
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200534 if (leftmost)
Ingo Molnar57cb4992007-10-15 17:00:11 +0200535 cfs_rq->rb_leftmost = &se->run_node;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200536
537 rb_link_node(&se->run_node, parent, link);
538 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200539}
540
Ingo Molnar0702e3e2007-10-15 17:00:14 +0200541static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200542{
Peter Zijlstra3fe69742008-03-14 20:55:51 +0100543 if (cfs_rq->rb_leftmost == &se->run_node) {
544 struct rb_node *next_node;
Peter Zijlstra3fe69742008-03-14 20:55:51 +0100545
546 next_node = rb_next(&se->run_node);
547 cfs_rq->rb_leftmost = next_node;
Peter Zijlstra3fe69742008-03-14 20:55:51 +0100548 }
Ingo Molnare9acbff2007-10-15 17:00:04 +0200549
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200550 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200551}
552
Peter Zijlstra029632f2011-10-25 10:00:11 +0200553struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200554{
Peter Zijlstraf4b67552008-11-04 21:25:07 +0100555 struct rb_node *left = cfs_rq->rb_leftmost;
556
557 if (!left)
558 return NULL;
559
560 return rb_entry(left, struct sched_entity, run_node);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200561}
562
Rik van Rielac53db52011-02-01 09:51:03 -0500563static struct sched_entity *__pick_next_entity(struct sched_entity *se)
564{
565 struct rb_node *next = rb_next(&se->run_node);
566
567 if (!next)
568 return NULL;
569
570 return rb_entry(next, struct sched_entity, run_node);
571}
572
573#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +0200574struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaeb73b02007-10-15 17:00:05 +0200575{
Ingo Molnar7eee3e62008-02-22 10:32:21 +0100576 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +0200577
Balbir Singh70eee742008-02-22 13:25:53 +0530578 if (!last)
579 return NULL;
Ingo Molnar7eee3e62008-02-22 10:32:21 +0100580
581 return rb_entry(last, struct sched_entity, run_node);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +0200582}
583
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200584/**************************************************************
585 * Scheduling class statistics methods:
586 */
587
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100588int sched_proc_update_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -0700589 void __user *buffer, size_t *lenp,
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100590 loff_t *ppos)
591{
Alexey Dobriyan8d65af72009-09-23 15:57:19 -0700592 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100593 int factor = get_update_sysctl_factor();
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100594
595 if (ret || !write)
596 return ret;
597
598 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
599 sysctl_sched_min_granularity);
600
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100601#define WRT_SYSCTL(name) \
602 (normalized_sysctl_##name = sysctl_##name / (factor))
603 WRT_SYSCTL(sched_min_granularity);
604 WRT_SYSCTL(sched_latency);
605 WRT_SYSCTL(sched_wakeup_granularity);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100606#undef WRT_SYSCTL
607
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100608 return 0;
609}
610#endif
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200611
612/*
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200613 * delta /= w
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200614 */
615static inline unsigned long
616calc_delta_fair(unsigned long delta, struct sched_entity *se)
617{
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200618 if (unlikely(se->load.weight != NICE_0_LOAD))
619 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200620
621 return delta;
622}
623
624/*
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200625 * The idea is to set a period in which each task runs once.
626 *
Borislav Petkov532b1852012-08-08 16:16:04 +0200627 * When there are too many tasks (sched_nr_latency) we have to stretch
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200628 * this period because otherwise the slices get too small.
629 *
630 * p = (nr <= nl) ? l : l*nr/nl
631 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200632static u64 __sched_period(unsigned long nr_running)
633{
634 u64 period = sysctl_sched_latency;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100635 unsigned long nr_latency = sched_nr_latency;
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200636
637 if (unlikely(nr_running > nr_latency)) {
Peter Zijlstra4bf0b772008-01-25 21:08:21 +0100638 period = sysctl_sched_min_granularity;
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200639 period *= nr_running;
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200640 }
641
642 return period;
643}
644
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200645/*
646 * We calculate the wall-time slice from the period by taking a part
647 * proportional to the weight.
648 *
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200649 * s = p*P[w/rw]
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200650 */
Peter Zijlstra6d0f0ebd2007-10-15 17:00:05 +0200651static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
Peter Zijlstra21805082007-08-25 18:41:53 +0200652{
Mike Galbraith0a582442009-01-02 12:16:42 +0100653 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200654
Mike Galbraith0a582442009-01-02 12:16:42 +0100655 for_each_sched_entity(se) {
Lin Ming6272d682009-01-15 17:17:15 +0100656 struct load_weight *load;
Christian Engelmayer3104bf02009-06-16 10:35:12 +0200657 struct load_weight lw;
Lin Ming6272d682009-01-15 17:17:15 +0100658
659 cfs_rq = cfs_rq_of(se);
660 load = &cfs_rq->load;
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200661
Mike Galbraith0a582442009-01-02 12:16:42 +0100662 if (unlikely(!se->on_rq)) {
Christian Engelmayer3104bf02009-06-16 10:35:12 +0200663 lw = cfs_rq->load;
Mike Galbraith0a582442009-01-02 12:16:42 +0100664
665 update_load_add(&lw, se->load.weight);
666 load = &lw;
667 }
668 slice = calc_delta_mine(slice, se->load.weight, load);
669 }
670 return slice;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200671}
672
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200673/*
Andrei Epure660cc002013-03-11 12:03:20 +0200674 * We calculate the vruntime slice of a to-be-inserted task.
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200675 *
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200676 * vs = s/w
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200677 */
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200678static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200679{
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200680 return calc_delta_fair(sched_slice(cfs_rq, se), se);
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200681}
682
Alex Shia75cdaa2013-06-20 10:18:47 +0800683#ifdef CONFIG_SMP
684static inline void __update_task_entity_contrib(struct sched_entity *se);
685
686/* Give new task start runnable values to heavy its load in infant time */
687void init_task_runnable_average(struct task_struct *p)
688{
689 u32 slice;
690
691 p->se.avg.decay_count = 0;
692 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
693 p->se.avg.runnable_avg_sum = slice;
694 p->se.avg.runnable_avg_period = slice;
695 __update_task_entity_contrib(&p->se);
696}
697#else
698void init_task_runnable_average(struct task_struct *p)
699{
700}
701#endif
702
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200703/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200704 * Update the current task's runtime statistics. Skip current tasks that
705 * are not in our scheduling class.
706 */
707static inline void
Ingo Molnar8ebc91d2007-10-15 17:00:03 +0200708__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
709 unsigned long delta_exec)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200710{
Ingo Molnarbbdba7c2007-10-15 17:00:06 +0200711 unsigned long delta_exec_weighted;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200712
Lucas De Marchi41acab82010-03-10 23:37:45 -0300713 schedstat_set(curr->statistics.exec_max,
714 max((u64)delta_exec, curr->statistics.exec_max));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200715
716 curr->sum_exec_runtime += delta_exec;
Ingo Molnar7a62eab2007-10-15 17:00:06 +0200717 schedstat_add(cfs_rq, exec_clock, delta_exec);
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200718 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +0100719
Ingo Molnare9acbff2007-10-15 17:00:04 +0200720 curr->vruntime += delta_exec_weighted;
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200721 update_min_vruntime(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200722}
723
Ingo Molnarb7cc0892007-08-09 11:16:47 +0200724static void update_curr(struct cfs_rq *cfs_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200725{
Ingo Molnar429d43b2007-10-15 17:00:03 +0200726 struct sched_entity *curr = cfs_rq->curr;
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200727 u64 now = rq_clock_task(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200728 unsigned long delta_exec;
729
730 if (unlikely(!curr))
731 return;
732
733 /*
734 * Get the amount of time the current task was running
735 * since the last time we changed load (this cannot
736 * overflow on 32 bits):
737 */
Ingo Molnar8ebc91d2007-10-15 17:00:03 +0200738 delta_exec = (unsigned long)(now - curr->exec_start);
Peter Zijlstra34f28ec2008-12-16 08:45:31 +0100739 if (!delta_exec)
740 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200741
Ingo Molnar8ebc91d2007-10-15 17:00:03 +0200742 __update_curr(cfs_rq, curr, delta_exec);
743 curr->exec_start = now;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100744
745 if (entity_is_task(curr)) {
746 struct task_struct *curtask = task_of(curr);
747
Ingo Molnarf977bb42009-09-13 18:15:54 +0200748 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100749 cpuacct_charge(curtask, delta_exec);
Frank Mayharf06febc2008-09-12 09:54:39 -0700750 account_group_exec_runtime(curtask, delta_exec);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100751 }
Paul Turnerec12cb72011-07-21 09:43:30 -0700752
753 account_cfs_rq_runtime(cfs_rq, delta_exec);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200754}
755
756static inline void
Ingo Molnar5870db52007-08-09 11:16:47 +0200757update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200758{
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200759 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200760}
761
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200762/*
763 * Task is being enqueued - update stats:
764 */
Ingo Molnard2417e52007-08-09 11:16:47 +0200765static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200766{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200767 /*
768 * Are we enqueueing a waiting task? (for current tasks
769 * a dequeue/enqueue event is a NOP)
770 */
Ingo Molnar429d43b2007-10-15 17:00:03 +0200771 if (se != cfs_rq->curr)
Ingo Molnar5870db52007-08-09 11:16:47 +0200772 update_stats_wait_start(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200773}
774
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200775static void
Ingo Molnar9ef0a962007-08-09 11:16:47 +0200776update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200777{
Lucas De Marchi41acab82010-03-10 23:37:45 -0300778 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200779 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
Lucas De Marchi41acab82010-03-10 23:37:45 -0300780 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
781 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200782 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
Peter Zijlstra768d0c22009-07-23 20:13:26 +0200783#ifdef CONFIG_SCHEDSTATS
784 if (entity_is_task(se)) {
785 trace_sched_stat_wait(task_of(se),
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200786 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
Peter Zijlstra768d0c22009-07-23 20:13:26 +0200787 }
788#endif
Lucas De Marchi41acab82010-03-10 23:37:45 -0300789 schedstat_set(se->statistics.wait_start, 0);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200790}
791
792static inline void
Ingo Molnar19b6a2e2007-08-09 11:16:48 +0200793update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200794{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200795 /*
796 * Mark the end of the wait period if dequeueing a
797 * waiting task:
798 */
Ingo Molnar429d43b2007-10-15 17:00:03 +0200799 if (se != cfs_rq->curr)
Ingo Molnar9ef0a962007-08-09 11:16:47 +0200800 update_stats_wait_end(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200801}
802
803/*
804 * We are picking a new current task - update its stats:
805 */
806static inline void
Ingo Molnar79303e92007-08-09 11:16:47 +0200807update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200808{
809 /*
810 * We are starting a new run period:
811 */
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200812 se->exec_start = rq_clock_task(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200813}
814
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200815/**************************************************
816 * Scheduling class queueing methods:
817 */
818
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200819#ifdef CONFIG_NUMA_BALANCING
820/*
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200821 * numa task sample period in ms
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200822 */
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200823unsigned int sysctl_numa_balancing_scan_period_min = 100;
Mel Gormanb8593bf2012-11-21 01:18:23 +0000824unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
825unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200826
827/* Portion of address space to scan in MB */
828unsigned int sysctl_numa_balancing_scan_size = 256;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200829
Peter Zijlstra4b96a292012-10-25 14:16:47 +0200830/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
831unsigned int sysctl_numa_balancing_scan_delay = 1000;
832
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200833static void task_numa_placement(struct task_struct *p)
834{
Hugh Dickins2832bc12012-12-19 17:42:16 -0800835 int seq;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200836
Hugh Dickins2832bc12012-12-19 17:42:16 -0800837 if (!p->mm) /* for example, ksmd faulting in a user's mm */
838 return;
839 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200840 if (p->numa_scan_seq == seq)
841 return;
842 p->numa_scan_seq = seq;
843
844 /* FIXME: Scheduling placement policy hints go here */
845}
846
847/*
848 * Got a PROT_NONE fault for a page on @node.
849 */
Mel Gormanb8593bf2012-11-21 01:18:23 +0000850void task_numa_fault(int node, int pages, bool migrated)
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200851{
852 struct task_struct *p = current;
853
Mel Gorman1a687c22012-11-22 11:16:36 +0000854 if (!sched_feat_numa(NUMA))
855 return;
856
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200857 /* FIXME: Allocate task-specific structure for placement policy here */
858
Mel Gormanfb003b82012-11-15 09:01:14 +0000859 /*
Mel Gormanb8593bf2012-11-21 01:18:23 +0000860 * If pages are properly placed (did not migrate) then scan slower.
861 * This is reset periodically in case of phase changes
Mel Gormanfb003b82012-11-15 09:01:14 +0000862 */
Mel Gormanb8593bf2012-11-21 01:18:23 +0000863 if (!migrated)
864 p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
865 p->numa_scan_period + jiffies_to_msecs(10));
Mel Gormanfb003b82012-11-15 09:01:14 +0000866
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200867 task_numa_placement(p);
868}
869
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200870static void reset_ptenuma_scan(struct task_struct *p)
871{
872 ACCESS_ONCE(p->mm->numa_scan_seq)++;
873 p->mm->numa_scan_offset = 0;
874}
875
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200876/*
877 * The expensive part of numa migration is done from task_work context.
878 * Triggered from task_tick_numa().
879 */
880void task_numa_work(struct callback_head *work)
881{
882 unsigned long migrate, next_scan, now = jiffies;
883 struct task_struct *p = current;
884 struct mm_struct *mm = p->mm;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200885 struct vm_area_struct *vma;
Mel Gorman9f406042012-11-14 18:34:32 +0000886 unsigned long start, end;
887 long pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200888
889 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
890
891 work->next = work; /* protect against double add */
892 /*
893 * Who cares about NUMA placement when they're dying.
894 *
895 * NOTE: make sure not to dereference p->mm before this check,
896 * exit_task_work() happens _after_ exit_mm() so we could be called
897 * without p->mm even though we still had it when we enqueued this
898 * work.
899 */
900 if (p->flags & PF_EXITING)
901 return;
902
903 /*
Mel Gorman5bca2302012-11-22 14:40:03 +0000904 * We do not care about task placement until a task runs on a node
905 * other than the first one used by the address space. This is
906 * largely because migrations are driven by what CPU the task
907 * is running on. If it's never scheduled on another node, it'll
908 * not migrate so why bother trapping the fault.
909 */
910 if (mm->first_nid == NUMA_PTE_SCAN_INIT)
911 mm->first_nid = numa_node_id();
912 if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
913 /* Are we running on a new node yet? */
914 if (numa_node_id() == mm->first_nid &&
915 !sched_feat_numa(NUMA_FORCE))
916 return;
917
918 mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
919 }
920
921 /*
Mel Gormanb8593bf2012-11-21 01:18:23 +0000922 * Reset the scan period if enough time has gone by. Objective is that
923 * scanning will be reduced if pages are properly placed. As tasks
924 * can enter different phases this needs to be re-examined. Lacking
925 * proper tracking of reference behaviour, this blunt hammer is used.
926 */
927 migrate = mm->numa_next_reset;
928 if (time_after(now, migrate)) {
929 p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
930 next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
931 xchg(&mm->numa_next_reset, next_scan);
932 }
933
934 /*
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200935 * Enforce maximal scan/migration frequency..
936 */
937 migrate = mm->numa_next_scan;
938 if (time_before(now, migrate))
939 return;
940
941 if (p->numa_scan_period == 0)
942 p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
943
Mel Gormanfb003b82012-11-15 09:01:14 +0000944 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200945 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
946 return;
947
Mel Gormane14808b2012-11-19 10:59:15 +0000948 /*
949 * Do not set pte_numa if the current running node is rate-limited.
950 * This loses statistics on the fault but if we are unwilling to
951 * migrate to this node, it is less likely we can do useful work
952 */
953 if (migrate_ratelimited(numa_node_id()))
954 return;
955
Mel Gorman9f406042012-11-14 18:34:32 +0000956 start = mm->numa_scan_offset;
957 pages = sysctl_numa_balancing_scan_size;
958 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
959 if (!pages)
960 return;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200961
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200962 down_read(&mm->mmap_sem);
Mel Gorman9f406042012-11-14 18:34:32 +0000963 vma = find_vma(mm, start);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200964 if (!vma) {
965 reset_ptenuma_scan(p);
Mel Gorman9f406042012-11-14 18:34:32 +0000966 start = 0;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200967 vma = mm->mmap;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200968 }
Mel Gorman9f406042012-11-14 18:34:32 +0000969 for (; vma; vma = vma->vm_next) {
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200970 if (!vma_migratable(vma))
971 continue;
972
973 /* Skip small VMAs. They are not likely to be of relevance */
Mel Gorman221392c2012-12-17 14:05:53 +0000974 if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200975 continue;
976
Mel Gorman9f406042012-11-14 18:34:32 +0000977 do {
978 start = max(start, vma->vm_start);
979 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
980 end = min(end, vma->vm_end);
981 pages -= change_prot_numa(vma, start, end);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200982
Mel Gorman9f406042012-11-14 18:34:32 +0000983 start = end;
984 if (pages <= 0)
985 goto out;
986 } while (end != vma->vm_end);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200987 }
988
Mel Gorman9f406042012-11-14 18:34:32 +0000989out:
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200990 /*
991 * It is possible to reach the end of the VMA list but the last few VMAs are
992 * not guaranteed to the vma_migratable. If they are not, we would find the
993 * !migratable VMA on the next scan but not reset the scanner to the start
994 * so check it now.
995 */
996 if (vma)
Mel Gorman9f406042012-11-14 18:34:32 +0000997 mm->numa_scan_offset = start;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200998 else
999 reset_ptenuma_scan(p);
1000 up_read(&mm->mmap_sem);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001001}
1002
1003/*
1004 * Drive the periodic memory faults..
1005 */
1006void task_tick_numa(struct rq *rq, struct task_struct *curr)
1007{
1008 struct callback_head *work = &curr->numa_work;
1009 u64 period, now;
1010
1011 /*
1012 * We don't care about NUMA placement if we don't have memory.
1013 */
1014 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1015 return;
1016
1017 /*
1018 * Using runtime rather than walltime has the dual advantage that
1019 * we (mostly) drive the selection from busy threads and that the
1020 * task needs to have done some actual work before we bother with
1021 * NUMA placement.
1022 */
1023 now = curr->se.sum_exec_runtime;
1024 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1025
1026 if (now - curr->node_stamp > period) {
Peter Zijlstra4b96a292012-10-25 14:16:47 +02001027 if (!curr->node_stamp)
1028 curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001029 curr->node_stamp = now;
1030
1031 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1032 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1033 task_work_add(curr, work, true);
1034 }
1035 }
1036}
1037#else
1038static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1039{
1040}
1041#endif /* CONFIG_NUMA_BALANCING */
1042
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001043static void
1044account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1045{
1046 update_load_add(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001047 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02001048 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001049#ifdef CONFIG_SMP
1050 if (entity_is_task(se))
Peter Zijlstraeb953082012-04-17 13:38:40 +02001051 list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001052#endif
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001053 cfs_rq->nr_running++;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001054}
1055
1056static void
1057account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1058{
1059 update_load_sub(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001060 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02001061 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001062 if (entity_is_task(se))
Bharata B Raob87f1722008-09-25 09:53:54 +05301063 list_del_init(&se->group_node);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001064 cfs_rq->nr_running--;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001065}
1066
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001067#ifdef CONFIG_FAIR_GROUP_SCHED
1068# ifdef CONFIG_SMP
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001069static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1070{
1071 long tg_weight;
1072
1073 /*
1074 * Use this CPU's actual weight instead of the last load_contribution
1075 * to gain a more accurate current total weight. See
1076 * update_cfs_rq_load_contribution().
1077 */
Alex Shibf5b9862013-06-20 10:18:54 +08001078 tg_weight = atomic_long_read(&tg->load_avg);
Paul Turner82958362012-10-04 13:18:31 +02001079 tg_weight -= cfs_rq->tg_load_contrib;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001080 tg_weight += cfs_rq->load.weight;
1081
1082 return tg_weight;
1083}
1084
Paul Turner6d5ab292011-01-21 20:45:01 -08001085static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001086{
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001087 long tg_weight, load, shares;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001088
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001089 tg_weight = calc_tg_weight(tg, cfs_rq);
Paul Turner6d5ab292011-01-21 20:45:01 -08001090 load = cfs_rq->load.weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001091
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001092 shares = (tg->shares * load);
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001093 if (tg_weight)
1094 shares /= tg_weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001095
1096 if (shares < MIN_SHARES)
1097 shares = MIN_SHARES;
1098 if (shares > tg->shares)
1099 shares = tg->shares;
1100
1101 return shares;
1102}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001103# else /* CONFIG_SMP */
Paul Turner6d5ab292011-01-21 20:45:01 -08001104static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001105{
1106 return tg->shares;
1107}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001108# endif /* CONFIG_SMP */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001109static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1110 unsigned long weight)
1111{
Paul Turner19e5eeb2010-12-15 19:10:18 -08001112 if (se->on_rq) {
1113 /* commit outstanding execution time */
1114 if (cfs_rq->curr == se)
1115 update_curr(cfs_rq);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001116 account_entity_dequeue(cfs_rq, se);
Paul Turner19e5eeb2010-12-15 19:10:18 -08001117 }
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001118
1119 update_load_set(&se->load, weight);
1120
1121 if (se->on_rq)
1122 account_entity_enqueue(cfs_rq, se);
1123}
1124
Paul Turner82958362012-10-04 13:18:31 +02001125static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1126
Paul Turner6d5ab292011-01-21 20:45:01 -08001127static void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001128{
1129 struct task_group *tg;
1130 struct sched_entity *se;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001131 long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001132
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001133 tg = cfs_rq->tg;
1134 se = tg->se[cpu_of(rq_of(cfs_rq))];
Paul Turner64660c82011-07-21 09:43:36 -07001135 if (!se || throttled_hierarchy(cfs_rq))
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001136 return;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001137#ifndef CONFIG_SMP
1138 if (likely(se->load.weight == tg->shares))
1139 return;
1140#endif
Paul Turner6d5ab292011-01-21 20:45:01 -08001141 shares = calc_cfs_shares(cfs_rq, tg);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001142
1143 reweight_entity(cfs_rq_of(se), se, shares);
1144}
1145#else /* CONFIG_FAIR_GROUP_SCHED */
Paul Turner6d5ab292011-01-21 20:45:01 -08001146static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001147{
1148}
1149#endif /* CONFIG_FAIR_GROUP_SCHED */
1150
Alex Shi141965c2013-06-26 13:05:39 +08001151#ifdef CONFIG_SMP
Paul Turner9d85f212012-10-04 13:18:29 +02001152/*
Paul Turner5b51f2f2012-10-04 13:18:32 +02001153 * We choose a half-life close to 1 scheduling period.
1154 * Note: The tables below are dependent on this value.
1155 */
1156#define LOAD_AVG_PERIOD 32
1157#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1158#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
1159
1160/* Precomputed fixed inverse multiplies for multiplication by y^n */
1161static const u32 runnable_avg_yN_inv[] = {
1162 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1163 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1164 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1165 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1166 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1167 0x85aac367, 0x82cd8698,
1168};
1169
1170/*
1171 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
1172 * over-estimates when re-combining.
1173 */
1174static const u32 runnable_avg_yN_sum[] = {
1175 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1176 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1177 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1178};
1179
1180/*
Paul Turner9d85f212012-10-04 13:18:29 +02001181 * Approximate:
1182 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
1183 */
1184static __always_inline u64 decay_load(u64 val, u64 n)
1185{
Paul Turner5b51f2f2012-10-04 13:18:32 +02001186 unsigned int local_n;
1187
1188 if (!n)
1189 return val;
1190 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1191 return 0;
1192
1193 /* after bounds checking we can collapse to 32-bit */
1194 local_n = n;
1195
1196 /*
1197 * As y^PERIOD = 1/2, we can combine
1198 * y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
1199 * With a look-up table which covers k^n (n<PERIOD)
1200 *
1201 * To achieve constant time decay_load.
1202 */
1203 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
1204 val >>= local_n / LOAD_AVG_PERIOD;
1205 local_n %= LOAD_AVG_PERIOD;
Paul Turner9d85f212012-10-04 13:18:29 +02001206 }
1207
Paul Turner5b51f2f2012-10-04 13:18:32 +02001208 val *= runnable_avg_yN_inv[local_n];
1209 /* We don't use SRR here since we always want to round down. */
1210 return val >> 32;
1211}
1212
1213/*
1214 * For updates fully spanning n periods, the contribution to runnable
1215 * average will be: \Sum 1024*y^n
1216 *
1217 * We can compute this reasonably efficiently by combining:
1218 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
1219 */
1220static u32 __compute_runnable_contrib(u64 n)
1221{
1222 u32 contrib = 0;
1223
1224 if (likely(n <= LOAD_AVG_PERIOD))
1225 return runnable_avg_yN_sum[n];
1226 else if (unlikely(n >= LOAD_AVG_MAX_N))
1227 return LOAD_AVG_MAX;
1228
1229 /* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
1230 do {
1231 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
1232 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
1233
1234 n -= LOAD_AVG_PERIOD;
1235 } while (n > LOAD_AVG_PERIOD);
1236
1237 contrib = decay_load(contrib, n);
1238 return contrib + runnable_avg_yN_sum[n];
Paul Turner9d85f212012-10-04 13:18:29 +02001239}
1240
1241/*
1242 * We can represent the historical contribution to runnable average as the
1243 * coefficients of a geometric series. To do this we sub-divide our runnable
1244 * history into segments of approximately 1ms (1024us); label the segment that
1245 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1246 *
1247 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1248 * p0 p1 p2
1249 * (now) (~1ms ago) (~2ms ago)
1250 *
1251 * Let u_i denote the fraction of p_i that the entity was runnable.
1252 *
1253 * We then designate the fractions u_i as our co-efficients, yielding the
1254 * following representation of historical load:
1255 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1256 *
1257 * We choose y based on the with of a reasonably scheduling period, fixing:
1258 * y^32 = 0.5
1259 *
1260 * This means that the contribution to load ~32ms ago (u_32) will be weighted
1261 * approximately half as much as the contribution to load within the last ms
1262 * (u_0).
1263 *
1264 * When a period "rolls over" and we have new u_0`, multiplying the previous
1265 * sum again by y is sufficient to update:
1266 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1267 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1268 */
1269static __always_inline int __update_entity_runnable_avg(u64 now,
1270 struct sched_avg *sa,
1271 int runnable)
1272{
Paul Turner5b51f2f2012-10-04 13:18:32 +02001273 u64 delta, periods;
1274 u32 runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02001275 int delta_w, decayed = 0;
1276
1277 delta = now - sa->last_runnable_update;
1278 /*
1279 * This should only happen when time goes backwards, which it
1280 * unfortunately does during sched clock init when we swap over to TSC.
1281 */
1282 if ((s64)delta < 0) {
1283 sa->last_runnable_update = now;
1284 return 0;
1285 }
1286
1287 /*
1288 * Use 1024ns as the unit of measurement since it's a reasonable
1289 * approximation of 1us and fast to compute.
1290 */
1291 delta >>= 10;
1292 if (!delta)
1293 return 0;
1294 sa->last_runnable_update = now;
1295
1296 /* delta_w is the amount already accumulated against our next period */
1297 delta_w = sa->runnable_avg_period % 1024;
1298 if (delta + delta_w >= 1024) {
1299 /* period roll-over */
1300 decayed = 1;
1301
1302 /*
1303 * Now that we know we're crossing a period boundary, figure
1304 * out how much from delta we need to complete the current
1305 * period and accrue it.
1306 */
1307 delta_w = 1024 - delta_w;
Paul Turner5b51f2f2012-10-04 13:18:32 +02001308 if (runnable)
1309 sa->runnable_avg_sum += delta_w;
1310 sa->runnable_avg_period += delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02001311
Paul Turner5b51f2f2012-10-04 13:18:32 +02001312 delta -= delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02001313
Paul Turner5b51f2f2012-10-04 13:18:32 +02001314 /* Figure out how many additional periods this update spans */
1315 periods = delta / 1024;
1316 delta %= 1024;
1317
1318 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1319 periods + 1);
1320 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1321 periods + 1);
1322
1323 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
1324 runnable_contrib = __compute_runnable_contrib(periods);
1325 if (runnable)
1326 sa->runnable_avg_sum += runnable_contrib;
1327 sa->runnable_avg_period += runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02001328 }
1329
1330 /* Remainder of delta accrued against u_0` */
1331 if (runnable)
1332 sa->runnable_avg_sum += delta;
1333 sa->runnable_avg_period += delta;
1334
1335 return decayed;
1336}
1337
Paul Turner9ee474f2012-10-04 13:18:30 +02001338/* Synchronize an entity's decay with its parenting cfs_rq.*/
Paul Turneraff3e492012-10-04 13:18:30 +02001339static inline u64 __synchronize_entity_decay(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02001340{
1341 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1342 u64 decays = atomic64_read(&cfs_rq->decay_counter);
1343
1344 decays -= se->avg.decay_count;
1345 if (!decays)
Paul Turneraff3e492012-10-04 13:18:30 +02001346 return 0;
Paul Turner9ee474f2012-10-04 13:18:30 +02001347
1348 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1349 se->avg.decay_count = 0;
Paul Turneraff3e492012-10-04 13:18:30 +02001350
1351 return decays;
Paul Turner9ee474f2012-10-04 13:18:30 +02001352}
1353
Paul Turnerc566e8e2012-10-04 13:18:30 +02001354#ifdef CONFIG_FAIR_GROUP_SCHED
1355static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1356 int force_update)
1357{
1358 struct task_group *tg = cfs_rq->tg;
Alex Shibf5b9862013-06-20 10:18:54 +08001359 long tg_contrib;
Paul Turnerc566e8e2012-10-04 13:18:30 +02001360
1361 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1362 tg_contrib -= cfs_rq->tg_load_contrib;
1363
Alex Shibf5b9862013-06-20 10:18:54 +08001364 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1365 atomic_long_add(tg_contrib, &tg->load_avg);
Paul Turnerc566e8e2012-10-04 13:18:30 +02001366 cfs_rq->tg_load_contrib += tg_contrib;
1367 }
1368}
Paul Turner8165e142012-10-04 13:18:31 +02001369
Paul Turnerbb17f652012-10-04 13:18:31 +02001370/*
1371 * Aggregate cfs_rq runnable averages into an equivalent task_group
1372 * representation for computing load contributions.
1373 */
1374static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1375 struct cfs_rq *cfs_rq)
1376{
1377 struct task_group *tg = cfs_rq->tg;
1378 long contrib;
1379
1380 /* The fraction of a cpu used by this cfs_rq */
1381 contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1382 sa->runnable_avg_period + 1);
1383 contrib -= cfs_rq->tg_runnable_contrib;
1384
1385 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1386 atomic_add(contrib, &tg->runnable_avg);
1387 cfs_rq->tg_runnable_contrib += contrib;
1388 }
1389}
1390
Paul Turner8165e142012-10-04 13:18:31 +02001391static inline void __update_group_entity_contrib(struct sched_entity *se)
1392{
1393 struct cfs_rq *cfs_rq = group_cfs_rq(se);
1394 struct task_group *tg = cfs_rq->tg;
Paul Turnerbb17f652012-10-04 13:18:31 +02001395 int runnable_avg;
1396
Paul Turner8165e142012-10-04 13:18:31 +02001397 u64 contrib;
1398
1399 contrib = cfs_rq->tg_load_contrib * tg->shares;
Alex Shibf5b9862013-06-20 10:18:54 +08001400 se->avg.load_avg_contrib = div_u64(contrib,
1401 atomic_long_read(&tg->load_avg) + 1);
Paul Turnerbb17f652012-10-04 13:18:31 +02001402
1403 /*
1404 * For group entities we need to compute a correction term in the case
1405 * that they are consuming <1 cpu so that we would contribute the same
1406 * load as a task of equal weight.
1407 *
1408 * Explicitly co-ordinating this measurement would be expensive, but
1409 * fortunately the sum of each cpus contribution forms a usable
1410 * lower-bound on the true value.
1411 *
1412 * Consider the aggregate of 2 contributions. Either they are disjoint
1413 * (and the sum represents true value) or they are disjoint and we are
1414 * understating by the aggregate of their overlap.
1415 *
1416 * Extending this to N cpus, for a given overlap, the maximum amount we
1417 * understand is then n_i(n_i+1)/2 * w_i where n_i is the number of
1418 * cpus that overlap for this interval and w_i is the interval width.
1419 *
1420 * On a small machine; the first term is well-bounded which bounds the
1421 * total error since w_i is a subset of the period. Whereas on a
1422 * larger machine, while this first term can be larger, if w_i is the
1423 * of consequential size guaranteed to see n_i*w_i quickly converge to
1424 * our upper bound of 1-cpu.
1425 */
1426 runnable_avg = atomic_read(&tg->runnable_avg);
1427 if (runnable_avg < NICE_0_LOAD) {
1428 se->avg.load_avg_contrib *= runnable_avg;
1429 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1430 }
Paul Turner8165e142012-10-04 13:18:31 +02001431}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001432#else
1433static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1434 int force_update) {}
Paul Turnerbb17f652012-10-04 13:18:31 +02001435static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1436 struct cfs_rq *cfs_rq) {}
Paul Turner8165e142012-10-04 13:18:31 +02001437static inline void __update_group_entity_contrib(struct sched_entity *se) {}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001438#endif
1439
Paul Turner8165e142012-10-04 13:18:31 +02001440static inline void __update_task_entity_contrib(struct sched_entity *se)
1441{
1442 u32 contrib;
1443
1444 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1445 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1446 contrib /= (se->avg.runnable_avg_period + 1);
1447 se->avg.load_avg_contrib = scale_load(contrib);
1448}
1449
Paul Turner2dac7542012-10-04 13:18:30 +02001450/* Compute the current contribution to load_avg by se, return any delta */
1451static long __update_entity_load_avg_contrib(struct sched_entity *se)
1452{
1453 long old_contrib = se->avg.load_avg_contrib;
1454
Paul Turner8165e142012-10-04 13:18:31 +02001455 if (entity_is_task(se)) {
1456 __update_task_entity_contrib(se);
1457 } else {
Paul Turnerbb17f652012-10-04 13:18:31 +02001458 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
Paul Turner8165e142012-10-04 13:18:31 +02001459 __update_group_entity_contrib(se);
1460 }
Paul Turner2dac7542012-10-04 13:18:30 +02001461
1462 return se->avg.load_avg_contrib - old_contrib;
1463}
1464
Paul Turner9ee474f2012-10-04 13:18:30 +02001465static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1466 long load_contrib)
1467{
1468 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1469 cfs_rq->blocked_load_avg -= load_contrib;
1470 else
1471 cfs_rq->blocked_load_avg = 0;
1472}
1473
Paul Turnerf1b17282012-10-04 13:18:31 +02001474static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1475
Paul Turner9d85f212012-10-04 13:18:29 +02001476/* Update a sched_entity's runnable average */
Paul Turner9ee474f2012-10-04 13:18:30 +02001477static inline void update_entity_load_avg(struct sched_entity *se,
1478 int update_cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02001479{
Paul Turner2dac7542012-10-04 13:18:30 +02001480 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1481 long contrib_delta;
Paul Turnerf1b17282012-10-04 13:18:31 +02001482 u64 now;
Paul Turner2dac7542012-10-04 13:18:30 +02001483
Paul Turnerf1b17282012-10-04 13:18:31 +02001484 /*
1485 * For a group entity we need to use their owned cfs_rq_clock_task() in
1486 * case they are the parent of a throttled hierarchy.
1487 */
1488 if (entity_is_task(se))
1489 now = cfs_rq_clock_task(cfs_rq);
1490 else
1491 now = cfs_rq_clock_task(group_cfs_rq(se));
1492
1493 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
Paul Turner2dac7542012-10-04 13:18:30 +02001494 return;
1495
1496 contrib_delta = __update_entity_load_avg_contrib(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02001497
1498 if (!update_cfs_rq)
1499 return;
1500
Paul Turner2dac7542012-10-04 13:18:30 +02001501 if (se->on_rq)
1502 cfs_rq->runnable_load_avg += contrib_delta;
Paul Turner9ee474f2012-10-04 13:18:30 +02001503 else
1504 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1505}
1506
1507/*
1508 * Decay the load contributed by all blocked children and account this so that
1509 * their contribution may appropriately discounted when they wake up.
1510 */
Paul Turneraff3e492012-10-04 13:18:30 +02001511static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001512{
Paul Turnerf1b17282012-10-04 13:18:31 +02001513 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001514 u64 decays;
1515
1516 decays = now - cfs_rq->last_decay;
Paul Turneraff3e492012-10-04 13:18:30 +02001517 if (!decays && !force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001518 return;
1519
Alex Shi25099402013-06-20 10:18:55 +08001520 if (atomic_long_read(&cfs_rq->removed_load)) {
1521 unsigned long removed_load;
1522 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
Paul Turneraff3e492012-10-04 13:18:30 +02001523 subtract_blocked_load_contrib(cfs_rq, removed_load);
1524 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001525
Paul Turneraff3e492012-10-04 13:18:30 +02001526 if (decays) {
1527 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1528 decays);
1529 atomic64_add(decays, &cfs_rq->decay_counter);
1530 cfs_rq->last_decay = now;
1531 }
Paul Turnerc566e8e2012-10-04 13:18:30 +02001532
1533 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
Paul Turner9d85f212012-10-04 13:18:29 +02001534}
Ben Segall18bf2802012-10-04 12:51:20 +02001535
1536static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1537{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001538 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
Paul Turnerbb17f652012-10-04 13:18:31 +02001539 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
Ben Segall18bf2802012-10-04 12:51:20 +02001540}
Paul Turner2dac7542012-10-04 13:18:30 +02001541
1542/* Add the load generated by se into cfs_rq's child load-average */
1543static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001544 struct sched_entity *se,
1545 int wakeup)
Paul Turner2dac7542012-10-04 13:18:30 +02001546{
Paul Turneraff3e492012-10-04 13:18:30 +02001547 /*
1548 * We track migrations using entity decay_count <= 0, on a wake-up
1549 * migration we use a negative decay count to track the remote decays
1550 * accumulated while sleeping.
Alex Shia75cdaa2013-06-20 10:18:47 +08001551 *
1552 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
1553 * are seen by enqueue_entity_load_avg() as a migration with an already
1554 * constructed load_avg_contrib.
Paul Turneraff3e492012-10-04 13:18:30 +02001555 */
1556 if (unlikely(se->avg.decay_count <= 0)) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001557 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
Paul Turneraff3e492012-10-04 13:18:30 +02001558 if (se->avg.decay_count) {
1559 /*
1560 * In a wake-up migration we have to approximate the
1561 * time sleeping. This is because we can't synchronize
1562 * clock_task between the two cpus, and it is not
1563 * guaranteed to be read-safe. Instead, we can
1564 * approximate this using our carried decays, which are
1565 * explicitly atomically readable.
1566 */
1567 se->avg.last_runnable_update -= (-se->avg.decay_count)
1568 << 20;
1569 update_entity_load_avg(se, 0);
1570 /* Indicate that we're now synchronized and on-rq */
1571 se->avg.decay_count = 0;
1572 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001573 wakeup = 0;
1574 } else {
Alex Shi282cf492013-06-20 10:18:48 +08001575 /*
1576 * Task re-woke on same cpu (or else migrate_task_rq_fair()
1577 * would have made count negative); we must be careful to avoid
1578 * double-accounting blocked time after synchronizing decays.
1579 */
1580 se->avg.last_runnable_update += __synchronize_entity_decay(se)
1581 << 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001582 }
1583
Paul Turneraff3e492012-10-04 13:18:30 +02001584 /* migrated tasks did not contribute to our blocked load */
1585 if (wakeup) {
Paul Turner9ee474f2012-10-04 13:18:30 +02001586 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turneraff3e492012-10-04 13:18:30 +02001587 update_entity_load_avg(se, 0);
1588 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001589
Paul Turner2dac7542012-10-04 13:18:30 +02001590 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02001591 /* we force update consideration on load-balancer moves */
1592 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
Paul Turner2dac7542012-10-04 13:18:30 +02001593}
1594
Paul Turner9ee474f2012-10-04 13:18:30 +02001595/*
1596 * Remove se's load from this cfs_rq child load-average, if the entity is
1597 * transitioning to a blocked state we track its projected decay using
1598 * blocked_load_avg.
1599 */
Paul Turner2dac7542012-10-04 13:18:30 +02001600static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001601 struct sched_entity *se,
1602 int sleep)
Paul Turner2dac7542012-10-04 13:18:30 +02001603{
Paul Turner9ee474f2012-10-04 13:18:30 +02001604 update_entity_load_avg(se, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02001605 /* we force update consideration on load-balancer moves */
1606 update_cfs_rq_blocked_load(cfs_rq, !sleep);
Paul Turner9ee474f2012-10-04 13:18:30 +02001607
Paul Turner2dac7542012-10-04 13:18:30 +02001608 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
Paul Turner9ee474f2012-10-04 13:18:30 +02001609 if (sleep) {
1610 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1611 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1612 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
Paul Turner2dac7542012-10-04 13:18:30 +02001613}
Vincent Guittot642dbc32013-04-18 18:34:26 +02001614
1615/*
1616 * Update the rq's load with the elapsed running time before entering
1617 * idle. if the last scheduled task is not a CFS task, idle_enter will
1618 * be the only way to update the runnable statistic.
1619 */
1620void idle_enter_fair(struct rq *this_rq)
1621{
1622 update_rq_runnable_avg(this_rq, 1);
1623}
1624
1625/*
1626 * Update the rq's load with the elapsed idle time before a task is
1627 * scheduled. if the newly scheduled task is not a CFS task, idle_exit will
1628 * be the only way to update the runnable statistic.
1629 */
1630void idle_exit_fair(struct rq *this_rq)
1631{
1632 update_rq_runnable_avg(this_rq, 0);
1633}
1634
Paul Turner9d85f212012-10-04 13:18:29 +02001635#else
Paul Turner9ee474f2012-10-04 13:18:30 +02001636static inline void update_entity_load_avg(struct sched_entity *se,
1637 int update_cfs_rq) {}
Ben Segall18bf2802012-10-04 12:51:20 +02001638static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001639static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001640 struct sched_entity *se,
1641 int wakeup) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001642static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001643 struct sched_entity *se,
1644 int sleep) {}
Paul Turneraff3e492012-10-04 13:18:30 +02001645static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1646 int force_update) {}
Paul Turner9d85f212012-10-04 13:18:29 +02001647#endif
1648
Ingo Molnar2396af62007-08-09 11:16:48 +02001649static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001650{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001651#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02001652 struct task_struct *tsk = NULL;
1653
1654 if (entity_is_task(se))
1655 tsk = task_of(se);
1656
Lucas De Marchi41acab82010-03-10 23:37:45 -03001657 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001658 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001659
1660 if ((s64)delta < 0)
1661 delta = 0;
1662
Lucas De Marchi41acab82010-03-10 23:37:45 -03001663 if (unlikely(delta > se->statistics.sleep_max))
1664 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001665
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001666 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001667 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01001668
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001669 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02001670 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001671 trace_sched_stat_sleep(tsk, delta);
1672 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001673 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03001674 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001675 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001676
1677 if ((s64)delta < 0)
1678 delta = 0;
1679
Lucas De Marchi41acab82010-03-10 23:37:45 -03001680 if (unlikely(delta > se->statistics.block_max))
1681 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001682
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001683 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001684 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02001685
Peter Zijlstrae4143142009-07-23 20:13:26 +02001686 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001687 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001688 se->statistics.iowait_sum += delta;
1689 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001690 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001691 }
1692
Andrew Vaginb781a602011-11-28 12:03:35 +03001693 trace_sched_stat_blocked(tsk, delta);
1694
Peter Zijlstrae4143142009-07-23 20:13:26 +02001695 /*
1696 * Blocking time is in units of nanosecs, so shift by
1697 * 20 to get a milliseconds-range estimation of the
1698 * amount of time that the task spent sleeping:
1699 */
1700 if (unlikely(prof_on == SLEEP_PROFILING)) {
1701 profile_hits(SLEEP_PROFILING,
1702 (void *)get_wchan(tsk),
1703 delta >> 20);
1704 }
1705 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02001706 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001707 }
1708#endif
1709}
1710
Peter Zijlstraddc97292007-10-15 17:00:10 +02001711static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1712{
1713#ifdef CONFIG_SCHED_DEBUG
1714 s64 d = se->vruntime - cfs_rq->min_vruntime;
1715
1716 if (d < 0)
1717 d = -d;
1718
1719 if (d > 3*sysctl_sched_latency)
1720 schedstat_inc(cfs_rq, nr_spread_over);
1721#endif
1722}
1723
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001724static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001725place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1726{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02001727 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001728
Peter Zijlstra2cb86002007-11-09 22:39:37 +01001729 /*
1730 * The 'current' period is already promised to the current tasks,
1731 * however the extra weight of the new task will slow them down a
1732 * little, place the new task so that it fits in the slot that
1733 * stays open at the end.
1734 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001735 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02001736 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001737
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001738 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01001739 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001740 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001741
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001742 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001743 * Halve their sleep time's effect, to allow
1744 * for a gentler effect of sleepers:
1745 */
1746 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1747 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02001748
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001749 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001750 }
1751
Mike Galbraithb5d9d732009-09-08 11:12:28 +02001752 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05301753 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001754}
1755
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001756static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1757
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001758static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001759enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001760{
1761 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001762 * Update the normalized vruntime before updating min_vruntime
Kamalesh Babulal0fc576d2013-06-27 11:24:18 +05301763 * through calling update_curr().
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001764 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001765 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001766 se->vruntime += cfs_rq->min_vruntime;
1767
1768 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001769 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001770 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02001771 update_curr(cfs_rq);
Paul Turnerf269ae02012-10-04 13:18:31 +02001772 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001773 account_entity_enqueue(cfs_rq, se);
1774 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001775
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001776 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001777 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02001778 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02001779 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001780
Ingo Molnard2417e52007-08-09 11:16:47 +02001781 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02001782 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001783 if (se != cfs_rq->curr)
1784 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001785 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001786
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001787 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001788 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001789 check_enqueue_throttle(cfs_rq);
1790 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001791}
1792
Rik van Riel2c13c9192011-02-01 09:48:37 -05001793static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01001794{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001795 for_each_sched_entity(se) {
1796 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1797 if (cfs_rq->last == se)
1798 cfs_rq->last = NULL;
1799 else
1800 break;
1801 }
1802}
Peter Zijlstra2002c692008-11-11 11:52:33 +01001803
Rik van Riel2c13c9192011-02-01 09:48:37 -05001804static void __clear_buddies_next(struct sched_entity *se)
1805{
1806 for_each_sched_entity(se) {
1807 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1808 if (cfs_rq->next == se)
1809 cfs_rq->next = NULL;
1810 else
1811 break;
1812 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01001813}
1814
Rik van Rielac53db52011-02-01 09:51:03 -05001815static void __clear_buddies_skip(struct sched_entity *se)
1816{
1817 for_each_sched_entity(se) {
1818 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1819 if (cfs_rq->skip == se)
1820 cfs_rq->skip = NULL;
1821 else
1822 break;
1823 }
1824}
1825
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001826static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1827{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001828 if (cfs_rq->last == se)
1829 __clear_buddies_last(se);
1830
1831 if (cfs_rq->next == se)
1832 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05001833
1834 if (cfs_rq->skip == se)
1835 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001836}
1837
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07001838static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07001839
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001840static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001841dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001842{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001843 /*
1844 * Update run-time statistics of the 'current'.
1845 */
1846 update_curr(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001847 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001848
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02001849 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001850 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02001851#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001852 if (entity_is_task(se)) {
1853 struct task_struct *tsk = task_of(se);
1854
1855 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001856 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001857 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001858 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001859 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02001860#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02001861 }
1862
Peter Zijlstra2002c692008-11-11 11:52:33 +01001863 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01001864
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001865 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001866 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001867 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001868 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001869
1870 /*
1871 * Normalize the entity after updating the min_vruntime because the
1872 * update can refer to the ->curr item and we need to reflect this
1873 * movement in our normalized position.
1874 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001875 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001876 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07001877
Paul Turnerd8b49862011-07-21 09:43:41 -07001878 /* return excess runtime on last dequeue */
1879 return_cfs_rq_runtime(cfs_rq);
1880
Peter Zijlstra1e876232011-05-17 16:21:10 -07001881 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001882 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001883}
1884
1885/*
1886 * Preempt the current task with a newly woken task if needed:
1887 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02001888static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02001889check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001890{
Peter Zijlstra11697832007-09-05 14:32:49 +02001891 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001892 struct sched_entity *se;
1893 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02001894
Peter Zijlstra6d0f0ebd2007-10-15 17:00:05 +02001895 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02001896 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01001897 if (delta_exec > ideal_runtime) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001898 resched_task(rq_of(cfs_rq)->curr);
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01001899 /*
1900 * The current task ran long enough, ensure it doesn't get
1901 * re-elected due to buddy favours.
1902 */
1903 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02001904 return;
1905 }
1906
1907 /*
1908 * Ensure that a task that missed wakeup preemption by a
1909 * narrow margin doesn't have to wait for a full slice.
1910 * This also mitigates buddy induced latencies under load.
1911 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02001912 if (delta_exec < sysctl_sched_min_granularity)
1913 return;
1914
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001915 se = __pick_first_entity(cfs_rq);
1916 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02001917
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001918 if (delta < 0)
1919 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01001920
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001921 if (delta > ideal_runtime)
1922 resched_task(rq_of(cfs_rq)->curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001923}
1924
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001925static void
Ingo Molnar8494f412007-08-09 11:16:48 +02001926set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001927{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001928 /* 'current' is not kept within the tree. */
1929 if (se->on_rq) {
1930 /*
1931 * Any task has to be enqueued before it get to execute on
1932 * a CPU. So account for the time it spent waiting on the
1933 * runqueue.
1934 */
1935 update_stats_wait_end(cfs_rq, se);
1936 __dequeue_entity(cfs_rq, se);
1937 }
1938
Ingo Molnar79303e92007-08-09 11:16:47 +02001939 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02001940 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02001941#ifdef CONFIG_SCHEDSTATS
1942 /*
1943 * Track our maximum slice length, if the CPU's load is at
1944 * least twice that of our own weight (i.e. dont track it
1945 * when there are only lesser-weight tasks around):
1946 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02001947 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001948 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02001949 se->sum_exec_runtime - se->prev_sum_exec_runtime);
1950 }
1951#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02001952 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001953}
1954
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02001955static int
1956wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1957
Rik van Rielac53db52011-02-01 09:51:03 -05001958/*
1959 * Pick the next process, keeping these things in mind, in this order:
1960 * 1) keep things fair between processes/task groups
1961 * 2) pick the "next" process, since someone really wants that to run
1962 * 3) pick the "last" process, for cache locality
1963 * 4) do not run the "skip" process, if something else is available
1964 */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01001965static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001966{
Rik van Rielac53db52011-02-01 09:51:03 -05001967 struct sched_entity *se = __pick_first_entity(cfs_rq);
Mike Galbraithf685cea2009-10-23 23:09:22 +02001968 struct sched_entity *left = se;
Peter Zijlstraf4b67552008-11-04 21:25:07 +01001969
Rik van Rielac53db52011-02-01 09:51:03 -05001970 /*
1971 * Avoid running the skip buddy, if running something else can
1972 * be done without getting too unfair.
1973 */
1974 if (cfs_rq->skip == se) {
1975 struct sched_entity *second = __pick_next_entity(se);
1976 if (second && wakeup_preempt_entity(second, left) < 1)
1977 se = second;
1978 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001979
Mike Galbraithf685cea2009-10-23 23:09:22 +02001980 /*
1981 * Prefer last buddy, try to return the CPU to a preempted task.
1982 */
1983 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1984 se = cfs_rq->last;
1985
Rik van Rielac53db52011-02-01 09:51:03 -05001986 /*
1987 * Someone really wants this to run. If it's not unfair, run it.
1988 */
1989 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1990 se = cfs_rq->next;
1991
Mike Galbraithf685cea2009-10-23 23:09:22 +02001992 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01001993
1994 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001995}
1996
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001997static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1998
Ingo Molnarab6cde22007-08-09 11:16:48 +02001999static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002000{
2001 /*
2002 * If still on the runqueue then deactivate_task()
2003 * was not called and update_curr() has to be done:
2004 */
2005 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002006 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002007
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002008 /* throttle cfs_rqs exceeding runtime */
2009 check_cfs_rq_runtime(cfs_rq);
2010
Peter Zijlstraddc97292007-10-15 17:00:10 +02002011 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002012 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02002013 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002014 /* Put 'current' back into the tree. */
2015 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02002016 /* in !on_rq case, update occurred at dequeue */
Paul Turner9ee474f2012-10-04 13:18:30 +02002017 update_entity_load_avg(prev, 1);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002018 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02002019 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002020}
2021
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002022static void
2023entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002024{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002025 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002026 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002027 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002028 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002029
Paul Turner43365bd2010-12-15 19:10:17 -08002030 /*
Paul Turner9d85f212012-10-04 13:18:29 +02002031 * Ensure that runnable average is periodically updated.
2032 */
Paul Turner9ee474f2012-10-04 13:18:30 +02002033 update_entity_load_avg(curr, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002034 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstrabf0bd942013-07-26 23:48:42 +02002035 update_cfs_shares(cfs_rq);
Paul Turner9d85f212012-10-04 13:18:29 +02002036
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002037#ifdef CONFIG_SCHED_HRTICK
2038 /*
2039 * queued ticks are scheduled to match the slice, so don't bother
2040 * validating it and just reschedule.
2041 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07002042 if (queued) {
2043 resched_task(rq_of(cfs_rq)->curr);
2044 return;
2045 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002046 /*
2047 * don't let the period tick interfere with the hrtick preemption
2048 */
2049 if (!sched_feat(DOUBLE_TICK) &&
2050 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2051 return;
2052#endif
2053
Yong Zhang2c2efae2011-07-29 16:20:33 +08002054 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002055 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002056}
2057
Paul Turnerab84d312011-07-21 09:43:28 -07002058
2059/**************************************************
2060 * CFS bandwidth control machinery
2061 */
2062
2063#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02002064
2065#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01002066static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02002067
2068static inline bool cfs_bandwidth_used(void)
2069{
Ingo Molnarc5905af2012-02-24 08:31:31 +01002070 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002071}
2072
2073void account_cfs_bandwidth_used(int enabled, int was_enabled)
2074{
2075 /* only need to count groups transitioning between enabled/!enabled */
2076 if (enabled && !was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002077 static_key_slow_inc(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002078 else if (!enabled && was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002079 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002080}
2081#else /* HAVE_JUMP_LABEL */
2082static bool cfs_bandwidth_used(void)
2083{
2084 return true;
2085}
2086
2087void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2088#endif /* HAVE_JUMP_LABEL */
2089
Paul Turnerab84d312011-07-21 09:43:28 -07002090/*
2091 * default period for cfs group bandwidth.
2092 * default: 0.1s, units: nanoseconds
2093 */
2094static inline u64 default_cfs_period(void)
2095{
2096 return 100000000ULL;
2097}
Paul Turnerec12cb72011-07-21 09:43:30 -07002098
2099static inline u64 sched_cfs_bandwidth_slice(void)
2100{
2101 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2102}
2103
Paul Turnera9cf55b2011-07-21 09:43:32 -07002104/*
2105 * Replenish runtime according to assigned quota and update expiration time.
2106 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2107 * additional synchronization around rq->lock.
2108 *
2109 * requires cfs_b->lock
2110 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02002111void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07002112{
2113 u64 now;
2114
2115 if (cfs_b->quota == RUNTIME_INF)
2116 return;
2117
2118 now = sched_clock_cpu(smp_processor_id());
2119 cfs_b->runtime = cfs_b->quota;
2120 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2121}
2122
Peter Zijlstra029632f2011-10-25 10:00:11 +02002123static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2124{
2125 return &tg->cfs_bandwidth;
2126}
2127
Paul Turnerf1b17282012-10-04 13:18:31 +02002128/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
2129static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2130{
2131 if (unlikely(cfs_rq->throttle_count))
2132 return cfs_rq->throttled_clock_task;
2133
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002134 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02002135}
2136
Paul Turner85dac902011-07-21 09:43:33 -07002137/* returns 0 on failure to allocate runtime */
2138static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07002139{
2140 struct task_group *tg = cfs_rq->tg;
2141 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002142 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002143
2144 /* note: this is a positive sum as runtime_remaining <= 0 */
2145 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2146
2147 raw_spin_lock(&cfs_b->lock);
2148 if (cfs_b->quota == RUNTIME_INF)
2149 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07002150 else {
Paul Turnera9cf55b2011-07-21 09:43:32 -07002151 /*
2152 * If the bandwidth pool has become inactive, then at least one
2153 * period must have elapsed since the last consumption.
2154 * Refresh the global state and ensure bandwidth timer becomes
2155 * active.
2156 */
2157 if (!cfs_b->timer_active) {
2158 __refill_cfs_bandwidth_runtime(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07002159 __start_cfs_bandwidth(cfs_b);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002160 }
Paul Turner58088ad2011-07-21 09:43:31 -07002161
2162 if (cfs_b->runtime > 0) {
2163 amount = min(cfs_b->runtime, min_amount);
2164 cfs_b->runtime -= amount;
2165 cfs_b->idle = 0;
2166 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002167 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07002168 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002169 raw_spin_unlock(&cfs_b->lock);
2170
2171 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002172 /*
2173 * we may have advanced our local expiration to account for allowed
2174 * spread between our sched_clock and the one on which runtime was
2175 * issued.
2176 */
2177 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2178 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07002179
2180 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002181}
2182
2183/*
2184 * Note: This depends on the synchronization provided by sched_clock and the
2185 * fact that rq->clock snapshots this value.
2186 */
2187static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2188{
2189 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002190
2191 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002192 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07002193 return;
2194
2195 if (cfs_rq->runtime_remaining < 0)
2196 return;
2197
2198 /*
2199 * If the local deadline has passed we have to consider the
2200 * possibility that our sched_clock is 'fast' and the global deadline
2201 * has not truly expired.
2202 *
2203 * Fortunately we can check determine whether this the case by checking
2204 * whether the global deadline has advanced.
2205 */
2206
2207 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2208 /* extend local deadline, drift is bounded above by 2 ticks */
2209 cfs_rq->runtime_expires += TICK_NSEC;
2210 } else {
2211 /* global deadline is ahead, expiration has passed */
2212 cfs_rq->runtime_remaining = 0;
2213 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002214}
2215
2216static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2217 unsigned long delta_exec)
2218{
Paul Turnera9cf55b2011-07-21 09:43:32 -07002219 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07002220 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002221 expire_cfs_rq_runtime(cfs_rq);
2222
2223 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07002224 return;
2225
Paul Turner85dac902011-07-21 09:43:33 -07002226 /*
2227 * if we're unable to extend our runtime we resched so that the active
2228 * hierarchy can be throttled
2229 */
2230 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2231 resched_task(rq_of(cfs_rq)->curr);
Paul Turnerec12cb72011-07-21 09:43:30 -07002232}
2233
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002234static __always_inline
2235void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07002236{
Paul Turner56f570e2011-11-07 20:26:33 -08002237 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07002238 return;
2239
2240 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2241}
2242
Paul Turner85dac902011-07-21 09:43:33 -07002243static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2244{
Paul Turner56f570e2011-11-07 20:26:33 -08002245 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07002246}
2247
Paul Turner64660c82011-07-21 09:43:36 -07002248/* check whether cfs_rq, or any parent, is throttled */
2249static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2250{
Paul Turner56f570e2011-11-07 20:26:33 -08002251 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07002252}
2253
2254/*
2255 * Ensure that neither of the group entities corresponding to src_cpu or
2256 * dest_cpu are members of a throttled hierarchy when performing group
2257 * load-balance operations.
2258 */
2259static inline int throttled_lb_pair(struct task_group *tg,
2260 int src_cpu, int dest_cpu)
2261{
2262 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2263
2264 src_cfs_rq = tg->cfs_rq[src_cpu];
2265 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2266
2267 return throttled_hierarchy(src_cfs_rq) ||
2268 throttled_hierarchy(dest_cfs_rq);
2269}
2270
2271/* updated child weight may affect parent so we have to do this bottom up */
2272static int tg_unthrottle_up(struct task_group *tg, void *data)
2273{
2274 struct rq *rq = data;
2275 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2276
2277 cfs_rq->throttle_count--;
2278#ifdef CONFIG_SMP
2279 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02002280 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002281 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02002282 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07002283 }
2284#endif
2285
2286 return 0;
2287}
2288
2289static int tg_throttle_down(struct task_group *tg, void *data)
2290{
2291 struct rq *rq = data;
2292 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2293
Paul Turner82958362012-10-04 13:18:31 +02002294 /* group is entering throttled state, stop time */
2295 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002296 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07002297 cfs_rq->throttle_count++;
2298
2299 return 0;
2300}
2301
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002302static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07002303{
2304 struct rq *rq = rq_of(cfs_rq);
2305 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2306 struct sched_entity *se;
2307 long task_delta, dequeue = 1;
2308
2309 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2310
Paul Turnerf1b17282012-10-04 13:18:31 +02002311 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07002312 rcu_read_lock();
2313 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2314 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07002315
2316 task_delta = cfs_rq->h_nr_running;
2317 for_each_sched_entity(se) {
2318 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2319 /* throttled entity or throttle-on-deactivate */
2320 if (!se->on_rq)
2321 break;
2322
2323 if (dequeue)
2324 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2325 qcfs_rq->h_nr_running -= task_delta;
2326
2327 if (qcfs_rq->load.weight)
2328 dequeue = 0;
2329 }
2330
2331 if (!se)
2332 rq->nr_running -= task_delta;
2333
2334 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002335 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07002336 raw_spin_lock(&cfs_b->lock);
2337 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2338 raw_spin_unlock(&cfs_b->lock);
2339}
2340
Peter Zijlstra029632f2011-10-25 10:00:11 +02002341void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07002342{
2343 struct rq *rq = rq_of(cfs_rq);
2344 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2345 struct sched_entity *se;
2346 int enqueue = 1;
2347 long task_delta;
2348
Michael Wang22b958d2013-06-04 14:23:39 +08002349 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07002350
2351 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02002352
2353 update_rq_clock(rq);
2354
Paul Turner671fd9d2011-07-21 09:43:34 -07002355 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002356 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07002357 list_del_rcu(&cfs_rq->throttled_list);
2358 raw_spin_unlock(&cfs_b->lock);
2359
Paul Turner64660c82011-07-21 09:43:36 -07002360 /* update hierarchical throttle state */
2361 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2362
Paul Turner671fd9d2011-07-21 09:43:34 -07002363 if (!cfs_rq->load.weight)
2364 return;
2365
2366 task_delta = cfs_rq->h_nr_running;
2367 for_each_sched_entity(se) {
2368 if (se->on_rq)
2369 enqueue = 0;
2370
2371 cfs_rq = cfs_rq_of(se);
2372 if (enqueue)
2373 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2374 cfs_rq->h_nr_running += task_delta;
2375
2376 if (cfs_rq_throttled(cfs_rq))
2377 break;
2378 }
2379
2380 if (!se)
2381 rq->nr_running += task_delta;
2382
2383 /* determine whether we need to wake up potentially idle cpu */
2384 if (rq->curr == rq->idle && rq->cfs.nr_running)
2385 resched_task(rq->curr);
2386}
2387
2388static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2389 u64 remaining, u64 expires)
2390{
2391 struct cfs_rq *cfs_rq;
2392 u64 runtime = remaining;
2393
2394 rcu_read_lock();
2395 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2396 throttled_list) {
2397 struct rq *rq = rq_of(cfs_rq);
2398
2399 raw_spin_lock(&rq->lock);
2400 if (!cfs_rq_throttled(cfs_rq))
2401 goto next;
2402
2403 runtime = -cfs_rq->runtime_remaining + 1;
2404 if (runtime > remaining)
2405 runtime = remaining;
2406 remaining -= runtime;
2407
2408 cfs_rq->runtime_remaining += runtime;
2409 cfs_rq->runtime_expires = expires;
2410
2411 /* we check whether we're throttled above */
2412 if (cfs_rq->runtime_remaining > 0)
2413 unthrottle_cfs_rq(cfs_rq);
2414
2415next:
2416 raw_spin_unlock(&rq->lock);
2417
2418 if (!remaining)
2419 break;
2420 }
2421 rcu_read_unlock();
2422
2423 return remaining;
2424}
2425
Paul Turner58088ad2011-07-21 09:43:31 -07002426/*
2427 * Responsible for refilling a task_group's bandwidth and unthrottling its
2428 * cfs_rqs as appropriate. If there has been no activity within the last
2429 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2430 * used to track this state.
2431 */
2432static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2433{
Paul Turner671fd9d2011-07-21 09:43:34 -07002434 u64 runtime, runtime_expires;
2435 int idle = 1, throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07002436
2437 raw_spin_lock(&cfs_b->lock);
2438 /* no need to continue the timer with no bandwidth constraint */
2439 if (cfs_b->quota == RUNTIME_INF)
2440 goto out_unlock;
2441
Paul Turner671fd9d2011-07-21 09:43:34 -07002442 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2443 /* idle depends on !throttled (for the case of a large deficit) */
2444 idle = cfs_b->idle && !throttled;
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002445 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07002446
Paul Turnera9cf55b2011-07-21 09:43:32 -07002447 /* if we're going inactive then everything else can be deferred */
2448 if (idle)
2449 goto out_unlock;
2450
2451 __refill_cfs_bandwidth_runtime(cfs_b);
2452
Paul Turner671fd9d2011-07-21 09:43:34 -07002453 if (!throttled) {
2454 /* mark as potentially idle for the upcoming period */
2455 cfs_b->idle = 1;
2456 goto out_unlock;
2457 }
Paul Turner58088ad2011-07-21 09:43:31 -07002458
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002459 /* account preceding periods in which throttling occurred */
2460 cfs_b->nr_throttled += overrun;
2461
Paul Turner671fd9d2011-07-21 09:43:34 -07002462 /*
2463 * There are throttled entities so we must first use the new bandwidth
2464 * to unthrottle them before making it generally available. This
2465 * ensures that all existing debts will be paid before a new cfs_rq is
2466 * allowed to run.
2467 */
2468 runtime = cfs_b->runtime;
2469 runtime_expires = cfs_b->runtime_expires;
2470 cfs_b->runtime = 0;
2471
2472 /*
2473 * This check is repeated as we are holding onto the new bandwidth
2474 * while we unthrottle. This can potentially race with an unthrottled
2475 * group trying to acquire new bandwidth from the global pool.
2476 */
2477 while (throttled && runtime > 0) {
2478 raw_spin_unlock(&cfs_b->lock);
2479 /* we can't nest cfs_b->lock while distributing bandwidth */
2480 runtime = distribute_cfs_runtime(cfs_b, runtime,
2481 runtime_expires);
2482 raw_spin_lock(&cfs_b->lock);
2483
2484 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2485 }
2486
2487 /* return (any) remaining runtime */
2488 cfs_b->runtime = runtime;
2489 /*
2490 * While we are ensured activity in the period following an
2491 * unthrottle, this also covers the case in which the new bandwidth is
2492 * insufficient to cover the existing bandwidth deficit. (Forcing the
2493 * timer to remain active while there are any throttled entities.)
2494 */
2495 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07002496out_unlock:
2497 if (idle)
2498 cfs_b->timer_active = 0;
2499 raw_spin_unlock(&cfs_b->lock);
2500
2501 return idle;
2502}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002503
Paul Turnerd8b49862011-07-21 09:43:41 -07002504/* a cfs_rq won't donate quota below this amount */
2505static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2506/* minimum remaining period time to redistribute slack quota */
2507static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2508/* how long we wait to gather additional slack before distributing */
2509static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2510
2511/* are we near the end of the current quota period? */
2512static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2513{
2514 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2515 u64 remaining;
2516
2517 /* if the call-back is running a quota refresh is already occurring */
2518 if (hrtimer_callback_running(refresh_timer))
2519 return 1;
2520
2521 /* is a quota refresh about to occur? */
2522 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2523 if (remaining < min_expire)
2524 return 1;
2525
2526 return 0;
2527}
2528
2529static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2530{
2531 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2532
2533 /* if there's a quota refresh soon don't bother with slack */
2534 if (runtime_refresh_within(cfs_b, min_left))
2535 return;
2536
2537 start_bandwidth_timer(&cfs_b->slack_timer,
2538 ns_to_ktime(cfs_bandwidth_slack_period));
2539}
2540
2541/* we know any runtime found here is valid as update_curr() precedes return */
2542static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2543{
2544 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2545 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2546
2547 if (slack_runtime <= 0)
2548 return;
2549
2550 raw_spin_lock(&cfs_b->lock);
2551 if (cfs_b->quota != RUNTIME_INF &&
2552 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2553 cfs_b->runtime += slack_runtime;
2554
2555 /* we are under rq->lock, defer unthrottling using a timer */
2556 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2557 !list_empty(&cfs_b->throttled_cfs_rq))
2558 start_cfs_slack_bandwidth(cfs_b);
2559 }
2560 raw_spin_unlock(&cfs_b->lock);
2561
2562 /* even if it's not valid for return we don't want to try again */
2563 cfs_rq->runtime_remaining -= slack_runtime;
2564}
2565
2566static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2567{
Paul Turner56f570e2011-11-07 20:26:33 -08002568 if (!cfs_bandwidth_used())
2569 return;
2570
Paul Turnerfccfdc62011-11-07 20:26:34 -08002571 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07002572 return;
2573
2574 __return_cfs_rq_runtime(cfs_rq);
2575}
2576
2577/*
2578 * This is done with a timer (instead of inline with bandwidth return) since
2579 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2580 */
2581static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2582{
2583 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2584 u64 expires;
2585
2586 /* confirm we're still not at a refresh boundary */
2587 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2588 return;
2589
2590 raw_spin_lock(&cfs_b->lock);
2591 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2592 runtime = cfs_b->runtime;
2593 cfs_b->runtime = 0;
2594 }
2595 expires = cfs_b->runtime_expires;
2596 raw_spin_unlock(&cfs_b->lock);
2597
2598 if (!runtime)
2599 return;
2600
2601 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2602
2603 raw_spin_lock(&cfs_b->lock);
2604 if (expires == cfs_b->runtime_expires)
2605 cfs_b->runtime = runtime;
2606 raw_spin_unlock(&cfs_b->lock);
2607}
2608
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002609/*
2610 * When a group wakes up we want to make sure that its quota is not already
2611 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
2612 * runtime as update_curr() throttling can not not trigger until it's on-rq.
2613 */
2614static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2615{
Paul Turner56f570e2011-11-07 20:26:33 -08002616 if (!cfs_bandwidth_used())
2617 return;
2618
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002619 /* an active group must be handled by the update_curr()->put() path */
2620 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2621 return;
2622
2623 /* ensure the group is not already throttled */
2624 if (cfs_rq_throttled(cfs_rq))
2625 return;
2626
2627 /* update runtime allocation */
2628 account_cfs_rq_runtime(cfs_rq, 0);
2629 if (cfs_rq->runtime_remaining <= 0)
2630 throttle_cfs_rq(cfs_rq);
2631}
2632
2633/* conditionally throttle active cfs_rq's from put_prev_entity() */
2634static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2635{
Paul Turner56f570e2011-11-07 20:26:33 -08002636 if (!cfs_bandwidth_used())
2637 return;
2638
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002639 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2640 return;
2641
2642 /*
2643 * it's possible for a throttled entity to be forced into a running
2644 * state (e.g. set_curr_task), in this case we're finished.
2645 */
2646 if (cfs_rq_throttled(cfs_rq))
2647 return;
2648
2649 throttle_cfs_rq(cfs_rq);
2650}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002651
Peter Zijlstra029632f2011-10-25 10:00:11 +02002652static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2653{
2654 struct cfs_bandwidth *cfs_b =
2655 container_of(timer, struct cfs_bandwidth, slack_timer);
2656 do_sched_cfs_slack_timer(cfs_b);
2657
2658 return HRTIMER_NORESTART;
2659}
2660
2661static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2662{
2663 struct cfs_bandwidth *cfs_b =
2664 container_of(timer, struct cfs_bandwidth, period_timer);
2665 ktime_t now;
2666 int overrun;
2667 int idle = 0;
2668
2669 for (;;) {
2670 now = hrtimer_cb_get_time(timer);
2671 overrun = hrtimer_forward(timer, now, cfs_b->period);
2672
2673 if (!overrun)
2674 break;
2675
2676 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2677 }
2678
2679 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2680}
2681
2682void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2683{
2684 raw_spin_lock_init(&cfs_b->lock);
2685 cfs_b->runtime = 0;
2686 cfs_b->quota = RUNTIME_INF;
2687 cfs_b->period = ns_to_ktime(default_cfs_period());
2688
2689 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2690 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2691 cfs_b->period_timer.function = sched_cfs_period_timer;
2692 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2693 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2694}
2695
2696static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2697{
2698 cfs_rq->runtime_enabled = 0;
2699 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2700}
2701
2702/* requires cfs_b->lock, may release to reprogram timer */
2703void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2704{
2705 /*
2706 * The timer may be active because we're trying to set a new bandwidth
2707 * period or because we're racing with the tear-down path
2708 * (timer_active==0 becomes visible before the hrtimer call-back
2709 * terminates). In either case we ensure that it's re-programmed
2710 */
2711 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2712 raw_spin_unlock(&cfs_b->lock);
2713 /* ensure cfs_b->lock is available while we wait */
2714 hrtimer_cancel(&cfs_b->period_timer);
2715
2716 raw_spin_lock(&cfs_b->lock);
2717 /* if someone else restarted the timer then we're done */
2718 if (cfs_b->timer_active)
2719 return;
2720 }
2721
2722 cfs_b->timer_active = 1;
2723 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2724}
2725
2726static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2727{
2728 hrtimer_cancel(&cfs_b->period_timer);
2729 hrtimer_cancel(&cfs_b->slack_timer);
2730}
2731
Arnd Bergmann38dc3342013-01-25 14:14:22 +00002732static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02002733{
2734 struct cfs_rq *cfs_rq;
2735
2736 for_each_leaf_cfs_rq(rq, cfs_rq) {
2737 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2738
2739 if (!cfs_rq->runtime_enabled)
2740 continue;
2741
2742 /*
2743 * clock_task is not advancing so we just need to make sure
2744 * there's some valid quota amount
2745 */
2746 cfs_rq->runtime_remaining = cfs_b->quota;
2747 if (cfs_rq_throttled(cfs_rq))
2748 unthrottle_cfs_rq(cfs_rq);
2749 }
2750}
2751
2752#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02002753static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2754{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002755 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02002756}
2757
2758static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2759 unsigned long delta_exec) {}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002760static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2761static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002762static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07002763
2764static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2765{
2766 return 0;
2767}
Paul Turner64660c82011-07-21 09:43:36 -07002768
2769static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2770{
2771 return 0;
2772}
2773
2774static inline int throttled_lb_pair(struct task_group *tg,
2775 int src_cpu, int dest_cpu)
2776{
2777 return 0;
2778}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002779
2780void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2781
2782#ifdef CONFIG_FAIR_GROUP_SCHED
2783static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07002784#endif
2785
Peter Zijlstra029632f2011-10-25 10:00:11 +02002786static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2787{
2788 return NULL;
2789}
2790static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07002791static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002792
2793#endif /* CONFIG_CFS_BANDWIDTH */
2794
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002795/**************************************************
2796 * CFS operations on tasks:
2797 */
2798
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002799#ifdef CONFIG_SCHED_HRTICK
2800static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2801{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002802 struct sched_entity *se = &p->se;
2803 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2804
2805 WARN_ON(task_rq(p) != rq);
2806
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002807 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002808 u64 slice = sched_slice(cfs_rq, se);
2809 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2810 s64 delta = slice - ran;
2811
2812 if (delta < 0) {
2813 if (rq->curr == p)
2814 resched_task(p);
2815 return;
2816 }
2817
2818 /*
2819 * Don't schedule slices shorter than 10000ns, that just
2820 * doesn't make sense. Rely on vruntime for fairness.
2821 */
Peter Zijlstra31656512008-07-18 18:01:23 +02002822 if (rq->curr != p)
Peter Zijlstra157124c2008-07-28 11:53:11 +02002823 delta = max_t(s64, 10000LL, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002824
Peter Zijlstra31656512008-07-18 18:01:23 +02002825 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002826 }
2827}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002828
2829/*
2830 * called from enqueue/dequeue and updates the hrtick when the
2831 * current task is from our class and nr_running is low enough
2832 * to matter.
2833 */
2834static void hrtick_update(struct rq *rq)
2835{
2836 struct task_struct *curr = rq->curr;
2837
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002838 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002839 return;
2840
2841 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2842 hrtick_start_fair(rq, curr);
2843}
Dhaval Giani55e12e52008-06-24 23:39:43 +05302844#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002845static inline void
2846hrtick_start_fair(struct rq *rq, struct task_struct *p)
2847{
2848}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002849
2850static inline void hrtick_update(struct rq *rq)
2851{
2852}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002853#endif
2854
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002855/*
2856 * The enqueue_task method is called before nr_running is
2857 * increased. Here we update the fair scheduling stats and
2858 * then put the task into the rbtree:
2859 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00002860static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002861enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002862{
2863 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002864 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002865
2866 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002867 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002868 break;
2869 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002870 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07002871
2872 /*
2873 * end evaluation on encountering a throttled cfs_rq
2874 *
2875 * note: in the case of encountering a throttled cfs_rq we will
2876 * post the final h_nr_running increment below.
2877 */
2878 if (cfs_rq_throttled(cfs_rq))
2879 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07002880 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07002881
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002882 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002883 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002884
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002885 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08002886 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07002887 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002888
Paul Turner85dac902011-07-21 09:43:33 -07002889 if (cfs_rq_throttled(cfs_rq))
2890 break;
2891
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002892 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02002893 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002894 }
2895
Ben Segall18bf2802012-10-04 12:51:20 +02002896 if (!se) {
2897 update_rq_runnable_avg(rq, rq->nr_running);
Paul Turner85dac902011-07-21 09:43:33 -07002898 inc_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02002899 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002900 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002901}
2902
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002903static void set_next_buddy(struct sched_entity *se);
2904
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002905/*
2906 * The dequeue_task method is called before nr_running is
2907 * decreased. We remove the task from the rbtree and
2908 * update the fair scheduling stats:
2909 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002910static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002911{
2912 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002913 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002914 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002915
2916 for_each_sched_entity(se) {
2917 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002918 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07002919
2920 /*
2921 * end evaluation on encountering a throttled cfs_rq
2922 *
2923 * note: in the case of encountering a throttled cfs_rq we will
2924 * post the final h_nr_running decrement below.
2925 */
2926 if (cfs_rq_throttled(cfs_rq))
2927 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07002928 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002929
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002930 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002931 if (cfs_rq->load.weight) {
2932 /*
2933 * Bias pick_next to pick a task from this cfs_rq, as
2934 * p is sleeping when it is within its sched_slice.
2935 */
2936 if (task_sleep && parent_entity(se))
2937 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07002938
2939 /* avoid re-evaluating load for this entity */
2940 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002941 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002942 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002943 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002944 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002945
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002946 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08002947 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07002948 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002949
Paul Turner85dac902011-07-21 09:43:33 -07002950 if (cfs_rq_throttled(cfs_rq))
2951 break;
2952
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002953 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02002954 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002955 }
2956
Ben Segall18bf2802012-10-04 12:51:20 +02002957 if (!se) {
Paul Turner85dac902011-07-21 09:43:33 -07002958 dec_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02002959 update_rq_runnable_avg(rq, 1);
2960 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002961 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002962}
2963
Gregory Haskinse7693a32008-01-25 21:08:09 +01002964#ifdef CONFIG_SMP
Peter Zijlstra029632f2011-10-25 10:00:11 +02002965/* Used instead of source_load when we know the type == 0 */
2966static unsigned long weighted_cpuload(const int cpu)
2967{
Alex Shib92486c2013-06-20 10:18:50 +08002968 return cpu_rq(cpu)->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02002969}
2970
2971/*
2972 * Return a low guess at the load of a migration-source cpu weighted
2973 * according to the scheduling class and "nice" value.
2974 *
2975 * We want to under-estimate the load of migration sources, to
2976 * balance conservatively.
2977 */
2978static unsigned long source_load(int cpu, int type)
2979{
2980 struct rq *rq = cpu_rq(cpu);
2981 unsigned long total = weighted_cpuload(cpu);
2982
2983 if (type == 0 || !sched_feat(LB_BIAS))
2984 return total;
2985
2986 return min(rq->cpu_load[type-1], total);
2987}
2988
2989/*
2990 * Return a high guess at the load of a migration-target cpu weighted
2991 * according to the scheduling class and "nice" value.
2992 */
2993static unsigned long target_load(int cpu, int type)
2994{
2995 struct rq *rq = cpu_rq(cpu);
2996 unsigned long total = weighted_cpuload(cpu);
2997
2998 if (type == 0 || !sched_feat(LB_BIAS))
2999 return total;
3000
3001 return max(rq->cpu_load[type-1], total);
3002}
3003
3004static unsigned long power_of(int cpu)
3005{
3006 return cpu_rq(cpu)->cpu_power;
3007}
3008
3009static unsigned long cpu_avg_load_per_task(int cpu)
3010{
3011 struct rq *rq = cpu_rq(cpu);
3012 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Alex Shib92486c2013-06-20 10:18:50 +08003013 unsigned long load_avg = rq->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003014
3015 if (nr_running)
Alex Shib92486c2013-06-20 10:18:50 +08003016 return load_avg / nr_running;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003017
3018 return 0;
3019}
3020
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003021
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003022static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003023{
3024 struct sched_entity *se = &p->se;
3025 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003026 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003027
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003028#ifndef CONFIG_64BIT
3029 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003030
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003031 do {
3032 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3033 smp_rmb();
3034 min_vruntime = cfs_rq->min_vruntime;
3035 } while (min_vruntime != min_vruntime_copy);
3036#else
3037 min_vruntime = cfs_rq->min_vruntime;
3038#endif
3039
3040 se->vruntime -= min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003041}
3042
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003043#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003044/*
3045 * effective_load() calculates the load change as seen from the root_task_group
3046 *
3047 * Adding load to a group doesn't make a group heavier, but can cause movement
3048 * of group shares between cpus. Assuming the shares were perfectly aligned one
3049 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003050 *
3051 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3052 * on this @cpu and results in a total addition (subtraction) of @wg to the
3053 * total group weight.
3054 *
3055 * Given a runqueue weight distribution (rw_i) we can compute a shares
3056 * distribution (s_i) using:
3057 *
3058 * s_i = rw_i / \Sum rw_j (1)
3059 *
3060 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3061 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3062 * shares distribution (s_i):
3063 *
3064 * rw_i = { 2, 4, 1, 0 }
3065 * s_i = { 2/7, 4/7, 1/7, 0 }
3066 *
3067 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3068 * task used to run on and the CPU the waker is running on), we need to
3069 * compute the effect of waking a task on either CPU and, in case of a sync
3070 * wakeup, compute the effect of the current task going to sleep.
3071 *
3072 * So for a change of @wl to the local @cpu with an overall group weight change
3073 * of @wl we can compute the new shares distribution (s'_i) using:
3074 *
3075 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3076 *
3077 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3078 * differences in waking a task to CPU 0. The additional task changes the
3079 * weight and shares distributions like:
3080 *
3081 * rw'_i = { 3, 4, 1, 0 }
3082 * s'_i = { 3/8, 4/8, 1/8, 0 }
3083 *
3084 * We can then compute the difference in effective weight by using:
3085 *
3086 * dw_i = S * (s'_i - s_i) (3)
3087 *
3088 * Where 'S' is the group weight as seen by its parent.
3089 *
3090 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3091 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3092 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003093 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003094static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003095{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003096 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003097
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003098 if (!tg->parent) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003099 return wl;
3100
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003101 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003102 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003103
Paul Turner977dda72011-01-14 17:57:50 -08003104 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003105
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003106 /*
3107 * W = @wg + \Sum rw_j
3108 */
3109 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003110
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003111 /*
3112 * w = rw_i + @wl
3113 */
3114 w = se->my_q->load.weight + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003115
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003116 /*
3117 * wl = S * s'_i; see (2)
3118 */
3119 if (W > 0 && w < W)
3120 wl = (w * tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08003121 else
3122 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003123
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003124 /*
3125 * Per the above, wl is the new se->load.weight value; since
3126 * those are clipped to [MIN_SHARES, ...) do so now. See
3127 * calc_cfs_shares().
3128 */
Paul Turner977dda72011-01-14 17:57:50 -08003129 if (wl < MIN_SHARES)
3130 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003131
3132 /*
3133 * wl = dw_i = S * (s'_i - s_i); see (3)
3134 */
Paul Turner977dda72011-01-14 17:57:50 -08003135 wl -= se->load.weight;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003136
3137 /*
3138 * Recursively apply this logic to all parent groups to compute
3139 * the final effective load change on the root group. Since
3140 * only the @tg group gets extra weight, all parent groups can
3141 * only redistribute existing shares. @wl is the shift in shares
3142 * resulting from this level per the above.
3143 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003144 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003145 }
3146
3147 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003148}
3149#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003150
Peter Zijlstra83378262008-06-27 13:41:37 +02003151static inline unsigned long effective_load(struct task_group *tg, int cpu,
3152 unsigned long wl, unsigned long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003153{
Peter Zijlstra83378262008-06-27 13:41:37 +02003154 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003155}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003156
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003157#endif
3158
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003159static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003160{
Paul Turnere37b6a72011-01-21 20:44:59 -08003161 s64 this_load, load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003162 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003163 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003164 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02003165 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003166 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003167
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003168 idx = sd->wake_idx;
3169 this_cpu = smp_processor_id();
3170 prev_cpu = task_cpu(p);
3171 load = source_load(prev_cpu, idx);
3172 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003173
3174 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003175 * If sync wakeup then subtract the (maximum possible)
3176 * effect of the currently running task from the load
3177 * of the current CPU:
3178 */
Peter Zijlstra83378262008-06-27 13:41:37 +02003179 if (sync) {
3180 tg = task_group(current);
3181 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003182
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003183 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02003184 load += effective_load(tg, prev_cpu, 0, -weight);
3185 }
3186
3187 tg = task_group(p);
3188 weight = p->se.load.weight;
3189
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003190 /*
3191 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003192 * due to the sync cause above having dropped this_load to 0, we'll
3193 * always have an imbalance, but there's really nothing you can do
3194 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003195 *
3196 * Otherwise check if either cpus are near enough in load to allow this
3197 * task to be woken on this_cpu.
3198 */
Paul Turnere37b6a72011-01-21 20:44:59 -08003199 if (this_load > 0) {
3200 s64 this_eff_load, prev_eff_load;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02003201
3202 this_eff_load = 100;
3203 this_eff_load *= power_of(prev_cpu);
3204 this_eff_load *= this_load +
3205 effective_load(tg, this_cpu, weight, weight);
3206
3207 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3208 prev_eff_load *= power_of(this_cpu);
3209 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3210
3211 balanced = this_eff_load <= prev_eff_load;
3212 } else
3213 balanced = true;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003214
3215 /*
3216 * If the currently running task will sleep within
3217 * a reasonable amount of time then attract this newly
3218 * woken task:
3219 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02003220 if (sync && balanced)
3221 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003222
Lucas De Marchi41acab82010-03-10 23:37:45 -03003223 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003224 tl_per_task = cpu_avg_load_per_task(this_cpu);
3225
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003226 if (balanced ||
3227 (this_load <= load &&
3228 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003229 /*
3230 * This domain has SD_WAKE_AFFINE and
3231 * p is cache cold in this domain, and
3232 * there is no bad imbalance.
3233 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003234 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003235 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003236
3237 return 1;
3238 }
3239 return 0;
3240}
3241
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003242/*
3243 * find_idlest_group finds and returns the least busy CPU group within the
3244 * domain.
3245 */
3246static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02003247find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003248 int this_cpu, int load_idx)
Gregory Haskinse7693a32008-01-25 21:08:09 +01003249{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07003250 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003251 unsigned long min_load = ULONG_MAX, this_load = 0;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003252 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003253
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003254 do {
3255 unsigned long load, avg_load;
3256 int local_group;
3257 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003258
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003259 /* Skip over this group if it has no CPUs allowed */
3260 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003261 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003262 continue;
3263
3264 local_group = cpumask_test_cpu(this_cpu,
3265 sched_group_cpus(group));
3266
3267 /* Tally up the load of all CPUs in the group */
3268 avg_load = 0;
3269
3270 for_each_cpu(i, sched_group_cpus(group)) {
3271 /* Bias balancing toward cpus of our domain */
3272 if (local_group)
3273 load = source_load(i, load_idx);
3274 else
3275 load = target_load(i, load_idx);
3276
3277 avg_load += load;
3278 }
3279
3280 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02003281 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003282
3283 if (local_group) {
3284 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003285 } else if (avg_load < min_load) {
3286 min_load = avg_load;
3287 idlest = group;
3288 }
3289 } while (group = group->next, group != sd->groups);
3290
3291 if (!idlest || 100*this_load < imbalance*min_load)
3292 return NULL;
3293 return idlest;
3294}
3295
3296/*
3297 * find_idlest_cpu - find the idlest cpu among the cpus in group.
3298 */
3299static int
3300find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3301{
3302 unsigned long load, min_load = ULONG_MAX;
3303 int idlest = -1;
3304 int i;
3305
3306 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003307 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003308 load = weighted_cpuload(i);
3309
3310 if (load < min_load || (load == min_load && i == this_cpu)) {
3311 min_load = load;
3312 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003313 }
3314 }
3315
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003316 return idlest;
3317}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003318
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003319/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003320 * Try and locate an idle CPU in the sched_domain.
3321 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003322static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003323{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003324 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07003325 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003326 int i = task_cpu(p);
3327
3328 if (idle_cpu(target))
3329 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003330
3331 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003332 * If the prevous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003333 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003334 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3335 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003336
3337 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07003338 * Otherwise, iterate the domains and find an elegible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003339 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01003340 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08003341 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07003342 sg = sd->groups;
3343 do {
3344 if (!cpumask_intersects(sched_group_cpus(sg),
3345 tsk_cpus_allowed(p)))
3346 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02003347
Linus Torvalds37407ea2012-09-16 12:29:43 -07003348 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003349 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07003350 goto next;
3351 }
3352
3353 target = cpumask_first_and(sched_group_cpus(sg),
3354 tsk_cpus_allowed(p));
3355 goto done;
3356next:
3357 sg = sg->next;
3358 } while (sg != sd->groups);
3359 }
3360done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003361 return target;
3362}
3363
3364/*
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003365 * sched_balance_self: balance the current task (running on cpu) in domains
3366 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
3367 * SD_BALANCE_EXEC.
3368 *
3369 * Balance, ie. select the least loaded group.
3370 *
3371 * Returns the target CPU number, or the same CPU if no balancing is needed.
3372 *
3373 * preempt must be disabled.
3374 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01003375static int
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003376select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003377{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003378 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003379 int cpu = smp_processor_id();
3380 int prev_cpu = task_cpu(p);
3381 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003382 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003383 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003384
Peter Zijlstra29baa742012-04-23 12:11:21 +02003385 if (p->nr_cpus_allowed == 1)
Mike Galbraith76854c72011-11-22 15:18:24 +01003386 return prev_cpu;
3387
Peter Zijlstra0763a662009-09-14 19:37:39 +02003388 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003389 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003390 want_affine = 1;
3391 new_cpu = prev_cpu;
3392 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01003393
Peter Zijlstradce840a2011-04-07 14:09:50 +02003394 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003395 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f428882009-12-16 18:04:34 +01003396 if (!(tmp->flags & SD_LOAD_BALANCE))
3397 continue;
3398
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003399 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003400 * If both cpu and prev_cpu are part of this domain,
3401 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01003402 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003403 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3404 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3405 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08003406 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003407 }
3408
Alex Shif03542a2012-07-26 08:55:34 +08003409 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003410 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003411 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003412
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003413 if (affine_sd) {
Alex Shif03542a2012-07-26 08:55:34 +08003414 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
Peter Zijlstradce840a2011-04-07 14:09:50 +02003415 prev_cpu = cpu;
3416
3417 new_cpu = select_idle_sibling(p, prev_cpu);
3418 goto unlock;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003419 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02003420
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003421 while (sd) {
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003422 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003423 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003424 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003425
Peter Zijlstra0763a662009-09-14 19:37:39 +02003426 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003427 sd = sd->child;
3428 continue;
3429 }
3430
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003431 if (sd_flag & SD_BALANCE_WAKE)
3432 load_idx = sd->wake_idx;
3433
3434 group = find_idlest_group(sd, p, cpu, load_idx);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003435 if (!group) {
3436 sd = sd->child;
3437 continue;
3438 }
3439
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02003440 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003441 if (new_cpu == -1 || new_cpu == cpu) {
3442 /* Now try balancing at a lower domain level of cpu */
3443 sd = sd->child;
3444 continue;
3445 }
3446
3447 /* Now try balancing at a lower domain level of new_cpu */
3448 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003449 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003450 sd = NULL;
3451 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003452 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003453 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02003454 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003455 sd = tmp;
3456 }
3457 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01003458 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02003459unlock:
3460 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01003461
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003462 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003463}
Paul Turner0a74bef2012-10-04 13:18:30 +02003464
3465/*
3466 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3467 * cfs_rq_of(p) references at time of call are still valid and identify the
3468 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3469 * other assumptions, including the state of rq->lock, should be made.
3470 */
3471static void
3472migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3473{
Paul Turneraff3e492012-10-04 13:18:30 +02003474 struct sched_entity *se = &p->se;
3475 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3476
3477 /*
3478 * Load tracking: accumulate removed load so that it can be processed
3479 * when we next update owning cfs_rq under rq->lock. Tasks contribute
3480 * to blocked load iff they have a positive decay-count. It can never
3481 * be negative here since on-rq tasks have decay-count == 0.
3482 */
3483 if (se->avg.decay_count) {
3484 se->avg.decay_count = -__synchronize_entity_decay(se);
Alex Shi25099402013-06-20 10:18:55 +08003485 atomic_long_add(se->avg.load_avg_contrib,
3486 &cfs_rq->removed_load);
Paul Turneraff3e492012-10-04 13:18:30 +02003487 }
Paul Turner0a74bef2012-10-04 13:18:30 +02003488}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003489#endif /* CONFIG_SMP */
3490
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003491static unsigned long
3492wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003493{
3494 unsigned long gran = sysctl_sched_wakeup_granularity;
3495
3496 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003497 * Since its curr running now, convert the gran from real-time
3498 * to virtual-time in his units.
Mike Galbraith13814d42010-03-11 17:17:04 +01003499 *
3500 * By using 'se' instead of 'curr' we penalize light tasks, so
3501 * they get preempted easier. That is, if 'se' < 'curr' then
3502 * the resulting gran will be larger, therefore penalizing the
3503 * lighter, if otoh 'se' > 'curr' then the resulting gran will
3504 * be smaller, again penalizing the lighter task.
3505 *
3506 * This is especially important for buddies when the leftmost
3507 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003508 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08003509 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003510}
3511
3512/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02003513 * Should 'se' preempt 'curr'.
3514 *
3515 * |s1
3516 * |s2
3517 * |s3
3518 * g
3519 * |<--->|c
3520 *
3521 * w(c, s1) = -1
3522 * w(c, s2) = 0
3523 * w(c, s3) = 1
3524 *
3525 */
3526static int
3527wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3528{
3529 s64 gran, vdiff = curr->vruntime - se->vruntime;
3530
3531 if (vdiff <= 0)
3532 return -1;
3533
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003534 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02003535 if (vdiff > gran)
3536 return 1;
3537
3538 return 0;
3539}
3540
Peter Zijlstra02479092008-11-04 21:25:10 +01003541static void set_last_buddy(struct sched_entity *se)
3542{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003543 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3544 return;
3545
3546 for_each_sched_entity(se)
3547 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003548}
3549
3550static void set_next_buddy(struct sched_entity *se)
3551{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003552 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3553 return;
3554
3555 for_each_sched_entity(se)
3556 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003557}
3558
Rik van Rielac53db52011-02-01 09:51:03 -05003559static void set_skip_buddy(struct sched_entity *se)
3560{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003561 for_each_sched_entity(se)
3562 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05003563}
3564
Peter Zijlstra464b7522008-10-24 11:06:15 +02003565/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003566 * Preempt the current task with a newly woken task if needed:
3567 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02003568static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003569{
3570 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02003571 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003572 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02003573 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003574 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003575
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01003576 if (unlikely(se == pse))
3577 return;
3578
Paul Turner5238cdd2011-07-21 09:43:37 -07003579 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003580 * This is possible from callers such as move_task(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07003581 * unconditionally check_prempt_curr() after an enqueue (which may have
3582 * lead to a throttle). This both saves work and prevents false
3583 * next-buddy nomination below.
3584 */
3585 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3586 return;
3587
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003588 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02003589 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003590 next_buddy_marked = 1;
3591 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02003592
Bharata B Raoaec0a512008-08-28 14:42:49 +05303593 /*
3594 * We can come here with TIF_NEED_RESCHED already set from new task
3595 * wake up path.
Paul Turner5238cdd2011-07-21 09:43:37 -07003596 *
3597 * Note: this also catches the edge-case of curr being in a throttled
3598 * group (e.g. via set_curr_task), since update_curr() (in the
3599 * enqueue of curr) will have resulted in resched being set. This
3600 * prevents us from potentially nominating it as a false LAST_BUDDY
3601 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05303602 */
3603 if (test_tsk_need_resched(curr))
3604 return;
3605
Darren Harta2f5c9a2011-02-22 13:04:33 -08003606 /* Idle tasks are by definition preempted by non-idle tasks. */
3607 if (unlikely(curr->policy == SCHED_IDLE) &&
3608 likely(p->policy != SCHED_IDLE))
3609 goto preempt;
3610
Ingo Molnar91c234b2007-10-15 17:00:18 +02003611 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08003612 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3613 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02003614 */
Ingo Molnar8ed92e512012-10-14 14:28:50 +02003615 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02003616 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003617
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003618 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07003619 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003620 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003621 if (wakeup_preempt_entity(se, pse) == 1) {
3622 /*
3623 * Bias pick_next to pick the sched entity that is
3624 * triggering this preemption.
3625 */
3626 if (!next_buddy_marked)
3627 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003628 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003629 }
Jupyung Leea65ac742009-11-17 18:51:40 +09003630
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003631 return;
3632
3633preempt:
3634 resched_task(curr);
3635 /*
3636 * Only set the backward buddy when the current task is still
3637 * on the rq. This can happen when a wakeup gets interleaved
3638 * with schedule on the ->pre_schedule() or idle_balance()
3639 * point, either of which can * drop the rq lock.
3640 *
3641 * Also, during early boot the idle thread is in the fair class,
3642 * for obvious reasons its a bad idea to schedule back to it.
3643 */
3644 if (unlikely(!se->on_rq || curr == rq->idle))
3645 return;
3646
3647 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3648 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003649}
3650
Ingo Molnarfb8d4722007-08-09 11:16:48 +02003651static struct task_struct *pick_next_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003652{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003653 struct task_struct *p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003654 struct cfs_rq *cfs_rq = &rq->cfs;
3655 struct sched_entity *se;
3656
Tim Blechmann36ace272009-11-24 11:55:45 +01003657 if (!cfs_rq->nr_running)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003658 return NULL;
3659
3660 do {
Ingo Molnar9948f4b2007-08-09 11:16:48 +02003661 se = pick_next_entity(cfs_rq);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01003662 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003663 cfs_rq = group_cfs_rq(se);
3664 } while (cfs_rq);
3665
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003666 p = task_of(se);
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003667 if (hrtick_enabled(rq))
3668 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003669
3670 return p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003671}
3672
3673/*
3674 * Account for a descheduled task:
3675 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02003676static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003677{
3678 struct sched_entity *se = &prev->se;
3679 struct cfs_rq *cfs_rq;
3680
3681 for_each_sched_entity(se) {
3682 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02003683 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003684 }
3685}
3686
Rik van Rielac53db52011-02-01 09:51:03 -05003687/*
3688 * sched_yield() is very simple
3689 *
3690 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3691 */
3692static void yield_task_fair(struct rq *rq)
3693{
3694 struct task_struct *curr = rq->curr;
3695 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3696 struct sched_entity *se = &curr->se;
3697
3698 /*
3699 * Are we the only task in the tree?
3700 */
3701 if (unlikely(rq->nr_running == 1))
3702 return;
3703
3704 clear_buddies(cfs_rq, se);
3705
3706 if (curr->policy != SCHED_BATCH) {
3707 update_rq_clock(rq);
3708 /*
3709 * Update run-time statistics of the 'current'.
3710 */
3711 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01003712 /*
3713 * Tell update_rq_clock() that we've just updated,
3714 * so we don't do microscopic update in schedule()
3715 * and double the fastpath cost.
3716 */
3717 rq->skip_clock_update = 1;
Rik van Rielac53db52011-02-01 09:51:03 -05003718 }
3719
3720 set_skip_buddy(se);
3721}
3722
Mike Galbraithd95f4122011-02-01 09:50:51 -05003723static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3724{
3725 struct sched_entity *se = &p->se;
3726
Paul Turner5238cdd2011-07-21 09:43:37 -07003727 /* throttled hierarchies are not runnable */
3728 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05003729 return false;
3730
3731 /* Tell the scheduler that we'd really like pse to run next. */
3732 set_next_buddy(se);
3733
Mike Galbraithd95f4122011-02-01 09:50:51 -05003734 yield_task_fair(rq);
3735
3736 return true;
3737}
3738
Peter Williams681f3e62007-10-24 18:23:51 +02003739#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003740/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02003741 * Fair scheduling class load-balancing methods.
3742 *
3743 * BASICS
3744 *
3745 * The purpose of load-balancing is to achieve the same basic fairness the
3746 * per-cpu scheduler provides, namely provide a proportional amount of compute
3747 * time to each task. This is expressed in the following equation:
3748 *
3749 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
3750 *
3751 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3752 * W_i,0 is defined as:
3753 *
3754 * W_i,0 = \Sum_j w_i,j (2)
3755 *
3756 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3757 * is derived from the nice value as per prio_to_weight[].
3758 *
3759 * The weight average is an exponential decay average of the instantaneous
3760 * weight:
3761 *
3762 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
3763 *
3764 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
3765 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3766 * can also include other factors [XXX].
3767 *
3768 * To achieve this balance we define a measure of imbalance which follows
3769 * directly from (1):
3770 *
3771 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
3772 *
3773 * We them move tasks around to minimize the imbalance. In the continuous
3774 * function space it is obvious this converges, in the discrete case we get
3775 * a few fun cases generally called infeasible weight scenarios.
3776 *
3777 * [XXX expand on:
3778 * - infeasible weights;
3779 * - local vs global optima in the discrete case. ]
3780 *
3781 *
3782 * SCHED DOMAINS
3783 *
3784 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3785 * for all i,j solution, we create a tree of cpus that follows the hardware
3786 * topology where each level pairs two lower groups (or better). This results
3787 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3788 * tree to only the first of the previous level and we decrease the frequency
3789 * of load-balance at each level inv. proportional to the number of cpus in
3790 * the groups.
3791 *
3792 * This yields:
3793 *
3794 * log_2 n 1 n
3795 * \Sum { --- * --- * 2^i } = O(n) (5)
3796 * i = 0 2^i 2^i
3797 * `- size of each group
3798 * | | `- number of cpus doing load-balance
3799 * | `- freq
3800 * `- sum over all levels
3801 *
3802 * Coupled with a limit on how many tasks we can migrate every balance pass,
3803 * this makes (5) the runtime complexity of the balancer.
3804 *
3805 * An important property here is that each CPU is still (indirectly) connected
3806 * to every other cpu in at most O(log n) steps:
3807 *
3808 * The adjacency matrix of the resulting graph is given by:
3809 *
3810 * log_2 n
3811 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
3812 * k = 0
3813 *
3814 * And you'll find that:
3815 *
3816 * A^(log_2 n)_i,j != 0 for all i,j (7)
3817 *
3818 * Showing there's indeed a path between every cpu in at most O(log n) steps.
3819 * The task movement gives a factor of O(m), giving a convergence complexity
3820 * of:
3821 *
3822 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
3823 *
3824 *
3825 * WORK CONSERVING
3826 *
3827 * In order to avoid CPUs going idle while there's still work to do, new idle
3828 * balancing is more aggressive and has the newly idle cpu iterate up the domain
3829 * tree itself instead of relying on other CPUs to bring it work.
3830 *
3831 * This adds some complexity to both (5) and (8) but it reduces the total idle
3832 * time.
3833 *
3834 * [XXX more?]
3835 *
3836 *
3837 * CGROUPS
3838 *
3839 * Cgroups make a horror show out of (2), instead of a simple sum we get:
3840 *
3841 * s_k,i
3842 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
3843 * S_k
3844 *
3845 * Where
3846 *
3847 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
3848 *
3849 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
3850 *
3851 * The big problem is S_k, its a global sum needed to compute a local (W_i)
3852 * property.
3853 *
3854 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
3855 * rewrite all of this once again.]
3856 */
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003857
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09003858static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3859
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003860#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01003861#define LBF_NEED_BREAK 0x02
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303862#define LBF_SOME_PINNED 0x04
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003863
3864struct lb_env {
3865 struct sched_domain *sd;
3866
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003867 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05303868 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003869
3870 int dst_cpu;
3871 struct rq *dst_rq;
3872
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303873 struct cpumask *dst_grpmask;
3874 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003875 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02003876 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08003877 /* The set of CPUs under consideration for load-balancing */
3878 struct cpumask *cpus;
3879
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003880 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01003881
3882 unsigned int loop;
3883 unsigned int loop_break;
3884 unsigned int loop_max;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003885};
3886
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003887/*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003888 * move_task - move a task from one runqueue to another runqueue.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003889 * Both runqueues must be locked.
3890 */
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003891static void move_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003892{
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003893 deactivate_task(env->src_rq, p, 0);
3894 set_task_cpu(p, env->dst_cpu);
3895 activate_task(env->dst_rq, p, 0);
3896 check_preempt_curr(env->dst_rq, p, 0);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003897}
3898
3899/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02003900 * Is this task likely cache-hot:
3901 */
3902static int
3903task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3904{
3905 s64 delta;
3906
3907 if (p->sched_class != &fair_sched_class)
3908 return 0;
3909
3910 if (unlikely(p->policy == SCHED_IDLE))
3911 return 0;
3912
3913 /*
3914 * Buddy candidates are cache hot:
3915 */
3916 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3917 (&p->se == cfs_rq_of(&p->se)->next ||
3918 &p->se == cfs_rq_of(&p->se)->last))
3919 return 1;
3920
3921 if (sysctl_sched_migration_cost == -1)
3922 return 1;
3923 if (sysctl_sched_migration_cost == 0)
3924 return 0;
3925
3926 delta = now - p->se.exec_start;
3927
3928 return delta < (s64)sysctl_sched_migration_cost;
3929}
3930
3931/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003932 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3933 */
3934static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003935int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003936{
3937 int tsk_cache_hot = 0;
3938 /*
3939 * We do not migrate tasks that are:
Joonsoo Kimd3198082013-04-23 17:27:40 +09003940 * 1) throttled_lb_pair, or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003941 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09003942 * 3) running (obviously), or
3943 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003944 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09003945 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3946 return 0;
3947
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003948 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003949 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303950
Lucas De Marchi41acab82010-03-10 23:37:45 -03003951 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303952
3953 /*
3954 * Remember if this task can be migrated to any other cpu in
3955 * our sched_group. We may want to revisit it if we couldn't
3956 * meet load balance goals by pulling other tasks on src_cpu.
3957 *
3958 * Also avoid computing new_dst_cpu if we have already computed
3959 * one in current iteration.
3960 */
3961 if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
3962 return 0;
3963
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003964 /* Prevent to re-select dst_cpu via env's cpus */
3965 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
3966 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
3967 env->flags |= LBF_SOME_PINNED;
3968 env->new_dst_cpu = cpu;
3969 break;
3970 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303971 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003972
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003973 return 0;
3974 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303975
3976 /* Record that we found atleast one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003977 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003978
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003979 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03003980 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003981 return 0;
3982 }
3983
3984 /*
3985 * Aggressive migration if:
3986 * 1) task is cache cold, or
3987 * 2) too many balance attempts have failed.
3988 */
3989
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003990 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003991 if (!tsk_cache_hot ||
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003992 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003993
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003994 if (tsk_cache_hot) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003995 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003996 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003997 }
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003998
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003999 return 1;
4000 }
4001
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004002 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4003 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004004}
4005
Peter Zijlstra897c3952009-12-17 17:45:42 +01004006/*
4007 * move_one_task tries to move exactly one task from busiest to this_rq, as
4008 * part of active balancing operations within "domain".
4009 * Returns 1 if successful and 0 otherwise.
4010 *
4011 * Called with both runqueues locked.
4012 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004013static int move_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01004014{
4015 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004016
Peter Zijlstra367456c2012-02-20 21:49:09 +01004017 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01004018 if (!can_migrate_task(p, env))
4019 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004020
Peter Zijlstra367456c2012-02-20 21:49:09 +01004021 move_task(p, env);
4022 /*
4023 * Right now, this is only the second place move_task()
4024 * is called, so we can safely collect move_task()
4025 * stats here rather than inside move_task().
4026 */
4027 schedstat_inc(env->sd, lb_gained[env->idle]);
4028 return 1;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004029 }
Peter Zijlstra897c3952009-12-17 17:45:42 +01004030 return 0;
4031}
4032
Peter Zijlstra367456c2012-02-20 21:49:09 +01004033static unsigned long task_h_load(struct task_struct *p);
4034
Peter Zijlstraeb953082012-04-17 13:38:40 +02004035static const unsigned int sched_nr_migrate_break = 32;
4036
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004037/*
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004038 * move_tasks tries to move up to imbalance weighted load from busiest to
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004039 * this_rq, as part of a balancing operation within domain "sd".
4040 * Returns 1 if successful and 0 otherwise.
4041 *
4042 * Called with both runqueues locked.
4043 */
4044static int move_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004045{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004046 struct list_head *tasks = &env->src_rq->cfs_tasks;
4047 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004048 unsigned long load;
4049 int pulled = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004050
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004051 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004052 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004053
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004054 while (!list_empty(tasks)) {
4055 p = list_first_entry(tasks, struct task_struct, se.group_node);
4056
Peter Zijlstra367456c2012-02-20 21:49:09 +01004057 env->loop++;
4058 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004059 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004060 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004061
4062 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01004063 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02004064 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004065 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01004066 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02004067 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004068
Joonsoo Kimd3198082013-04-23 17:27:40 +09004069 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01004070 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004071
Peter Zijlstra367456c2012-02-20 21:49:09 +01004072 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004073
Peter Zijlstraeb953082012-04-17 13:38:40 +02004074 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004075 goto next;
4076
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004077 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004078 goto next;
4079
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004080 move_task(p, env);
Peter Zijlstraee00e662009-12-17 17:25:20 +01004081 pulled++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004082 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004083
4084#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01004085 /*
4086 * NEWIDLE balancing is a source of latency, so preemptible
4087 * kernels will stop after the first task is pulled to minimize
4088 * the critical section.
4089 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004090 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004091 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004092#endif
4093
Peter Zijlstraee00e662009-12-17 17:25:20 +01004094 /*
4095 * We only want to steal up to the prescribed amount of
4096 * weighted load.
4097 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004098 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004099 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004100
Peter Zijlstra367456c2012-02-20 21:49:09 +01004101 continue;
4102next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004103 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004104 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004105
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004106 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004107 * Right now, this is one of only two places move_task() is called,
4108 * so we can safely collect move_task() stats here rather than
4109 * inside move_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004110 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004111 schedstat_add(env->sd, lb_gained[env->idle], pulled);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004112
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004113 return pulled;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004114}
4115
Peter Zijlstra230059de2009-12-17 17:47:12 +01004116#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004117/*
4118 * update tg->load_weight by folding this cpu's load_avg
4119 */
Paul Turner48a16752012-10-04 13:18:31 +02004120static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004121{
Paul Turner48a16752012-10-04 13:18:31 +02004122 struct sched_entity *se = tg->se[cpu];
4123 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004124
Paul Turner48a16752012-10-04 13:18:31 +02004125 /* throttled entities do not contribute to load */
4126 if (throttled_hierarchy(cfs_rq))
4127 return;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004128
Paul Turneraff3e492012-10-04 13:18:30 +02004129 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004130
Paul Turner82958362012-10-04 13:18:31 +02004131 if (se) {
4132 update_entity_load_avg(se, 1);
4133 /*
4134 * We pivot on our runnable average having decayed to zero for
4135 * list removal. This generally implies that all our children
4136 * have also been removed (modulo rounding error or bandwidth
4137 * control); however, such cases are rare and we can fix these
4138 * at enqueue.
4139 *
4140 * TODO: fix up out-of-order children on enqueue.
4141 */
4142 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4143 list_del_leaf_cfs_rq(cfs_rq);
4144 } else {
Paul Turner48a16752012-10-04 13:18:31 +02004145 struct rq *rq = rq_of(cfs_rq);
Paul Turner82958362012-10-04 13:18:31 +02004146 update_rq_runnable_avg(rq, rq->nr_running);
4147 }
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004148}
4149
Paul Turner48a16752012-10-04 13:18:31 +02004150static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004151{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004152 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02004153 struct cfs_rq *cfs_rq;
4154 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004155
Paul Turner48a16752012-10-04 13:18:31 +02004156 raw_spin_lock_irqsave(&rq->lock, flags);
4157 update_rq_clock(rq);
Peter Zijlstra9763b672011-07-13 13:09:25 +02004158 /*
4159 * Iterates the task_group tree in a bottom up fashion, see
4160 * list_add_leaf_cfs_rq() for details.
4161 */
Paul Turner64660c82011-07-21 09:43:36 -07004162 for_each_leaf_cfs_rq(rq, cfs_rq) {
Paul Turner48a16752012-10-04 13:18:31 +02004163 /*
4164 * Note: We may want to consider periodically releasing
4165 * rq->lock about these updates so that creating many task
4166 * groups does not result in continually extending hold time.
4167 */
4168 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
Paul Turner64660c82011-07-21 09:43:36 -07004169 }
Paul Turner48a16752012-10-04 13:18:31 +02004170
4171 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004172}
4173
Peter Zijlstra9763b672011-07-13 13:09:25 +02004174/*
4175 * Compute the cpu's hierarchical load factor for each task group.
4176 * This needs to be done in a top-down fashion because the load of a child
4177 * group is a fraction of its parents load.
4178 */
4179static int tg_load_down(struct task_group *tg, void *data)
4180{
4181 unsigned long load;
4182 long cpu = (long)data;
4183
4184 if (!tg->parent) {
Alex Shia003a252013-06-20 10:18:51 +08004185 load = cpu_rq(cpu)->avg.load_avg_contrib;
Peter Zijlstra9763b672011-07-13 13:09:25 +02004186 } else {
4187 load = tg->parent->cfs_rq[cpu]->h_load;
Alex Shi72a4cf22013-06-20 10:18:53 +08004188 load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib,
4189 tg->parent->cfs_rq[cpu]->runnable_load_avg + 1);
Peter Zijlstra9763b672011-07-13 13:09:25 +02004190 }
4191
4192 tg->cfs_rq[cpu]->h_load = load;
4193
4194 return 0;
4195}
4196
4197static void update_h_load(long cpu)
4198{
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004199 struct rq *rq = cpu_rq(cpu);
4200 unsigned long now = jiffies;
4201
4202 if (rq->h_load_throttle == now)
4203 return;
4204
4205 rq->h_load_throttle = now;
4206
Peter Zijlstra367456c2012-02-20 21:49:09 +01004207 rcu_read_lock();
Peter Zijlstra9763b672011-07-13 13:09:25 +02004208 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstra367456c2012-02-20 21:49:09 +01004209 rcu_read_unlock();
Peter Zijlstra9763b672011-07-13 13:09:25 +02004210}
4211
Peter Zijlstra367456c2012-02-20 21:49:09 +01004212static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004213{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004214 struct cfs_rq *cfs_rq = task_cfs_rq(p);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004215
Alex Shia003a252013-06-20 10:18:51 +08004216 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
4217 cfs_rq->runnable_load_avg + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004218}
4219#else
Paul Turner48a16752012-10-04 13:18:31 +02004220static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004221{
4222}
4223
Peter Zijlstra367456c2012-02-20 21:49:09 +01004224static inline void update_h_load(long cpu)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004225{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004226}
4227
4228static unsigned long task_h_load(struct task_struct *p)
4229{
Alex Shia003a252013-06-20 10:18:51 +08004230 return p->se.avg.load_avg_contrib;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004231}
4232#endif
4233
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004234/********** Helpers for find_busiest_group ************************/
4235/*
4236 * sd_lb_stats - Structure to store the statistics of a sched_domain
4237 * during load balancing.
4238 */
4239struct sd_lb_stats {
4240 struct sched_group *busiest; /* Busiest group in this sd */
4241 struct sched_group *this; /* Local group in this sd */
4242 unsigned long total_load; /* Total load of all groups in sd */
4243 unsigned long total_pwr; /* Total power of all groups in sd */
4244 unsigned long avg_load; /* Average load across all groups in sd */
4245
4246 /** Statistics of this group */
4247 unsigned long this_load;
4248 unsigned long this_load_per_task;
4249 unsigned long this_nr_running;
Nikhil Raofab47622010-10-15 13:12:29 -07004250 unsigned long this_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004251 unsigned int this_idle_cpus;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004252
4253 /* Statistics of the busiest group */
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004254 unsigned int busiest_idle_cpus;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004255 unsigned long max_load;
4256 unsigned long busiest_load_per_task;
4257 unsigned long busiest_nr_running;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004258 unsigned long busiest_group_capacity;
Nikhil Raofab47622010-10-15 13:12:29 -07004259 unsigned long busiest_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004260 unsigned int busiest_group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004261
4262 int group_imb; /* Is there imbalance in this sd */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004263};
4264
4265/*
4266 * sg_lb_stats - stats of a sched_group required for load_balancing
4267 */
4268struct sg_lb_stats {
4269 unsigned long avg_load; /*Avg load across the CPUs of the group */
4270 unsigned long group_load; /* Total load over the CPUs of the group */
4271 unsigned long sum_nr_running; /* Nr tasks running in the group */
4272 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
4273 unsigned long group_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004274 unsigned long idle_cpus;
4275 unsigned long group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004276 int group_imb; /* Is there an imbalance in the group ? */
Nikhil Raofab47622010-10-15 13:12:29 -07004277 int group_has_capacity; /* Is there extra capacity in the group? */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004278};
4279
4280/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004281 * get_sd_load_idx - Obtain the load index for a given sched domain.
4282 * @sd: The sched_domain whose load_idx is to be obtained.
4283 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
Yacine Belkadie69f6182013-07-12 20:45:47 +02004284 *
4285 * Return: The load index.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004286 */
4287static inline int get_sd_load_idx(struct sched_domain *sd,
4288 enum cpu_idle_type idle)
4289{
4290 int load_idx;
4291
4292 switch (idle) {
4293 case CPU_NOT_IDLE:
4294 load_idx = sd->busy_idx;
4295 break;
4296
4297 case CPU_NEWLY_IDLE:
4298 load_idx = sd->newidle_idx;
4299 break;
4300 default:
4301 load_idx = sd->idle_idx;
4302 break;
4303 }
4304
4305 return load_idx;
4306}
4307
Li Zefan15f803c2013-03-05 16:07:11 +08004308static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004309{
Nikhil Rao1399fa72011-05-18 10:09:39 -07004310 return SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004311}
4312
4313unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4314{
4315 return default_scale_freq_power(sd, cpu);
4316}
4317
Li Zefan15f803c2013-03-05 16:07:11 +08004318static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004319{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004320 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004321 unsigned long smt_gain = sd->smt_gain;
4322
4323 smt_gain /= weight;
4324
4325 return smt_gain;
4326}
4327
4328unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4329{
4330 return default_scale_smt_power(sd, cpu);
4331}
4332
Li Zefan15f803c2013-03-05 16:07:11 +08004333static unsigned long scale_rt_power(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004334{
4335 struct rq *rq = cpu_rq(cpu);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004336 u64 total, available, age_stamp, avg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004337
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004338 /*
4339 * Since we're reading these variables without serialization make sure
4340 * we read them once before doing sanity checks on them.
4341 */
4342 age_stamp = ACCESS_ONCE(rq->age_stamp);
4343 avg = ACCESS_ONCE(rq->rt_avg);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004344
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004345 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004346
4347 if (unlikely(total < avg)) {
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004348 /* Ensures that power won't end up being negative */
4349 available = 0;
4350 } else {
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004351 available = total - avg;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004352 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004353
Nikhil Rao1399fa72011-05-18 10:09:39 -07004354 if (unlikely((s64)total < SCHED_POWER_SCALE))
4355 total = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004356
Nikhil Rao1399fa72011-05-18 10:09:39 -07004357 total >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004358
4359 return div_u64(available, total);
4360}
4361
4362static void update_cpu_power(struct sched_domain *sd, int cpu)
4363{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004364 unsigned long weight = sd->span_weight;
Nikhil Rao1399fa72011-05-18 10:09:39 -07004365 unsigned long power = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004366 struct sched_group *sdg = sd->groups;
4367
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004368 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4369 if (sched_feat(ARCH_POWER))
4370 power *= arch_scale_smt_power(sd, cpu);
4371 else
4372 power *= default_scale_smt_power(sd, cpu);
4373
Nikhil Rao1399fa72011-05-18 10:09:39 -07004374 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004375 }
4376
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004377 sdg->sgp->power_orig = power;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004378
4379 if (sched_feat(ARCH_POWER))
4380 power *= arch_scale_freq_power(sd, cpu);
4381 else
4382 power *= default_scale_freq_power(sd, cpu);
4383
Nikhil Rao1399fa72011-05-18 10:09:39 -07004384 power >>= SCHED_POWER_SHIFT;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004385
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004386 power *= scale_rt_power(cpu);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004387 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004388
4389 if (!power)
4390 power = 1;
4391
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004392 cpu_rq(cpu)->cpu_power = power;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004393 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004394}
4395
Peter Zijlstra029632f2011-10-25 10:00:11 +02004396void update_group_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004397{
4398 struct sched_domain *child = sd->child;
4399 struct sched_group *group, *sdg = sd->groups;
4400 unsigned long power;
Vincent Guittot4ec44122011-12-12 20:21:08 +01004401 unsigned long interval;
4402
4403 interval = msecs_to_jiffies(sd->balance_interval);
4404 interval = clamp(interval, 1UL, max_load_balance_interval);
4405 sdg->sgp->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004406
4407 if (!child) {
4408 update_cpu_power(sd, cpu);
4409 return;
4410 }
4411
4412 power = 0;
4413
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004414 if (child->flags & SD_OVERLAP) {
4415 /*
4416 * SD_OVERLAP domains cannot assume that child groups
4417 * span the current group.
4418 */
4419
4420 for_each_cpu(cpu, sched_group_cpus(sdg))
4421 power += power_of(cpu);
4422 } else {
4423 /*
4424 * !SD_OVERLAP domains can assume that child groups
4425 * span the current group.
4426 */
4427
4428 group = child->groups;
4429 do {
4430 power += group->sgp->power;
4431 group = group->next;
4432 } while (group != child->groups);
4433 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004434
Peter Zijlstrac3decf02012-05-31 12:05:32 +02004435 sdg->sgp->power_orig = sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004436}
4437
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004438/*
4439 * Try and fix up capacity for tiny siblings, this is needed when
4440 * things like SD_ASYM_PACKING need f_b_g to select another sibling
4441 * which on its own isn't powerful enough.
4442 *
4443 * See update_sd_pick_busiest() and check_asym_packing().
4444 */
4445static inline int
4446fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4447{
4448 /*
Nikhil Rao1399fa72011-05-18 10:09:39 -07004449 * Only siblings can have significantly less than SCHED_POWER_SCALE
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004450 */
Peter Zijlstraa6c75f22011-04-07 14:09:52 +02004451 if (!(sd->flags & SD_SHARE_CPUPOWER))
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004452 return 0;
4453
4454 /*
4455 * If ~90% of the cpu_power is still there, we're good.
4456 */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004457 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004458 return 1;
4459
4460 return 0;
4461}
4462
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004463/**
4464 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004465 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004466 * @group: sched_group whose statistics are to be updated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004467 * @load_idx: Load index of sched_domain of this_cpu for load calc.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004468 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004469 * @balance: Should we balance.
4470 * @sgs: variable to hold the statistics for this group.
4471 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004472static inline void update_sg_lb_stats(struct lb_env *env,
4473 struct sched_group *group, int load_idx,
Michael Wangb94031302012-07-12 16:10:13 +08004474 int local_group, int *balance, struct sg_lb_stats *sgs)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004475{
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004476 unsigned long nr_running, max_nr_running, min_nr_running;
4477 unsigned long load, max_cpu_load, min_cpu_load;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004478 unsigned int balance_cpu = -1, first_idle_cpu = 0;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004479 unsigned long avg_load_per_task = 0;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004480 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004481
Gautham R Shenoy871e35b2010-01-20 14:02:44 -06004482 if (local_group)
Peter Zijlstrac1174872012-05-31 14:47:33 +02004483 balance_cpu = group_balance_cpu(group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004484
4485 /* Tally up the load of all CPUs in the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004486 max_cpu_load = 0;
4487 min_cpu_load = ~0UL;
Nikhil Rao2582f0e2010-10-13 12:09:36 -07004488 max_nr_running = 0;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004489 min_nr_running = ~0UL;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004490
Michael Wangb94031302012-07-12 16:10:13 +08004491 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004492 struct rq *rq = cpu_rq(i);
4493
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004494 nr_running = rq->nr_running;
4495
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004496 /* Bias balancing toward cpus of our domain */
4497 if (local_group) {
Peter Zijlstrac1174872012-05-31 14:47:33 +02004498 if (idle_cpu(i) && !first_idle_cpu &&
4499 cpumask_test_cpu(i, sched_group_mask(group))) {
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004500 first_idle_cpu = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004501 balance_cpu = i;
4502 }
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004503
4504 load = target_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004505 } else {
4506 load = source_load(i, load_idx);
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004507 if (load > max_cpu_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004508 max_cpu_load = load;
4509 if (min_cpu_load > load)
4510 min_cpu_load = load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004511
4512 if (nr_running > max_nr_running)
4513 max_nr_running = nr_running;
4514 if (min_nr_running > nr_running)
4515 min_nr_running = nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004516 }
4517
4518 sgs->group_load += load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004519 sgs->sum_nr_running += nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004520 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004521 if (idle_cpu(i))
4522 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004523 }
4524
4525 /*
4526 * First idle cpu or the first cpu(busiest) in this sched group
4527 * is eligible for doing load balancing at this and above
4528 * domains. In the newly idle case, we will allow all the cpu's
4529 * to do the newly idle load balance.
4530 */
Vincent Guittot4ec44122011-12-12 20:21:08 +01004531 if (local_group) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004532 if (env->idle != CPU_NEWLY_IDLE) {
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004533 if (balance_cpu != env->dst_cpu) {
Vincent Guittot4ec44122011-12-12 20:21:08 +01004534 *balance = 0;
4535 return;
4536 }
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004537 update_group_power(env->sd, env->dst_cpu);
Vincent Guittot4ec44122011-12-12 20:21:08 +01004538 } else if (time_after_eq(jiffies, group->sgp->next_update))
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004539 update_group_power(env->sd, env->dst_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004540 }
4541
4542 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004543 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004544
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004545 /*
4546 * Consider the group unbalanced when the imbalance is larger
Peter Zijlstra866ab432011-02-21 18:56:47 +01004547 * than the average weight of a task.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004548 *
4549 * APZ: with cgroup the avg task weight can vary wildly and
4550 * might not be a suitable number - should we keep a
4551 * normalized nr_running number somewhere that negates
4552 * the hierarchy?
4553 */
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004554 if (sgs->sum_nr_running)
4555 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004556
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004557 if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
4558 (max_nr_running - min_nr_running) > 1)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004559 sgs->group_imb = 1;
4560
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004561 sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
Nikhil Rao1399fa72011-05-18 10:09:39 -07004562 SCHED_POWER_SCALE);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004563 if (!sgs->group_capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004564 sgs->group_capacity = fix_small_capacity(env->sd, group);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004565 sgs->group_weight = group->group_weight;
Nikhil Raofab47622010-10-15 13:12:29 -07004566
4567 if (sgs->group_capacity > sgs->sum_nr_running)
4568 sgs->group_has_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004569}
4570
4571/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10004572 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07004573 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004574 * @sds: sched_domain statistics
4575 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10004576 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10004577 *
4578 * Determine if @sg is a busier group than the previously selected
4579 * busiest group.
Yacine Belkadie69f6182013-07-12 20:45:47 +02004580 *
4581 * Return: %true if @sg is a busier group than the previously selected
4582 * busiest group. %false otherwise.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004583 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004584static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10004585 struct sd_lb_stats *sds,
4586 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004587 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004588{
4589 if (sgs->avg_load <= sds->max_load)
4590 return false;
4591
4592 if (sgs->sum_nr_running > sgs->group_capacity)
4593 return true;
4594
4595 if (sgs->group_imb)
4596 return true;
4597
4598 /*
4599 * ASYM_PACKING needs to move all the work to the lowest
4600 * numbered CPUs in the group, therefore mark all groups
4601 * higher than ourself as busy.
4602 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004603 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4604 env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10004605 if (!sds->busiest)
4606 return true;
4607
4608 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4609 return true;
4610 }
4611
4612 return false;
4613}
4614
4615/**
Hui Kang461819a2011-10-11 23:00:59 -04004616 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004617 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004618 * @balance: Should we balance.
4619 * @sds: variable to hold the statistics for this sched_domain.
4620 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004621static inline void update_sd_lb_stats(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08004622 int *balance, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004623{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004624 struct sched_domain *child = env->sd->child;
4625 struct sched_group *sg = env->sd->groups;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004626 struct sg_lb_stats sgs;
4627 int load_idx, prefer_sibling = 0;
4628
4629 if (child && child->flags & SD_PREFER_SIBLING)
4630 prefer_sibling = 1;
4631
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004632 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004633
4634 do {
4635 int local_group;
4636
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004637 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004638 memset(&sgs, 0, sizeof(sgs));
Michael Wangb94031302012-07-12 16:10:13 +08004639 update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004640
Peter Zijlstra8f190fb2009-12-24 14:18:21 +01004641 if (local_group && !(*balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004642 return;
4643
4644 sds->total_load += sgs.group_load;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004645 sds->total_pwr += sg->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004646
4647 /*
4648 * In case the child domain prefers tasks go to siblings
Michael Neuling532cb4c2010-06-08 14:57:02 +10004649 * first, lower the sg capacity to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07004650 * and move all the excess tasks away. We lower the capacity
4651 * of a group only if the local group has the capacity to fit
4652 * these excess tasks, i.e. nr_running < group_capacity. The
4653 * extra check prevents the case where you always pull from the
4654 * heaviest group when it is already under-utilized (possible
4655 * with a large weight task outweighs the tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004656 */
Nikhil Rao75dd3212010-10-15 13:12:30 -07004657 if (prefer_sibling && !local_group && sds->this_has_capacity)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004658 sgs.group_capacity = min(sgs.group_capacity, 1UL);
4659
4660 if (local_group) {
4661 sds->this_load = sgs.avg_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10004662 sds->this = sg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004663 sds->this_nr_running = sgs.sum_nr_running;
4664 sds->this_load_per_task = sgs.sum_weighted_load;
Nikhil Raofab47622010-10-15 13:12:29 -07004665 sds->this_has_capacity = sgs.group_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004666 sds->this_idle_cpus = sgs.idle_cpus;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004667 } else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004668 sds->max_load = sgs.avg_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10004669 sds->busiest = sg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004670 sds->busiest_nr_running = sgs.sum_nr_running;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004671 sds->busiest_idle_cpus = sgs.idle_cpus;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004672 sds->busiest_group_capacity = sgs.group_capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004673 sds->busiest_load_per_task = sgs.sum_weighted_load;
Nikhil Raofab47622010-10-15 13:12:29 -07004674 sds->busiest_has_capacity = sgs.group_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004675 sds->busiest_group_weight = sgs.group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004676 sds->group_imb = sgs.group_imb;
4677 }
4678
Michael Neuling532cb4c2010-06-08 14:57:02 +10004679 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004680 } while (sg != env->sd->groups);
Michael Neuling532cb4c2010-06-08 14:57:02 +10004681}
4682
Michael Neuling532cb4c2010-06-08 14:57:02 +10004683/**
4684 * check_asym_packing - Check to see if the group is packed into the
4685 * sched doman.
4686 *
4687 * This is primarily intended to used at the sibling level. Some
4688 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4689 * case of POWER7, it can move to lower SMT modes only when higher
4690 * threads are idle. When in lower SMT modes, the threads will
4691 * perform better since they share less core resources. Hence when we
4692 * have idle threads, we want them to be the higher ones.
4693 *
4694 * This packing function is run on idle threads. It checks to see if
4695 * the busiest CPU in this domain (core in the P7 case) has a higher
4696 * CPU number than the packing function is being run on. Here we are
4697 * assuming lower CPU number will be equivalent to lower a SMT thread
4698 * number.
4699 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02004700 * Return: 1 when packing is required and a task should be moved to
Michael Neulingb6b12292010-06-10 12:06:21 +10004701 * this CPU. The amount of the imbalance is returned in *imbalance.
4702 *
Randy Dunlapcd968912012-06-08 13:18:33 -07004703 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004704 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10004705 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004706static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004707{
4708 int busiest_cpu;
4709
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004710 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10004711 return 0;
4712
4713 if (!sds->busiest)
4714 return 0;
4715
4716 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004717 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004718 return 0;
4719
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004720 env->imbalance = DIV_ROUND_CLOSEST(
4721 sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
4722
Michael Neuling532cb4c2010-06-08 14:57:02 +10004723 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004724}
4725
4726/**
4727 * fix_small_imbalance - Calculate the minor imbalance that exists
4728 * amongst the groups of a sched_domain, during
4729 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004730 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004731 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004732 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004733static inline
4734void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004735{
4736 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4737 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004738 unsigned long scaled_busy_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004739
4740 if (sds->this_nr_running) {
4741 sds->this_load_per_task /= sds->this_nr_running;
4742 if (sds->busiest_load_per_task >
4743 sds->this_load_per_task)
4744 imbn = 1;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004745 } else {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004746 sds->this_load_per_task =
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004747 cpu_avg_load_per_task(env->dst_cpu);
4748 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004749
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004750 scaled_busy_load_per_task = sds->busiest_load_per_task
Nikhil Rao1399fa72011-05-18 10:09:39 -07004751 * SCHED_POWER_SCALE;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004752 scaled_busy_load_per_task /= sds->busiest->sgp->power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004753
4754 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4755 (scaled_busy_load_per_task * imbn)) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004756 env->imbalance = sds->busiest_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004757 return;
4758 }
4759
4760 /*
4761 * OK, we don't have enough imbalance to justify moving tasks,
4762 * however we may be able to increase total CPU power used by
4763 * moving them.
4764 */
4765
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004766 pwr_now += sds->busiest->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004767 min(sds->busiest_load_per_task, sds->max_load);
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004768 pwr_now += sds->this->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004769 min(sds->this_load_per_task, sds->this_load);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004770 pwr_now /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004771
4772 /* Amount of load we'd subtract */
Nikhil Rao1399fa72011-05-18 10:09:39 -07004773 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004774 sds->busiest->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004775 if (sds->max_load > tmp)
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004776 pwr_move += sds->busiest->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004777 min(sds->busiest_load_per_task, sds->max_load - tmp);
4778
4779 /* Amount of load we'd add */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004780 if (sds->max_load * sds->busiest->sgp->power <
Nikhil Rao1399fa72011-05-18 10:09:39 -07004781 sds->busiest_load_per_task * SCHED_POWER_SCALE)
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004782 tmp = (sds->max_load * sds->busiest->sgp->power) /
4783 sds->this->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004784 else
Nikhil Rao1399fa72011-05-18 10:09:39 -07004785 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004786 sds->this->sgp->power;
4787 pwr_move += sds->this->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004788 min(sds->this_load_per_task, sds->this_load + tmp);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004789 pwr_move /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004790
4791 /* Move if we gain throughput */
4792 if (pwr_move > pwr_now)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004793 env->imbalance = sds->busiest_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004794}
4795
4796/**
4797 * calculate_imbalance - Calculate the amount of imbalance present within the
4798 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004799 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004800 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004801 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004802static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004803{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004804 unsigned long max_pull, load_above_capacity = ~0UL;
4805
4806 sds->busiest_load_per_task /= sds->busiest_nr_running;
4807 if (sds->group_imb) {
4808 sds->busiest_load_per_task =
4809 min(sds->busiest_load_per_task, sds->avg_load);
4810 }
4811
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004812 /*
4813 * In the presence of smp nice balancing, certain scenarios can have
4814 * max load less than avg load(as we skip the groups at or below
4815 * its cpu_power, while calculating max_load..)
4816 */
4817 if (sds->max_load < sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004818 env->imbalance = 0;
4819 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004820 }
4821
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004822 if (!sds->group_imb) {
4823 /*
4824 * Don't want to pull so many tasks that a group would go idle.
4825 */
4826 load_above_capacity = (sds->busiest_nr_running -
4827 sds->busiest_group_capacity);
4828
Nikhil Rao1399fa72011-05-18 10:09:39 -07004829 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004830
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004831 load_above_capacity /= sds->busiest->sgp->power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004832 }
4833
4834 /*
4835 * We're trying to get all the cpus to the average_load, so we don't
4836 * want to push ourselves above the average load, nor do we wish to
4837 * reduce the max loaded cpu below the average load. At the same time,
4838 * we also don't want to reduce the group load below the group capacity
4839 * (so that we can implement power-savings policies etc). Thus we look
4840 * for the minimum possible imbalance.
4841 * Be careful of negative numbers as they'll appear as very large values
4842 * with unsigned longs.
4843 */
4844 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004845
4846 /* How much load to actually move to equalise the imbalance */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004847 env->imbalance = min(max_pull * sds->busiest->sgp->power,
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004848 (sds->avg_load - sds->this_load) * sds->this->sgp->power)
Nikhil Rao1399fa72011-05-18 10:09:39 -07004849 / SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004850
4851 /*
4852 * if *imbalance is less than the average load per runnable task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004853 * there is no guarantee that any tasks will be moved so we'll have
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004854 * a think about bumping its value to force at least one task to be
4855 * moved
4856 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004857 if (env->imbalance < sds->busiest_load_per_task)
4858 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004859
4860}
Nikhil Raofab47622010-10-15 13:12:29 -07004861
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004862/******* find_busiest_group() helpers end here *********************/
4863
4864/**
4865 * find_busiest_group - Returns the busiest group within the sched_domain
4866 * if there is an imbalance. If there isn't an imbalance, and
4867 * the user has opted for power-savings, it returns a group whose
4868 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4869 * such a group exists.
4870 *
4871 * Also calculates the amount of weighted load which should be moved
4872 * to restore balance.
4873 *
Randy Dunlapcd968912012-06-08 13:18:33 -07004874 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004875 * @balance: Pointer to a variable indicating if this_cpu
4876 * is the appropriate cpu to perform load balancing at this_level.
4877 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02004878 * Return: - The busiest group if imbalance exists.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004879 * - If no imbalance and user has opted for power-savings balance,
4880 * return the least loaded group whose CPUs can be
4881 * put to idle by rebalancing its tasks onto our group.
4882 */
4883static struct sched_group *
Michael Wangb94031302012-07-12 16:10:13 +08004884find_busiest_group(struct lb_env *env, int *balance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004885{
4886 struct sd_lb_stats sds;
4887
4888 memset(&sds, 0, sizeof(sds));
4889
4890 /*
4891 * Compute the various statistics relavent for load balancing at
4892 * this level.
4893 */
Michael Wangb94031302012-07-12 16:10:13 +08004894 update_sd_lb_stats(env, balance, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004895
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004896 /*
4897 * this_cpu is not the appropriate cpu to perform load balancing at
4898 * this level.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004899 */
Peter Zijlstra8f190fb2009-12-24 14:18:21 +01004900 if (!(*balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004901 goto ret;
4902
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004903 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4904 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10004905 return sds.busiest;
4906
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004907 /* There is no busy sibling group to pull tasks from */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004908 if (!sds.busiest || sds.busiest_nr_running == 0)
4909 goto out_balanced;
4910
Nikhil Rao1399fa72011-05-18 10:09:39 -07004911 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
Ken Chenb0432d82011-04-07 17:23:22 -07004912
Peter Zijlstra866ab432011-02-21 18:56:47 +01004913 /*
4914 * If the busiest group is imbalanced the below checks don't
4915 * work because they assumes all things are equal, which typically
4916 * isn't true due to cpus_allowed constraints and the like.
4917 */
4918 if (sds.group_imb)
4919 goto force_balance;
4920
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004921 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004922 if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
Nikhil Raofab47622010-10-15 13:12:29 -07004923 !sds.busiest_has_capacity)
4924 goto force_balance;
4925
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004926 /*
4927 * If the local group is more busy than the selected busiest group
4928 * don't try and pull any tasks.
4929 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004930 if (sds.this_load >= sds.max_load)
4931 goto out_balanced;
4932
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004933 /*
4934 * Don't pull any tasks if this group is already above the domain
4935 * average load.
4936 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004937 if (sds.this_load >= sds.avg_load)
4938 goto out_balanced;
4939
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004940 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004941 /*
4942 * This cpu is idle. If the busiest group load doesn't
4943 * have more tasks than the number of available cpu's and
4944 * there is no imbalance between this and busiest group
4945 * wrt to idle cpu's, it is balanced.
4946 */
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004947 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004948 sds.busiest_nr_running <= sds.busiest_group_weight)
4949 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004950 } else {
4951 /*
4952 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4953 * imbalance_pct to be conservative.
4954 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004955 if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004956 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004957 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004958
Nikhil Raofab47622010-10-15 13:12:29 -07004959force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004960 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004961 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004962 return sds.busiest;
4963
4964out_balanced:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004965ret:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004966 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004967 return NULL;
4968}
4969
4970/*
4971 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4972 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004973static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08004974 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004975{
4976 struct rq *busiest = NULL, *rq;
4977 unsigned long max_load = 0;
4978 int i;
4979
4980 for_each_cpu(i, sched_group_cpus(group)) {
4981 unsigned long power = power_of(i);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004982 unsigned long capacity = DIV_ROUND_CLOSEST(power,
4983 SCHED_POWER_SCALE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004984 unsigned long wl;
4985
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004986 if (!capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004987 capacity = fix_small_capacity(env->sd, group);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004988
Michael Wangb94031302012-07-12 16:10:13 +08004989 if (!cpumask_test_cpu(i, env->cpus))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004990 continue;
4991
4992 rq = cpu_rq(i);
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004993 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004994
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004995 /*
4996 * When comparing with imbalance, use weighted_cpuload()
4997 * which is not scaled with the cpu power.
4998 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004999 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005000 continue;
5001
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005002 /*
5003 * For the load comparisons with the other cpu's, consider
5004 * the weighted_cpuload() scaled with the cpu power, so that
5005 * the load can be moved away from the cpu that is potentially
5006 * running at a lower capacity.
5007 */
Nikhil Rao1399fa72011-05-18 10:09:39 -07005008 wl = (wl * SCHED_POWER_SCALE) / power;
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005009
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005010 if (wl > max_load) {
5011 max_load = wl;
5012 busiest = rq;
5013 }
5014 }
5015
5016 return busiest;
5017}
5018
5019/*
5020 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
5021 * so long as it is large enough.
5022 */
5023#define MAX_PINNED_INTERVAL 512
5024
5025/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09005026DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005027
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005028static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005029{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005030 struct sched_domain *sd = env->sd;
5031
5032 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005033
5034 /*
5035 * ASYM_PACKING needs to force migrate tasks from busy but
5036 * higher numbered CPUs in order to pack all tasks in the
5037 * lowest numbered CPUs.
5038 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005039 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005040 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005041 }
5042
5043 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5044}
5045
Tejun Heo969c7922010-05-06 18:49:21 +02005046static int active_load_balance_cpu_stop(void *data);
5047
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005048/*
5049 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5050 * tasks if there is an imbalance.
5051 */
5052static int load_balance(int this_cpu, struct rq *this_rq,
5053 struct sched_domain *sd, enum cpu_idle_type idle,
5054 int *balance)
5055{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305056 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005057 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005058 struct rq *busiest;
5059 unsigned long flags;
Joonsoo Kime6252c32013-04-23 17:27:41 +09005060 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005061
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005062 struct lb_env env = {
5063 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005064 .dst_cpu = this_cpu,
5065 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305066 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005067 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02005068 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08005069 .cpus = cpus,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005070 };
5071
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005072 /*
5073 * For NEWLY_IDLE load_balancing, we don't need to consider
5074 * other cpus in our group
5075 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005076 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005077 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005078
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005079 cpumask_copy(cpus, cpu_active_mask);
5080
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005081 schedstat_inc(sd, lb_count[idle]);
5082
5083redo:
Michael Wangb94031302012-07-12 16:10:13 +08005084 group = find_busiest_group(&env, balance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005085
5086 if (*balance == 0)
5087 goto out_balanced;
5088
5089 if (!group) {
5090 schedstat_inc(sd, lb_nobusyg[idle]);
5091 goto out_balanced;
5092 }
5093
Michael Wangb94031302012-07-12 16:10:13 +08005094 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005095 if (!busiest) {
5096 schedstat_inc(sd, lb_nobusyq[idle]);
5097 goto out_balanced;
5098 }
5099
Michael Wang78feefc2012-08-06 16:41:59 +08005100 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005101
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005102 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005103
5104 ld_moved = 0;
5105 if (busiest->nr_running > 1) {
5106 /*
5107 * Attempt to move tasks. If find_busiest_group has found
5108 * an imbalance but busiest->nr_running <= 1, the group is
5109 * still unbalanced. ld_moved simply stays zero, so it is
5110 * correctly treated as an imbalance.
5111 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005112 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02005113 env.src_cpu = busiest->cpu;
5114 env.src_rq = busiest;
5115 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005116
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005117 update_h_load(env.src_cpu);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005118more_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005119 local_irq_save(flags);
Michael Wang78feefc2012-08-06 16:41:59 +08005120 double_rq_lock(env.dst_rq, busiest);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305121
5122 /*
5123 * cur_ld_moved - load moved in current iteration
5124 * ld_moved - cumulative load moved across iterations
5125 */
5126 cur_ld_moved = move_tasks(&env);
5127 ld_moved += cur_ld_moved;
Michael Wang78feefc2012-08-06 16:41:59 +08005128 double_rq_unlock(env.dst_rq, busiest);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005129 local_irq_restore(flags);
5130
5131 /*
5132 * some other cpu did the load balance for us.
5133 */
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305134 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5135 resched_cpu(env.dst_cpu);
5136
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09005137 if (env.flags & LBF_NEED_BREAK) {
5138 env.flags &= ~LBF_NEED_BREAK;
5139 goto more_balance;
5140 }
5141
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305142 /*
5143 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5144 * us and move them to an alternate dst_cpu in our sched_group
5145 * where they can run. The upper limit on how many times we
5146 * iterate on same src_cpu is dependent on number of cpus in our
5147 * sched_group.
5148 *
5149 * This changes load balance semantics a bit on who can move
5150 * load to a given_cpu. In addition to the given_cpu itself
5151 * (or a ilb_cpu acting on its behalf where given_cpu is
5152 * nohz-idle), we now have balance_cpu in a position to move
5153 * load to given_cpu. In rare situations, this may cause
5154 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
5155 * _independently_ and at _same_ time to move some load to
5156 * given_cpu) causing exceess load to be moved to given_cpu.
5157 * This however should not happen so much in practice and
5158 * moreover subsequent load balance cycles should correct the
5159 * excess load moved.
5160 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005161 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305162
Michael Wang78feefc2012-08-06 16:41:59 +08005163 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305164 env.dst_cpu = env.new_dst_cpu;
5165 env.flags &= ~LBF_SOME_PINNED;
5166 env.loop = 0;
5167 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005168
5169 /* Prevent to re-select dst_cpu via env's cpus */
5170 cpumask_clear_cpu(env.dst_cpu, env.cpus);
5171
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305172 /*
5173 * Go back to "more_balance" rather than "redo" since we
5174 * need to continue with same src_cpu.
5175 */
5176 goto more_balance;
5177 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005178
5179 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005180 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005181 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305182 if (!cpumask_empty(cpus)) {
5183 env.loop = 0;
5184 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005185 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305186 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005187 goto out_balanced;
5188 }
5189 }
5190
5191 if (!ld_moved) {
5192 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07005193 /*
5194 * Increment the failure counter only on periodic balance.
5195 * We do not want newidle balance, which can be very
5196 * frequent, pollute the failure counter causing
5197 * excessive cache_hot migrations and active balances.
5198 */
5199 if (idle != CPU_NEWLY_IDLE)
5200 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005201
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005202 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005203 raw_spin_lock_irqsave(&busiest->lock, flags);
5204
Tejun Heo969c7922010-05-06 18:49:21 +02005205 /* don't kick the active_load_balance_cpu_stop,
5206 * if the curr task on busiest cpu can't be
5207 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005208 */
5209 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02005210 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005211 raw_spin_unlock_irqrestore(&busiest->lock,
5212 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005213 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005214 goto out_one_pinned;
5215 }
5216
Tejun Heo969c7922010-05-06 18:49:21 +02005217 /*
5218 * ->active_balance synchronizes accesses to
5219 * ->active_balance_work. Once set, it's cleared
5220 * only after active load balance is finished.
5221 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005222 if (!busiest->active_balance) {
5223 busiest->active_balance = 1;
5224 busiest->push_cpu = this_cpu;
5225 active_balance = 1;
5226 }
5227 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005228
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005229 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02005230 stop_one_cpu_nowait(cpu_of(busiest),
5231 active_load_balance_cpu_stop, busiest,
5232 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005233 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005234
5235 /*
5236 * We've kicked active balancing, reset the failure
5237 * counter.
5238 */
5239 sd->nr_balance_failed = sd->cache_nice_tries+1;
5240 }
5241 } else
5242 sd->nr_balance_failed = 0;
5243
5244 if (likely(!active_balance)) {
5245 /* We were unbalanced, so reset the balancing interval */
5246 sd->balance_interval = sd->min_interval;
5247 } else {
5248 /*
5249 * If we've begun active balancing, start to back off. This
5250 * case may not be covered by the all_pinned logic if there
5251 * is only 1 task on the busy runqueue (because we don't call
5252 * move_tasks).
5253 */
5254 if (sd->balance_interval < sd->max_interval)
5255 sd->balance_interval *= 2;
5256 }
5257
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005258 goto out;
5259
5260out_balanced:
5261 schedstat_inc(sd, lb_balanced[idle]);
5262
5263 sd->nr_balance_failed = 0;
5264
5265out_one_pinned:
5266 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005267 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02005268 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005269 (sd->balance_interval < sd->max_interval))
5270 sd->balance_interval *= 2;
5271
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08005272 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005273out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005274 return ld_moved;
5275}
5276
5277/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005278 * idle_balance is called by schedule() if this_cpu is about to become
5279 * idle. Attempts to pull tasks from other CPUs.
5280 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005281void idle_balance(int this_cpu, struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005282{
5283 struct sched_domain *sd;
5284 int pulled_task = 0;
5285 unsigned long next_balance = jiffies + HZ;
5286
Frederic Weisbecker78becc22013-04-12 01:51:02 +02005287 this_rq->idle_stamp = rq_clock(this_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005288
5289 if (this_rq->avg_idle < sysctl_sched_migration_cost)
5290 return;
5291
Peter Zijlstraf492e122009-12-23 15:29:42 +01005292 /*
5293 * Drop the rq->lock, but keep IRQ/preempt disabled.
5294 */
5295 raw_spin_unlock(&this_rq->lock);
5296
Paul Turner48a16752012-10-04 13:18:31 +02005297 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02005298 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005299 for_each_domain(this_cpu, sd) {
5300 unsigned long interval;
Peter Zijlstraf492e122009-12-23 15:29:42 +01005301 int balance = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005302
5303 if (!(sd->flags & SD_LOAD_BALANCE))
5304 continue;
5305
Peter Zijlstraf492e122009-12-23 15:29:42 +01005306 if (sd->flags & SD_BALANCE_NEWIDLE) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005307 /* If we've pulled tasks over stop searching: */
Peter Zijlstraf492e122009-12-23 15:29:42 +01005308 pulled_task = load_balance(this_cpu, this_rq,
5309 sd, CPU_NEWLY_IDLE, &balance);
5310 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005311
5312 interval = msecs_to_jiffies(sd->balance_interval);
5313 if (time_after(next_balance, sd->last_balance + interval))
5314 next_balance = sd->last_balance + interval;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005315 if (pulled_task) {
5316 this_rq->idle_stamp = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005317 break;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005318 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005319 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005320 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01005321
5322 raw_spin_lock(&this_rq->lock);
5323
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005324 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5325 /*
5326 * We are going idle. next_balance may be set based on
5327 * a busy processor. So reset next_balance.
5328 */
5329 this_rq->next_balance = next_balance;
5330 }
5331}
5332
5333/*
Tejun Heo969c7922010-05-06 18:49:21 +02005334 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
5335 * running tasks off the busiest CPU onto idle CPUs. It requires at
5336 * least 1 task to be running on each physical CPU where possible, and
5337 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005338 */
Tejun Heo969c7922010-05-06 18:49:21 +02005339static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005340{
Tejun Heo969c7922010-05-06 18:49:21 +02005341 struct rq *busiest_rq = data;
5342 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005343 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02005344 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005345 struct sched_domain *sd;
Tejun Heo969c7922010-05-06 18:49:21 +02005346
5347 raw_spin_lock_irq(&busiest_rq->lock);
5348
5349 /* make sure the requested cpu hasn't gone down in the meantime */
5350 if (unlikely(busiest_cpu != smp_processor_id() ||
5351 !busiest_rq->active_balance))
5352 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005353
5354 /* Is there any task to move? */
5355 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02005356 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005357
5358 /*
5359 * This condition is "impossible", if it occurs
5360 * we need to fix it. Originally reported by
5361 * Bjorn Helgaas on a 128-cpu setup.
5362 */
5363 BUG_ON(busiest_rq == target_rq);
5364
5365 /* move a task from busiest_rq to target_rq */
5366 double_lock_balance(busiest_rq, target_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005367
5368 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02005369 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005370 for_each_domain(target_cpu, sd) {
5371 if ((sd->flags & SD_LOAD_BALANCE) &&
5372 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5373 break;
5374 }
5375
5376 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005377 struct lb_env env = {
5378 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005379 .dst_cpu = target_cpu,
5380 .dst_rq = target_rq,
5381 .src_cpu = busiest_rq->cpu,
5382 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005383 .idle = CPU_IDLE,
5384 };
5385
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005386 schedstat_inc(sd, alb_count);
5387
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005388 if (move_one_task(&env))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005389 schedstat_inc(sd, alb_pushed);
5390 else
5391 schedstat_inc(sd, alb_failed);
5392 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005393 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005394 double_unlock_balance(busiest_rq, target_rq);
Tejun Heo969c7922010-05-06 18:49:21 +02005395out_unlock:
5396 busiest_rq->active_balance = 0;
5397 raw_spin_unlock_irq(&busiest_rq->lock);
5398 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005399}
5400
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005401#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005402/*
5403 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005404 * - When one of the busy CPUs notice that there may be an idle rebalancing
5405 * needed, they will kick the idle load balancer, which then does idle
5406 * load balancing for all the idle CPUs.
5407 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005408static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005409 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005410 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005411 unsigned long next_balance; /* in jiffy units */
5412} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005413
Peter Zijlstra8e7fbcb2012-01-09 11:28:35 +01005414static inline int find_new_ilb(int call_cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005415{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005416 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005417
Suresh Siddha786d6dc72011-12-01 17:07:35 -08005418 if (ilb < nr_cpu_ids && idle_cpu(ilb))
5419 return ilb;
5420
5421 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005422}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005423
5424/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005425 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
5426 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
5427 * CPU (if there is one).
5428 */
5429static void nohz_balancer_kick(int cpu)
5430{
5431 int ilb_cpu;
5432
5433 nohz.next_balance++;
5434
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005435 ilb_cpu = find_new_ilb(cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005436
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005437 if (ilb_cpu >= nr_cpu_ids)
5438 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005439
Suresh Siddhacd490c52011-12-06 11:26:34 -08005440 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08005441 return;
5442 /*
5443 * Use smp_send_reschedule() instead of resched_cpu().
5444 * This way we generate a sched IPI on the target cpu which
5445 * is idle. And the softirq performing nohz idle load balance
5446 * will be run before returning from the IPI.
5447 */
5448 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005449 return;
5450}
5451
Alex Shic1cc0172012-09-10 15:10:58 +08005452static inline void nohz_balance_exit_idle(int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08005453{
5454 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5455 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5456 atomic_dec(&nohz.nr_cpus);
5457 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5458 }
5459}
5460
Suresh Siddha69e1e812011-12-01 17:07:33 -08005461static inline void set_cpu_sd_state_busy(void)
5462{
5463 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08005464
Suresh Siddha69e1e812011-12-01 17:07:33 -08005465 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05005466 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005467
5468 if (!sd || !sd->nohz_idle)
5469 goto unlock;
5470 sd->nohz_idle = 0;
5471
5472 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08005473 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005474unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08005475 rcu_read_unlock();
5476}
5477
5478void set_cpu_sd_state_idle(void)
5479{
5480 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08005481
Suresh Siddha69e1e812011-12-01 17:07:33 -08005482 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05005483 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005484
5485 if (!sd || sd->nohz_idle)
5486 goto unlock;
5487 sd->nohz_idle = 1;
5488
5489 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08005490 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005491unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08005492 rcu_read_unlock();
5493}
5494
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005495/*
Alex Shic1cc0172012-09-10 15:10:58 +08005496 * This routine will record that the cpu is going idle with tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005497 * This info will be used in performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005498 */
Alex Shic1cc0172012-09-10 15:10:58 +08005499void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005500{
Suresh Siddha71325962012-01-19 18:28:57 -08005501 /*
5502 * If this cpu is going down, then nothing needs to be done.
5503 */
5504 if (!cpu_active(cpu))
5505 return;
5506
Alex Shic1cc0172012-09-10 15:10:58 +08005507 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5508 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005509
Alex Shic1cc0172012-09-10 15:10:58 +08005510 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5511 atomic_inc(&nohz.nr_cpus);
5512 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005513}
Suresh Siddha71325962012-01-19 18:28:57 -08005514
5515static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
5516 unsigned long action, void *hcpu)
5517{
5518 switch (action & ~CPU_TASKS_FROZEN) {
5519 case CPU_DYING:
Alex Shic1cc0172012-09-10 15:10:58 +08005520 nohz_balance_exit_idle(smp_processor_id());
Suresh Siddha71325962012-01-19 18:28:57 -08005521 return NOTIFY_OK;
5522 default:
5523 return NOTIFY_DONE;
5524 }
5525}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005526#endif
5527
5528static DEFINE_SPINLOCK(balancing);
5529
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005530/*
5531 * Scale the max load_balance interval with the number of CPUs in the system.
5532 * This trades load-balance latency on larger machines for less cross talk.
5533 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005534void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005535{
5536 max_load_balance_interval = HZ*num_online_cpus()/10;
5537}
5538
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005539/*
5540 * It checks each scheduling domain to see if it is due to be balanced,
5541 * and initiates a balancing operation if so.
5542 *
Libinb9b08532013-04-01 19:14:01 +08005543 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005544 */
5545static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5546{
5547 int balance = 1;
5548 struct rq *rq = cpu_rq(cpu);
5549 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02005550 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005551 /* Earliest time when we have to do rebalance again */
5552 unsigned long next_balance = jiffies + 60*HZ;
5553 int update_next_balance = 0;
5554 int need_serialize;
5555
Paul Turner48a16752012-10-04 13:18:31 +02005556 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08005557
Peter Zijlstradce840a2011-04-07 14:09:50 +02005558 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005559 for_each_domain(cpu, sd) {
5560 if (!(sd->flags & SD_LOAD_BALANCE))
5561 continue;
5562
5563 interval = sd->balance_interval;
5564 if (idle != CPU_IDLE)
5565 interval *= sd->busy_factor;
5566
5567 /* scale ms to jiffies */
5568 interval = msecs_to_jiffies(interval);
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005569 interval = clamp(interval, 1UL, max_load_balance_interval);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005570
5571 need_serialize = sd->flags & SD_SERIALIZE;
5572
5573 if (need_serialize) {
5574 if (!spin_trylock(&balancing))
5575 goto out;
5576 }
5577
5578 if (time_after_eq(jiffies, sd->last_balance + interval)) {
5579 if (load_balance(cpu, rq, sd, idle, &balance)) {
5580 /*
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09005581 * The LBF_SOME_PINNED logic could have changed
5582 * env->dst_cpu, so we can't know our idle
5583 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005584 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09005585 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005586 }
5587 sd->last_balance = jiffies;
5588 }
5589 if (need_serialize)
5590 spin_unlock(&balancing);
5591out:
5592 if (time_after(next_balance, sd->last_balance + interval)) {
5593 next_balance = sd->last_balance + interval;
5594 update_next_balance = 1;
5595 }
5596
5597 /*
5598 * Stop the load balance at this level. There is another
5599 * CPU in our sched group which is doing load balancing more
5600 * actively.
5601 */
5602 if (!balance)
5603 break;
5604 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005605 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005606
5607 /*
5608 * next_balance will be updated only when there is a need.
5609 * When the cpu is attached to null domain for ex, it will not be
5610 * updated.
5611 */
5612 if (likely(update_next_balance))
5613 rq->next_balance = next_balance;
5614}
5615
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005616#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005617/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005618 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005619 * rebalancing for all the cpus for whom scheduler ticks are stopped.
5620 */
5621static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5622{
5623 struct rq *this_rq = cpu_rq(this_cpu);
5624 struct rq *rq;
5625 int balance_cpu;
5626
Suresh Siddha1c792db2011-12-01 17:07:32 -08005627 if (idle != CPU_IDLE ||
5628 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5629 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005630
5631 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08005632 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005633 continue;
5634
5635 /*
5636 * If this cpu gets work to do, stop the load balancing
5637 * work being done for other cpus. Next load
5638 * balancing owner will pick it up.
5639 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08005640 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005641 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005642
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02005643 rq = cpu_rq(balance_cpu);
5644
5645 raw_spin_lock_irq(&rq->lock);
5646 update_rq_clock(rq);
5647 update_idle_cpu_load(rq);
5648 raw_spin_unlock_irq(&rq->lock);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005649
5650 rebalance_domains(balance_cpu, CPU_IDLE);
5651
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005652 if (time_after(this_rq->next_balance, rq->next_balance))
5653 this_rq->next_balance = rq->next_balance;
5654 }
5655 nohz.next_balance = this_rq->next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08005656end:
5657 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005658}
5659
5660/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005661 * Current heuristic for kicking the idle load balancer in the presence
5662 * of an idle cpu is the system.
5663 * - This rq has more than one task.
5664 * - At any scheduler domain level, this cpu's scheduler group has multiple
5665 * busy cpu's exceeding the group's power.
5666 * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
5667 * domain span are idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005668 */
5669static inline int nohz_kick_needed(struct rq *rq, int cpu)
5670{
5671 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005672 struct sched_domain *sd;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005673
Suresh Siddha1c792db2011-12-01 17:07:32 -08005674 if (unlikely(idle_cpu(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005675 return 0;
5676
Suresh Siddha1c792db2011-12-01 17:07:32 -08005677 /*
5678 * We may be recently in ticked or tickless idle mode. At the first
5679 * busy tick after returning from idle, we will update the busy stats.
5680 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08005681 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08005682 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005683
5684 /*
5685 * None are in tickless mode and hence no need for NOHZ idle load
5686 * balancing.
5687 */
5688 if (likely(!atomic_read(&nohz.nr_cpus)))
5689 return 0;
Suresh Siddha1c792db2011-12-01 17:07:32 -08005690
5691 if (time_before(now, nohz.next_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005692 return 0;
5693
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005694 if (rq->nr_running >= 2)
5695 goto need_kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005696
Peter Zijlstra067491b2011-12-07 14:32:08 +01005697 rcu_read_lock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005698 for_each_domain(cpu, sd) {
5699 struct sched_group *sg = sd->groups;
5700 struct sched_group_power *sgp = sg->sgp;
5701 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005702
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005703 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
Peter Zijlstra067491b2011-12-07 14:32:08 +01005704 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005705
5706 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5707 && (cpumask_first_and(nohz.idle_cpus_mask,
5708 sched_domain_span(sd)) < cpu))
Peter Zijlstra067491b2011-12-07 14:32:08 +01005709 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005710
5711 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5712 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005713 }
Peter Zijlstra067491b2011-12-07 14:32:08 +01005714 rcu_read_unlock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005715 return 0;
Peter Zijlstra067491b2011-12-07 14:32:08 +01005716
5717need_kick_unlock:
5718 rcu_read_unlock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005719need_kick:
5720 return 1;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005721}
5722#else
5723static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5724#endif
5725
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005726/*
5727 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005728 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005729 */
5730static void run_rebalance_domains(struct softirq_action *h)
5731{
5732 int this_cpu = smp_processor_id();
5733 struct rq *this_rq = cpu_rq(this_cpu);
Suresh Siddha6eb57e02011-10-03 15:09:01 -07005734 enum cpu_idle_type idle = this_rq->idle_balance ?
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005735 CPU_IDLE : CPU_NOT_IDLE;
5736
5737 rebalance_domains(this_cpu, idle);
5738
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005739 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005740 * If this cpu has a pending nohz_balance_kick, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005741 * balancing on behalf of the other idle cpus whose ticks are
5742 * stopped.
5743 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005744 nohz_idle_balance(this_cpu, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005745}
5746
5747static inline int on_null_domain(int cpu)
5748{
Paul E. McKenney90a65012010-02-28 08:32:18 -08005749 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005750}
5751
5752/*
5753 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005754 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005755void trigger_load_balance(struct rq *rq, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005756{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005757 /* Don't need to rebalance while attached to NULL domain */
5758 if (time_after_eq(jiffies, rq->next_balance) &&
5759 likely(!on_null_domain(cpu)))
5760 raise_softirq(SCHED_SOFTIRQ);
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005761#ifdef CONFIG_NO_HZ_COMMON
Suresh Siddha1c792db2011-12-01 17:07:32 -08005762 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005763 nohz_balancer_kick(cpu);
5764#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005765}
5766
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005767static void rq_online_fair(struct rq *rq)
5768{
5769 update_sysctl();
5770}
5771
5772static void rq_offline_fair(struct rq *rq)
5773{
5774 update_sysctl();
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07005775
5776 /* Ensure any throttled groups are reachable by pick_next_task */
5777 unthrottle_offline_cfs_rqs(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005778}
5779
Dhaval Giani55e12e52008-06-24 23:39:43 +05305780#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02005781
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005782/*
5783 * scheduler tick hitting a task of our scheduling class:
5784 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005785static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005786{
5787 struct cfs_rq *cfs_rq;
5788 struct sched_entity *se = &curr->se;
5789
5790 for_each_sched_entity(se) {
5791 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005792 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005793 }
Ben Segall18bf2802012-10-04 12:51:20 +02005794
Peter Zijlstracbee9f82012-10-25 14:16:43 +02005795 if (sched_feat_numa(NUMA))
5796 task_tick_numa(rq, curr);
Linus Torvalds3d59eeb2012-12-16 14:33:25 -08005797
Ben Segall18bf2802012-10-04 12:51:20 +02005798 update_rq_runnable_avg(rq, 1);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005799}
5800
5801/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005802 * called on fork with the child task as argument from the parent's context
5803 * - child not yet on the tasklist
5804 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005805 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005806static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005807{
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09005808 struct cfs_rq *cfs_rq;
5809 struct sched_entity *se = &p->se, *curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02005810 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005811 struct rq *rq = this_rq();
5812 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005813
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005814 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005815
Peter Zijlstra861d0342010-08-19 13:31:43 +02005816 update_rq_clock(rq);
5817
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09005818 cfs_rq = task_cfs_rq(current);
5819 curr = cfs_rq->curr;
5820
Paul E. McKenneyb0a0f662010-10-06 17:32:51 -07005821 if (unlikely(task_cpu(p) != this_cpu)) {
5822 rcu_read_lock();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005823 __set_task_cpu(p, this_cpu);
Paul E. McKenneyb0a0f662010-10-06 17:32:51 -07005824 rcu_read_unlock();
5825 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005826
Ting Yang7109c442007-08-28 12:53:24 +02005827 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005828
Mike Galbraithb5d9d732009-09-08 11:12:28 +02005829 if (curr)
5830 se->vruntime = curr->vruntime;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02005831 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02005832
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005833 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02005834 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02005835 * Upon rescheduling, sched_class::put_prev_task() will place
5836 * 'current' within the tree based on its new key value.
5837 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02005838 swap(curr->vruntime, se->vruntime);
Bharata B Raoaec0a512008-08-28 14:42:49 +05305839 resched_task(rq->curr);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02005840 }
5841
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01005842 se->vruntime -= cfs_rq->min_vruntime;
5843
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005844 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005845}
5846
Steven Rostedtcb469842008-01-25 21:08:22 +01005847/*
5848 * Priority of the task has changed. Check to see if we preempt
5849 * the current task.
5850 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005851static void
5852prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01005853{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005854 if (!p->se.on_rq)
5855 return;
5856
Steven Rostedtcb469842008-01-25 21:08:22 +01005857 /*
5858 * Reschedule if we are currently running on this runqueue and
5859 * our priority decreased, or if we are not currently running on
5860 * this runqueue and our priority is higher than the current's
5861 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005862 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01005863 if (p->prio > oldprio)
5864 resched_task(rq->curr);
5865 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02005866 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005867}
5868
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005869static void switched_from_fair(struct rq *rq, struct task_struct *p)
5870{
5871 struct sched_entity *se = &p->se;
5872 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5873
5874 /*
5875 * Ensure the task's vruntime is normalized, so that when its
5876 * switched back to the fair class the enqueue_entity(.flags=0) will
5877 * do the right thing.
5878 *
5879 * If it was on_rq, then the dequeue_entity(.flags=0) will already
5880 * have normalized the vruntime, if it was !on_rq, then only when
5881 * the task is sleeping will it still have non-normalized vruntime.
5882 */
5883 if (!se->on_rq && p->state != TASK_RUNNING) {
5884 /*
5885 * Fix up our vruntime so that the current sleep doesn't
5886 * cause 'unlimited' sleep bonus.
5887 */
5888 place_entity(cfs_rq, se, 0);
5889 se->vruntime -= cfs_rq->min_vruntime;
5890 }
Paul Turner9ee474f2012-10-04 13:18:30 +02005891
Alex Shi141965c2013-06-26 13:05:39 +08005892#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02005893 /*
5894 * Remove our load from contribution when we leave sched_fair
5895 * and ensure we don't carry in an old decay_count if we
5896 * switch back.
5897 */
5898 if (p->se.avg.decay_count) {
5899 struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
5900 __synchronize_entity_decay(&p->se);
5901 subtract_blocked_load_contrib(cfs_rq,
5902 p->se.avg.load_avg_contrib);
5903 }
5904#endif
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005905}
5906
Steven Rostedtcb469842008-01-25 21:08:22 +01005907/*
5908 * We switched to the sched_fair class.
5909 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005910static void switched_to_fair(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01005911{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005912 if (!p->se.on_rq)
5913 return;
5914
Steven Rostedtcb469842008-01-25 21:08:22 +01005915 /*
5916 * We were most likely switched from sched_rt, so
5917 * kick off the schedule if running, otherwise just see
5918 * if we can still preempt the current task.
5919 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005920 if (rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01005921 resched_task(rq->curr);
5922 else
Peter Zijlstra15afe092008-09-20 23:38:02 +02005923 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005924}
5925
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02005926/* Account for a task changing its policy or group.
5927 *
5928 * This routine is mostly called to set cfs_rq->curr field when a task
5929 * migrates between groups/classes.
5930 */
5931static void set_curr_task_fair(struct rq *rq)
5932{
5933 struct sched_entity *se = &rq->curr->se;
5934
Paul Turnerec12cb72011-07-21 09:43:30 -07005935 for_each_sched_entity(se) {
5936 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5937
5938 set_next_entity(cfs_rq, se);
5939 /* ensure bandwidth has been allocated on our new cfs_rq */
5940 account_cfs_rq_runtime(cfs_rq, 0);
5941 }
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02005942}
5943
Peter Zijlstra029632f2011-10-25 10:00:11 +02005944void init_cfs_rq(struct cfs_rq *cfs_rq)
5945{
5946 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra029632f2011-10-25 10:00:11 +02005947 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
5948#ifndef CONFIG_64BIT
5949 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5950#endif
Alex Shi141965c2013-06-26 13:05:39 +08005951#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02005952 atomic64_set(&cfs_rq->decay_counter, 1);
Alex Shi25099402013-06-20 10:18:55 +08005953 atomic_long_set(&cfs_rq->removed_load, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02005954#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02005955}
5956
Peter Zijlstra810b3812008-02-29 15:21:01 -05005957#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02005958static void task_move_group_fair(struct task_struct *p, int on_rq)
Peter Zijlstra810b3812008-02-29 15:21:01 -05005959{
Paul Turneraff3e492012-10-04 13:18:30 +02005960 struct cfs_rq *cfs_rq;
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02005961 /*
5962 * If the task was not on the rq at the time of this cgroup movement
5963 * it must have been asleep, sleeping tasks keep their ->vruntime
5964 * absolute on their old rq until wakeup (needed for the fair sleeper
5965 * bonus in place_entity()).
5966 *
5967 * If it was on the rq, we've just 'preempted' it, which does convert
5968 * ->vruntime to a relative base.
5969 *
5970 * Make sure both cases convert their relative position when migrating
5971 * to another cgroup's rq. This does somewhat interfere with the
5972 * fair sleeper stuff for the first placement, but who cares.
5973 */
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09005974 /*
5975 * When !on_rq, vruntime of the task has usually NOT been normalized.
5976 * But there are some cases where it has already been normalized:
5977 *
5978 * - Moving a forked child which is waiting for being woken up by
5979 * wake_up_new_task().
Daisuke Nishimura62af3782011-12-15 14:37:41 +09005980 * - Moving a task which has been woken up by try_to_wake_up() and
5981 * waiting for actually being woken up by sched_ttwu_pending().
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09005982 *
5983 * To prevent boost or penalty in the new cfs_rq caused by delta
5984 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
5985 */
Daisuke Nishimura62af3782011-12-15 14:37:41 +09005986 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09005987 on_rq = 1;
5988
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01005989 if (!on_rq)
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02005990 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
5991 set_task_rq(p, task_cpu(p));
Paul Turneraff3e492012-10-04 13:18:30 +02005992 if (!on_rq) {
5993 cfs_rq = cfs_rq_of(&p->se);
5994 p->se.vruntime += cfs_rq->min_vruntime;
5995#ifdef CONFIG_SMP
5996 /*
5997 * migrate_task_rq_fair() will have removed our previous
5998 * contribution, but we must synchronize for ongoing future
5999 * decay.
6000 */
6001 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
6002 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
6003#endif
6004 }
Peter Zijlstra810b3812008-02-29 15:21:01 -05006005}
Peter Zijlstra029632f2011-10-25 10:00:11 +02006006
6007void free_fair_sched_group(struct task_group *tg)
6008{
6009 int i;
6010
6011 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
6012
6013 for_each_possible_cpu(i) {
6014 if (tg->cfs_rq)
6015 kfree(tg->cfs_rq[i]);
6016 if (tg->se)
6017 kfree(tg->se[i]);
6018 }
6019
6020 kfree(tg->cfs_rq);
6021 kfree(tg->se);
6022}
6023
6024int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6025{
6026 struct cfs_rq *cfs_rq;
6027 struct sched_entity *se;
6028 int i;
6029
6030 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
6031 if (!tg->cfs_rq)
6032 goto err;
6033 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
6034 if (!tg->se)
6035 goto err;
6036
6037 tg->shares = NICE_0_LOAD;
6038
6039 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6040
6041 for_each_possible_cpu(i) {
6042 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6043 GFP_KERNEL, cpu_to_node(i));
6044 if (!cfs_rq)
6045 goto err;
6046
6047 se = kzalloc_node(sizeof(struct sched_entity),
6048 GFP_KERNEL, cpu_to_node(i));
6049 if (!se)
6050 goto err_free_rq;
6051
6052 init_cfs_rq(cfs_rq);
6053 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6054 }
6055
6056 return 1;
6057
6058err_free_rq:
6059 kfree(cfs_rq);
6060err:
6061 return 0;
6062}
6063
6064void unregister_fair_sched_group(struct task_group *tg, int cpu)
6065{
6066 struct rq *rq = cpu_rq(cpu);
6067 unsigned long flags;
6068
6069 /*
6070 * Only empty task groups can be destroyed; so we can speculatively
6071 * check on_list without danger of it being re-added.
6072 */
6073 if (!tg->cfs_rq[cpu]->on_list)
6074 return;
6075
6076 raw_spin_lock_irqsave(&rq->lock, flags);
6077 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6078 raw_spin_unlock_irqrestore(&rq->lock, flags);
6079}
6080
6081void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6082 struct sched_entity *se, int cpu,
6083 struct sched_entity *parent)
6084{
6085 struct rq *rq = cpu_rq(cpu);
6086
6087 cfs_rq->tg = tg;
6088 cfs_rq->rq = rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006089 init_cfs_rq_runtime(cfs_rq);
6090
6091 tg->cfs_rq[cpu] = cfs_rq;
6092 tg->se[cpu] = se;
6093
6094 /* se could be NULL for root_task_group */
6095 if (!se)
6096 return;
6097
6098 if (!parent)
6099 se->cfs_rq = &rq->cfs;
6100 else
6101 se->cfs_rq = parent->my_q;
6102
6103 se->my_q = cfs_rq;
6104 update_load_set(&se->load, 0);
6105 se->parent = parent;
6106}
6107
6108static DEFINE_MUTEX(shares_mutex);
6109
6110int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6111{
6112 int i;
6113 unsigned long flags;
6114
6115 /*
6116 * We can't change the weight of the root cgroup.
6117 */
6118 if (!tg->se[0])
6119 return -EINVAL;
6120
6121 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6122
6123 mutex_lock(&shares_mutex);
6124 if (tg->shares == shares)
6125 goto done;
6126
6127 tg->shares = shares;
6128 for_each_possible_cpu(i) {
6129 struct rq *rq = cpu_rq(i);
6130 struct sched_entity *se;
6131
6132 se = tg->se[i];
6133 /* Propagate contribution to hierarchy */
6134 raw_spin_lock_irqsave(&rq->lock, flags);
Frederic Weisbecker71b1da42013-04-12 01:50:59 +02006135
6136 /* Possible calls to update_curr() need rq clock */
6137 update_rq_clock(rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08006138 for_each_sched_entity(se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02006139 update_cfs_shares(group_cfs_rq(se));
6140 raw_spin_unlock_irqrestore(&rq->lock, flags);
6141 }
6142
6143done:
6144 mutex_unlock(&shares_mutex);
6145 return 0;
6146}
6147#else /* CONFIG_FAIR_GROUP_SCHED */
6148
6149void free_fair_sched_group(struct task_group *tg) { }
6150
6151int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6152{
6153 return 1;
6154}
6155
6156void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6157
6158#endif /* CONFIG_FAIR_GROUP_SCHED */
6159
Peter Zijlstra810b3812008-02-29 15:21:01 -05006160
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07006161static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00006162{
6163 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00006164 unsigned int rr_interval = 0;
6165
6166 /*
6167 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6168 * idle runqueue:
6169 */
Peter Williams0d721ce2009-09-21 01:31:53 +00006170 if (rq->cfs.load.weight)
Zhu Yanhaia59f4e02013-01-08 12:56:52 +08006171 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
Peter Williams0d721ce2009-09-21 01:31:53 +00006172
6173 return rr_interval;
6174}
6175
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006176/*
6177 * All the scheduling class methods:
6178 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006179const struct sched_class fair_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02006180 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006181 .enqueue_task = enqueue_task_fair,
6182 .dequeue_task = dequeue_task_fair,
6183 .yield_task = yield_task_fair,
Mike Galbraithd95f4122011-02-01 09:50:51 -05006184 .yield_to_task = yield_to_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006185
Ingo Molnar2e09bf52007-10-15 17:00:05 +02006186 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006187
6188 .pick_next_task = pick_next_task_fair,
6189 .put_prev_task = put_prev_task_fair,
6190
Peter Williams681f3e62007-10-24 18:23:51 +02006191#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08006192 .select_task_rq = select_task_rq_fair,
Paul Turner0a74bef2012-10-04 13:18:30 +02006193 .migrate_task_rq = migrate_task_rq_fair,
Alex Shi141965c2013-06-26 13:05:39 +08006194
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006195 .rq_online = rq_online_fair,
6196 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006197
6198 .task_waking = task_waking_fair,
Peter Williams681f3e62007-10-24 18:23:51 +02006199#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006200
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02006201 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006202 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006203 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01006204
6205 .prio_changed = prio_changed_fair,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006206 .switched_from = switched_from_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01006207 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05006208
Peter Williams0d721ce2009-09-21 01:31:53 +00006209 .get_rr_interval = get_rr_interval_fair,
6210
Peter Zijlstra810b3812008-02-29 15:21:01 -05006211#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006212 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05006213#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006214};
6215
6216#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +02006217void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006218{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006219 struct cfs_rq *cfs_rq;
6220
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01006221 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02006222 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02006223 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01006224 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006225}
6226#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02006227
6228__init void init_sched_fair_class(void)
6229{
6230#ifdef CONFIG_SMP
6231 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6232
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006233#ifdef CONFIG_NO_HZ_COMMON
Diwakar Tundlam554ceca2012-03-07 14:44:26 -08006234 nohz.next_balance = jiffies;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006235 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
Suresh Siddha71325962012-01-19 18:28:57 -08006236 cpu_notifier(sched_ilb_notifier, 0);
Peter Zijlstra029632f2011-10-25 10:00:11 +02006237#endif
6238#endif /* SMP */
6239
6240}