/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"
/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity.
 */
static unsigned int sched_nr_latency = 8;
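
/*
 * Worked example (illustrative, using the defaults above):
 *   sched_nr_latency = sysctl_sched_latency / sysctl_sched_min_granularity
 *                    = 6ms / 0.75ms = 8,
 * i.e. once more than 8 tasks are runnable, the period is stretched (see
 * __sched_period() below) rather than the slices being shrunk further.
 */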

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}
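
/*
 * Worked example (illustrative): with the default LOG scaling on an 8-CPU
 * machine, factor = 1 + ilog2(8) = 4, so update_sysctl() below yields
 * sched_latency = 24ms, sched_min_granularity = 3ms and
 * sched_wakeup_granularity = 4ms. With more than 8 CPUs the factor stays
 * capped, since cpus = min(num_online_cpus(), 8).
 */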

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}

#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
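
/*
 * Worked example (illustrative, assuming SCHED_LOAD_RESOLUTION = 0): for
 * delta_exec = 1000000 (1ms), weight = 1024 (nice 0) and lw->weight = 3072
 * (three nice-0 tasks):
 *
 *   inv_weight = 2^32 / 3072            = 1398101
 *   tmp        = 1000000 * 1024         = 1024000000
 *   result     = SRR(tmp * 1398101, 32) ~= 333333
 *
 * i.e. roughly delta_exec * 1024/3072, one third of the wall-time delta,
 * computed with a multiply and a shift instead of a 64-bit division.
 */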


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
				       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
		/* We should have no load, but we need to update last_decay. */
		update_cfs_rq_blocked_load(cfs_rq, 0);
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can be made between sibling entities that are
	 * in the same cfs_rq, i.e. that have a common parent. Walk up the
	 * hierarchy of both tasks until we find their ancestors that are
	 * siblings of a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
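
/*
 * Note (illustrative): the (s64) cast of the unsigned difference is what
 * makes these comparisons safe once vruntime wraps around u64. E.g. with
 * vruntime = 10 just past the wrap and max_vruntime = ULLONG_MAX - 10,
 * (s64)(10 - (ULLONG_MAX - 10)) = 21 > 0, so vruntime is correctly seen
 * as the larger (newer) value even though it is numerically smaller.
 */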

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
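
/*
 * Worked example (illustrative, default tunables, scaling factor 1): with
 * nr_running = 4 <= sched_nr_latency = 8 the period stays at
 * sysctl_sched_latency = 6ms; with nr_running = 16 it is stretched to
 * 16 * 0.75ms = 12ms so that no slice drops below the minimum granularity.
 */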

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
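
/*
 * Worked example (illustrative): two runnable nice-0 tasks (weight 1024
 * each) on a 6ms period each get
 *   slice = 6ms * 1024/2048 = 3ms;
 * if one of them is reniced to -5 (weight 3121), the split becomes
 * 3121/4145 and 1024/4145 of 6ms, i.e. ~4.5ms and ~1.5ms.
 */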

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * NUMA task sample period in ms
 */
unsigned int sysctl_numa_balancing_scan_period_min = 100;
unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static void task_numa_placement(struct task_struct *p)
{
	int seq;

	if (!p->mm)	/* for example, ksmd faulting in a user's mm */
		return;
	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
	if (p->numa_scan_seq == seq)
		return;
	p->numa_scan_seq = seq;

	/* FIXME: Scheduling placement policy hints go here */
}

/*
 * Got a PROT_NONE fault for a page on @node.
 */
void task_numa_fault(int node, int pages, bool migrated)
{
	struct task_struct *p = current;

	if (!sched_feat_numa(NUMA))
		return;

	/* FIXME: Allocate a task-specific structure for placement policy here */

	/*
	 * If pages are properly placed (did not migrate) then scan slower.
	 * This is reset periodically in case of phase changes.
	 */
	if (!migrated)
		p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
			p->numa_scan_period + jiffies_to_msecs(10));

	task_numa_placement(p);
}
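
/*
 * Illustrative back-off (using the defaults above): each fault batch on
 * properly placed pages adds jiffies_to_msecs(10) (10ms at HZ=1000) to
 * p->numa_scan_period, so a task starting at the 100ms minimum whose pages
 * keep staying local walks up towards the 5000ms (100*50) maximum and is
 * clamped there until the periodic reset in task_numa_work().
 */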

static void reset_ptenuma_scan(struct task_struct *p)
{
	ACCESS_ONCE(p->mm->numa_scan_seq)++;
	p->mm->numa_scan_offset = 0;
}

/*
 * The expensive part of numa migration is done from task_work context.
 * Triggered from task_tick_numa().
 */
void task_numa_work(struct callback_head *work)
{
	unsigned long migrate, next_scan, now = jiffies;
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	struct vm_area_struct *vma;
	unsigned long start, end;
	long pages;

	WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));

	work->next = work; /* protect against double add */
	/*
	 * Who cares about NUMA placement when they're dying.
	 *
	 * NOTE: make sure not to dereference p->mm before this check,
	 * exit_task_work() happens _after_ exit_mm() so we could be called
	 * without p->mm even though we still had it when we enqueued this
	 * work.
	 */
	if (p->flags & PF_EXITING)
		return;

	/*
	 * We do not care about task placement until a task runs on a node
	 * other than the first one used by the address space. This is
	 * largely because migrations are driven by what CPU the task
	 * is running on. If it's never scheduled on another node, it'll
	 * not migrate so why bother trapping the fault.
	 */
	if (mm->first_nid == NUMA_PTE_SCAN_INIT)
		mm->first_nid = numa_node_id();
	if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
		/* Are we running on a new node yet? */
		if (numa_node_id() == mm->first_nid &&
		    !sched_feat_numa(NUMA_FORCE))
			return;

		mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
	}

	/*
	 * Reset the scan period if enough time has gone by. The objective is
	 * that scanning will be reduced if pages are properly placed. As
	 * tasks can enter different phases this needs to be re-examined.
	 * Lacking proper tracking of reference behaviour, this blunt hammer
	 * is used.
	 */
	migrate = mm->numa_next_reset;
	if (time_after(now, migrate)) {
		p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
		next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
		xchg(&mm->numa_next_reset, next_scan);
	}

	/*
	 * Enforce maximal scan/migration frequency..
	 */
	migrate = mm->numa_next_scan;
	if (time_before(now, migrate))
		return;

	if (p->numa_scan_period == 0)
		p->numa_scan_period = sysctl_numa_balancing_scan_period_min;

	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
		return;

	/*
	 * Do not set pte_numa if the current running node is rate-limited.
	 * This loses statistics on the fault but if we are unwilling to
	 * migrate to this node, it is less likely we can do useful work.
	 */
	if (migrate_ratelimited(numa_node_id()))
		return;

	start = mm->numa_scan_offset;
	pages = sysctl_numa_balancing_scan_size;
	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
	if (!pages)
		return;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (!vma) {
		reset_ptenuma_scan(p);
		start = 0;
		vma = mm->mmap;
	}
	for (; vma; vma = vma->vm_next) {
		if (!vma_migratable(vma))
			continue;

		/* Skip small VMAs. They are not likely to be of relevance */
		if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
			continue;

		do {
			start = max(start, vma->vm_start);
			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
			end = min(end, vma->vm_end);
			pages -= change_prot_numa(vma, start, end);

			start = end;
			if (pages <= 0)
				goto out;
		} while (end != vma->vm_end);
	}

out:
	/*
	 * It is possible to reach the end of the VMA list but the last few
	 * VMAs are not guaranteed to be vma_migratable. If they are not, we
	 * would find the !migratable VMA on the next scan but not reset the
	 * scanner to the start, so check it now.
	 */
	if (vma)
		mm->numa_scan_offset = start;
	else
		reset_ptenuma_scan(p);
	up_read(&mm->mmap_sem);
}

/*
 * Drive the periodic memory faults..
 */
void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
	struct callback_head *work = &curr->numa_work;
	u64 period, now;

	/*
	 * We don't care about NUMA placement if we don't have memory.
	 */
	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
		return;

	/*
	 * Using runtime rather than walltime has the dual advantage that
	 * we (mostly) drive the selection from busy threads and that the
	 * task needs to have done some actual work before we bother with
	 * NUMA placement.
	 */
	now = curr->se.sum_exec_runtime;
	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;

	if (now - curr->node_stamp > period) {
		if (!curr->node_stamp)
			curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
		curr->node_stamp = now;

		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
			task_work_add(curr, work, true);
		}
	}
}
#else
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
}
#endif /* CONFIG_NUMA_BALANCING */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
	if (entity_is_task(se))
		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
#endif
	cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
	if (entity_is_task(se))
		list_del_init(&se->group_node);
	cfs_rq->nr_running--;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
{
	long tg_weight;

	/*
	 * Use this CPU's actual weight instead of the last load_contribution
	 * to gain a more accurate current total weight. See
	 * update_cfs_rq_load_contribution().
	 */
	tg_weight = atomic64_read(&tg->load_avg);
	tg_weight -= cfs_rq->tg_load_contrib;
	tg_weight += cfs_rq->load.weight;

	return tg_weight;
}

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	long tg_weight, load, shares;

	tg_weight = calc_tg_weight(tg, cfs_rq);
	load = cfs_rq->load.weight;

	shares = (tg->shares * load);
	if (tg_weight)
		shares /= tg_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	return shares;
}
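
/*
 * Worked example (illustrative): a group with tg->shares = 1024 whose total
 * weight across CPUs (tg_weight) is 3072, of which this CPU's cfs_rq
 * contributes load = 1536, gets
 *   shares = 1024 * 1536 / 3072 = 512
 * on this CPU, clamped to the range [MIN_SHARES, tg->shares].
 */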
# else /* CONFIG_SMP */
static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	return tg->shares;
}
# endif /* CONFIG_SMP */
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	if (se->on_rq) {
		/* commit outstanding execution time */
		if (cfs_rq->curr == se)
			update_curr(cfs_rq);
		account_entity_dequeue(cfs_rq, se);
	}

	update_load_set(&se->load, weight);

	if (se->on_rq)
		account_entity_enqueue(cfs_rq, se);
}

static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
	struct task_group *tg;
	struct sched_entity *se;
	long shares;

	tg = cfs_rq->tg;
	se = tg->se[cpu_of(rq_of(cfs_rq))];
	if (!se || throttled_hierarchy(cfs_rq))
		return;
#ifndef CONFIG_SMP
	if (likely(se->load.weight == tg->shares))
		return;
#endif
	shares = calc_cfs_shares(cfs_rq, tg);

	reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

/* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */
#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
/*
 * We choose a half-life close to 1 scheduling period.
 * Note: The tables below are dependent on this value.
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */

/* Precomputed fixed inverse multiplies for multiplication by y^n */
static const u32 runnable_avg_yN_inv[] = {
	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
	0x85aac367, 0x82cd8698,
};

/*
 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
 * over-estimates when re-combining.
 */
static const u32 runnable_avg_yN_sum[] = {
	    0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
	 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
};

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static __always_inline u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (!n)
		return val;
	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * with a look-up table which covers y^n (n < PERIOD)
	 *
	 * to achieve a constant time decay_load.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val *= runnable_avg_yN_inv[local_n];
	/* We don't use SRR here since we always want to round down. */
	return val >> 32;
}
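
/*
 * Worked example (illustrative): decay_load(47742, 32) halves the load,
 * since y^32 = 1/2: the value is shifted right once and then multiplied by
 * runnable_avg_yN_inv[0] = 0xffffffff (~1.0), giving ~23870. For n = 16,
 * val is instead scaled by runnable_avg_yN_inv[16] = 0xb504f333, i.e.
 * ~0.7071 = 1/sqrt(2), as expected for half of a half-life.
 */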

/*
 * For updates fully spanning n periods, the contribution to runnable
 * average will be: \Sum 1024*y^n
 *
 * We can compute this reasonably efficiently by combining:
 *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n < PERIOD}
 */
static u32 __compute_runnable_contrib(u64 n)
{
	u32 contrib = 0;

	if (likely(n <= LOAD_AVG_PERIOD))
		return runnable_avg_yN_sum[n];
	else if (unlikely(n >= LOAD_AVG_MAX_N))
		return LOAD_AVG_MAX;

	/* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
	do {
		contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
		contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];

		n -= LOAD_AVG_PERIOD;
	} while (n > LOAD_AVG_PERIOD);

	contrib = decay_load(contrib, n);
	return contrib + runnable_avg_yN_sum[n];
}
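
/*
 * Worked example (illustrative): for n = 40 periods the loop body runs once:
 *   contrib = 0/2 + runnable_avg_yN_sum[32] = 23371, n becomes 8,
 *   contrib = decay_load(23371, 8) ~= 23371 * 0.841 ~= 19652,
 *   return 19652 + runnable_avg_yN_sum[8] = 19652 + 7437 ~= 27089,
 * which approaches the LOAD_AVG_MAX = 47742 ceiling as n grows.
 */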

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series. To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int __update_entity_runnable_avg(u64 now,
							struct sched_avg *sa,
							int runnable)
{
	u64 delta, periods;
	u32 runnable_contrib;
	int delta_w, decayed = 0;

	delta = now - sa->last_runnable_update;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_runnable_update = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;
	sa->last_runnable_update = now;

	/* delta_w is the amount already accumulated against our next period */
	delta_w = sa->runnable_avg_period % 1024;
	if (delta + delta_w >= 1024) {
		/* period roll-over */
		decayed = 1;

		/*
		 * Now that we know we're crossing a period boundary, figure
		 * out how much from delta we need to complete the current
		 * period and accrue it.
		 */
		delta_w = 1024 - delta_w;
		if (runnable)
			sa->runnable_avg_sum += delta_w;
		sa->runnable_avg_period += delta_w;

		delta -= delta_w;

		/* Figure out how many additional periods this update spans */
		periods = delta / 1024;
		delta %= 1024;

		sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
						  periods + 1);
		sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
						     periods + 1);

		/* Efficiently calculate \Sum (1..n_period) 1024*y^i */
		runnable_contrib = __compute_runnable_contrib(periods);
		if (runnable)
			sa->runnable_avg_sum += runnable_contrib;
		sa->runnable_avg_period += runnable_contrib;
	}

	/* Remainder of delta accrued against u_0` */
	if (runnable)
		sa->runnable_avg_sum += delta;
	sa->runnable_avg_period += delta;

	return decayed;
}
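
/*
 * Worked example (illustrative): a task that has been runnable for its
 * entire history converges to runnable_avg_sum ~= runnable_avg_period
 * ~= LOAD_AVG_MAX = 47742, i.e. a sum/period ratio of ~1.0. One that runs
 * a millisecond out of every four converges to a ratio of ~0.25, with
 * recent periods dominating: going idle for 32ms roughly halves the
 * accumulated sum while the period stays saturated near LOAD_AVG_MAX,
 * halving the ratio.
 */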
1318
Paul Turner9ee474f2012-10-04 13:18:30 +02001319/* Synchronize an entity's decay with its parenting cfs_rq.*/
Paul Turneraff3e492012-10-04 13:18:30 +02001320static inline u64 __synchronize_entity_decay(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02001321{
1322 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1323 u64 decays = atomic64_read(&cfs_rq->decay_counter);
1324
1325 decays -= se->avg.decay_count;
1326 if (!decays)
Paul Turneraff3e492012-10-04 13:18:30 +02001327 return 0;
Paul Turner9ee474f2012-10-04 13:18:30 +02001328
1329 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1330 se->avg.decay_count = 0;
Paul Turneraff3e492012-10-04 13:18:30 +02001331
1332 return decays;
Paul Turner9ee474f2012-10-04 13:18:30 +02001333}
1334
Paul Turnerc566e8e2012-10-04 13:18:30 +02001335#ifdef CONFIG_FAIR_GROUP_SCHED
1336static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1337 int force_update)
1338{
1339 struct task_group *tg = cfs_rq->tg;
1340 s64 tg_contrib;
1341
1342 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1343 tg_contrib -= cfs_rq->tg_load_contrib;
1344
1345 if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1346 atomic64_add(tg_contrib, &tg->load_avg);
1347 cfs_rq->tg_load_contrib += tg_contrib;
1348 }
1349}
Paul Turner8165e142012-10-04 13:18:31 +02001350
Paul Turnerbb17f652012-10-04 13:18:31 +02001351/*
1352 * Aggregate cfs_rq runnable averages into an equivalent task_group
1353 * representation for computing load contributions.
1354 */
1355static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1356 struct cfs_rq *cfs_rq)
1357{
1358 struct task_group *tg = cfs_rq->tg;
1359 long contrib;
1360
1361 /* The fraction of a cpu used by this cfs_rq */
1362 contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1363 sa->runnable_avg_period + 1);
1364 contrib -= cfs_rq->tg_runnable_contrib;
1365
1366 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1367 atomic_add(contrib, &tg->runnable_avg);
1368 cfs_rq->tg_runnable_contrib += contrib;
1369 }
1370}
1371
Paul Turner8165e142012-10-04 13:18:31 +02001372static inline void __update_group_entity_contrib(struct sched_entity *se)
1373{
1374 struct cfs_rq *cfs_rq = group_cfs_rq(se);
1375 struct task_group *tg = cfs_rq->tg;
Paul Turnerbb17f652012-10-04 13:18:31 +02001376 int runnable_avg;
1377
Paul Turner8165e142012-10-04 13:18:31 +02001378 u64 contrib;
1379
1380 contrib = cfs_rq->tg_load_contrib * tg->shares;
1381 se->avg.load_avg_contrib = div64_u64(contrib,
1382 atomic64_read(&tg->load_avg) + 1);
Paul Turnerbb17f652012-10-04 13:18:31 +02001383
1384 /*
1385 * For group entities we need to compute a correction term in the case
1386 * that they are consuming <1 cpu so that we would contribute the same
1387 * load as a task of equal weight.
1388 *
1389 * Explicitly co-ordinating this measurement would be expensive, but
 1390 * fortunately the sum of each cpu's contribution forms a usable
1391 * lower-bound on the true value.
1392 *
 1393 * Consider the aggregate of 2 contributions. Either they are disjoint
 1394 * (and the sum represents the true value) or they overlap and we are
 1395 * understating by the aggregate of their overlap.
1396 *
1397 * Extending this to N cpus, for a given overlap, the maximum amount we
 1398 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
1399 * cpus that overlap for this interval and w_i is the interval width.
1400 *
 1401 * On a small machine, the first term is well-bounded, which bounds the
 1402 * total error since w_i is a subset of the period. Whereas on a
 1403 * larger machine, while this first term can be larger, if w_i is of
 1404 * consequential size we are guaranteed to see n_i*w_i quickly converge
 1405 * to our upper bound of 1 cpu.
1406 */
1407 runnable_avg = atomic_read(&tg->runnable_avg);
1408 if (runnable_avg < NICE_0_LOAD) {
1409 se->avg.load_avg_contrib *= runnable_avg;
1410 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1411 }
Paul Turner8165e142012-10-04 13:18:31 +02001412}
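/*
 * Worked example (illustrative numbers): a group with tg->shares == 1024
 * that owns all of the group's load gets load_avg_contrib ~= 1024; if the
 * group as a whole is runnable only half the time (runnable_avg ~= 512,
 * assuming the common NICE_0_LOAD of 1024), the correction above halves
 * that to ~512, matching what a half-runnable nice-0 task contributes.
 */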
Paul Turnerc566e8e2012-10-04 13:18:30 +02001413#else
1414static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1415 int force_update) {}
Paul Turnerbb17f652012-10-04 13:18:31 +02001416static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1417 struct cfs_rq *cfs_rq) {}
Paul Turner8165e142012-10-04 13:18:31 +02001418static inline void __update_group_entity_contrib(struct sched_entity *se) {}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001419#endif
1420
Paul Turner8165e142012-10-04 13:18:31 +02001421static inline void __update_task_entity_contrib(struct sched_entity *se)
1422{
1423 u32 contrib;
1424
1425 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1426 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1427 contrib /= (se->avg.runnable_avg_period + 1);
1428 se->avg.load_avg_contrib = scale_load(contrib);
1429}
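/*
 * Worked example (illustrative numbers): a nice-0 task (weight 1024)
 * whose runnable_avg_sum is half its runnable_avg_period contributes
 * 1024 * 1/2 ~= 512 to load_avg_contrib; fully runnable, it would
 * contribute ~1024.
 */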
1430
Paul Turner2dac7542012-10-04 13:18:30 +02001431/* Compute the current contribution to load_avg by se, return any delta */
1432static long __update_entity_load_avg_contrib(struct sched_entity *se)
1433{
1434 long old_contrib = se->avg.load_avg_contrib;
1435
Paul Turner8165e142012-10-04 13:18:31 +02001436 if (entity_is_task(se)) {
1437 __update_task_entity_contrib(se);
1438 } else {
Paul Turnerbb17f652012-10-04 13:18:31 +02001439 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
Paul Turner8165e142012-10-04 13:18:31 +02001440 __update_group_entity_contrib(se);
1441 }
Paul Turner2dac7542012-10-04 13:18:30 +02001442
1443 return se->avg.load_avg_contrib - old_contrib;
1444}
1445
Paul Turner9ee474f2012-10-04 13:18:30 +02001446static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1447 long load_contrib)
1448{
1449 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1450 cfs_rq->blocked_load_avg -= load_contrib;
1451 else
1452 cfs_rq->blocked_load_avg = 0;
1453}
1454
Paul Turnerf1b17282012-10-04 13:18:31 +02001455static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1456
Paul Turner9d85f212012-10-04 13:18:29 +02001457/* Update a sched_entity's runnable average */
Paul Turner9ee474f2012-10-04 13:18:30 +02001458static inline void update_entity_load_avg(struct sched_entity *se,
1459 int update_cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02001460{
Paul Turner2dac7542012-10-04 13:18:30 +02001461 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1462 long contrib_delta;
Paul Turnerf1b17282012-10-04 13:18:31 +02001463 u64 now;
Paul Turner2dac7542012-10-04 13:18:30 +02001464
Paul Turnerf1b17282012-10-04 13:18:31 +02001465 /*
 1466	 * For a group entity we need to use its owned cfs_rq_clock_task() in
 1467	 * case it is the parent of a throttled hierarchy.
1468 */
1469 if (entity_is_task(se))
1470 now = cfs_rq_clock_task(cfs_rq);
1471 else
1472 now = cfs_rq_clock_task(group_cfs_rq(se));
1473
1474 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
Paul Turner2dac7542012-10-04 13:18:30 +02001475 return;
1476
1477 contrib_delta = __update_entity_load_avg_contrib(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02001478
1479 if (!update_cfs_rq)
1480 return;
1481
Paul Turner2dac7542012-10-04 13:18:30 +02001482 if (se->on_rq)
1483 cfs_rq->runnable_load_avg += contrib_delta;
Paul Turner9ee474f2012-10-04 13:18:30 +02001484 else
1485 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1486}
1487
1488/*
1489 * Decay the load contributed by all blocked children and account this so that
 1490 * their contribution may be appropriately discounted when they wake up.
1491 */
Paul Turneraff3e492012-10-04 13:18:30 +02001492static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001493{
Paul Turnerf1b17282012-10-04 13:18:31 +02001494 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001495 u64 decays;
1496
1497 decays = now - cfs_rq->last_decay;
Paul Turneraff3e492012-10-04 13:18:30 +02001498 if (!decays && !force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001499 return;
1500
Paul Turneraff3e492012-10-04 13:18:30 +02001501 if (atomic64_read(&cfs_rq->removed_load)) {
1502 u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0);
1503 subtract_blocked_load_contrib(cfs_rq, removed_load);
1504 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001505
Paul Turneraff3e492012-10-04 13:18:30 +02001506 if (decays) {
1507 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1508 decays);
1509 atomic64_add(decays, &cfs_rq->decay_counter);
1510 cfs_rq->last_decay = now;
1511 }
Paul Turnerc566e8e2012-10-04 13:18:30 +02001512
1513 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
Paul Turner9d85f212012-10-04 13:18:29 +02001514}
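/*
 * Worked example (illustrative numbers): the task clock is shifted down
 * by 20 bits, so one "decay" is ~1ms. If 32ms elapsed since last_decay,
 * blocked_load_avg is halved (y^32 = 1/2) and decay_counter advances by
 * 32, letting sleeping entities replay the same decay when they wake.
 */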
Ben Segall18bf2802012-10-04 12:51:20 +02001515
1516static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1517{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001518 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
Paul Turnerbb17f652012-10-04 13:18:31 +02001519 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
Ben Segall18bf2802012-10-04 12:51:20 +02001520}
Paul Turner2dac7542012-10-04 13:18:30 +02001521
1522/* Add the load generated by se into cfs_rq's child load-average */
1523static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001524 struct sched_entity *se,
1525 int wakeup)
Paul Turner2dac7542012-10-04 13:18:30 +02001526{
Paul Turneraff3e492012-10-04 13:18:30 +02001527 /*
 1528	 * We track migrations using entity decay_count <= 0; on a wake-up
1529 * migration we use a negative decay count to track the remote decays
1530 * accumulated while sleeping.
1531 */
1532 if (unlikely(se->avg.decay_count <= 0)) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001533 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
Paul Turneraff3e492012-10-04 13:18:30 +02001534 if (se->avg.decay_count) {
1535 /*
1536 * In a wake-up migration we have to approximate the
1537 * time sleeping. This is because we can't synchronize
1538 * clock_task between the two cpus, and it is not
1539 * guaranteed to be read-safe. Instead, we can
1540 * approximate this using our carried decays, which are
1541 * explicitly atomically readable.
1542 */
1543 se->avg.last_runnable_update -= (-se->avg.decay_count)
1544 << 20;
1545 update_entity_load_avg(se, 0);
1546 /* Indicate that we're now synchronized and on-rq */
1547 se->avg.decay_count = 0;
1548 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001549 wakeup = 0;
1550 } else {
1551 __synchronize_entity_decay(se);
1552 }
1553
Paul Turneraff3e492012-10-04 13:18:30 +02001554 /* migrated tasks did not contribute to our blocked load */
1555 if (wakeup) {
Paul Turner9ee474f2012-10-04 13:18:30 +02001556 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turneraff3e492012-10-04 13:18:30 +02001557 update_entity_load_avg(se, 0);
1558 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001559
Paul Turner2dac7542012-10-04 13:18:30 +02001560 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02001561 /* we force update consideration on load-balancer moves */
1562 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
Paul Turner2dac7542012-10-04 13:18:30 +02001563}
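/*
 * Worked example (illustrative numbers): a task waking here after a
 * remote sleep with se->avg.decay_count == -8 missed ~8ms of decay, so
 * last_runnable_update is rewound by 8 << 20 ns above and the following
 * update_entity_load_avg() applies roughly y^8 to its average.
 */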
1564
Paul Turner9ee474f2012-10-04 13:18:30 +02001565/*
 1566 * Remove se's load from this cfs_rq child load-average; if the entity is
1567 * transitioning to a blocked state we track its projected decay using
1568 * blocked_load_avg.
1569 */
Paul Turner2dac7542012-10-04 13:18:30 +02001570static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001571 struct sched_entity *se,
1572 int sleep)
Paul Turner2dac7542012-10-04 13:18:30 +02001573{
Paul Turner9ee474f2012-10-04 13:18:30 +02001574 update_entity_load_avg(se, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02001575 /* we force update consideration on load-balancer moves */
1576 update_cfs_rq_blocked_load(cfs_rq, !sleep);
Paul Turner9ee474f2012-10-04 13:18:30 +02001577
Paul Turner2dac7542012-10-04 13:18:30 +02001578 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
Paul Turner9ee474f2012-10-04 13:18:30 +02001579 if (sleep) {
1580 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1581 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1582 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
Paul Turner2dac7542012-10-04 13:18:30 +02001583}
Vincent Guittot642dbc32013-04-18 18:34:26 +02001584
1585/*
1586 * Update the rq's load with the elapsed running time before entering
 1587 * idle. If the last scheduled task is not a CFS task, idle_enter will
1588 * be the only way to update the runnable statistic.
1589 */
1590void idle_enter_fair(struct rq *this_rq)
1591{
1592 update_rq_runnable_avg(this_rq, 1);
1593}
1594
1595/*
1596 * Update the rq's load with the elapsed idle time before a task is
 1597 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
1598 * be the only way to update the runnable statistic.
1599 */
1600void idle_exit_fair(struct rq *this_rq)
1601{
1602 update_rq_runnable_avg(this_rq, 0);
1603}
1604
Paul Turner9d85f212012-10-04 13:18:29 +02001605#else
Paul Turner9ee474f2012-10-04 13:18:30 +02001606static inline void update_entity_load_avg(struct sched_entity *se,
1607 int update_cfs_rq) {}
Ben Segall18bf2802012-10-04 12:51:20 +02001608static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001609static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001610 struct sched_entity *se,
1611 int wakeup) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001612static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001613 struct sched_entity *se,
1614 int sleep) {}
Paul Turneraff3e492012-10-04 13:18:30 +02001615static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1616 int force_update) {}
Paul Turner9d85f212012-10-04 13:18:29 +02001617#endif
1618
Ingo Molnar2396af62007-08-09 11:16:48 +02001619static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001620{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001621#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02001622 struct task_struct *tsk = NULL;
1623
1624 if (entity_is_task(se))
1625 tsk = task_of(se);
1626
Lucas De Marchi41acab82010-03-10 23:37:45 -03001627 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001628 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001629
1630 if ((s64)delta < 0)
1631 delta = 0;
1632
Lucas De Marchi41acab82010-03-10 23:37:45 -03001633 if (unlikely(delta > se->statistics.sleep_max))
1634 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001635
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001636 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001637 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01001638
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001639 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02001640 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001641 trace_sched_stat_sleep(tsk, delta);
1642 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001643 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03001644 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001645 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001646
1647 if ((s64)delta < 0)
1648 delta = 0;
1649
Lucas De Marchi41acab82010-03-10 23:37:45 -03001650 if (unlikely(delta > se->statistics.block_max))
1651 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001652
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001653 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001654 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02001655
Peter Zijlstrae4143142009-07-23 20:13:26 +02001656 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001657 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001658 se->statistics.iowait_sum += delta;
1659 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001660 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001661 }
1662
Andrew Vaginb781a602011-11-28 12:03:35 +03001663 trace_sched_stat_blocked(tsk, delta);
1664
Peter Zijlstrae4143142009-07-23 20:13:26 +02001665 /*
1666 * Blocking time is in units of nanosecs, so shift by
1667 * 20 to get a milliseconds-range estimation of the
1668 * amount of time that the task spent sleeping:
1669 */
1670 if (unlikely(prof_on == SLEEP_PROFILING)) {
1671 profile_hits(SLEEP_PROFILING,
1672 (void *)get_wchan(tsk),
1673 delta >> 20);
1674 }
1675 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02001676 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001677 }
1678#endif
1679}
1680
Peter Zijlstraddc97292007-10-15 17:00:10 +02001681static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1682{
1683#ifdef CONFIG_SCHED_DEBUG
1684 s64 d = se->vruntime - cfs_rq->min_vruntime;
1685
1686 if (d < 0)
1687 d = -d;
1688
1689 if (d > 3*sysctl_sched_latency)
1690 schedstat_inc(cfs_rq, nr_spread_over);
1691#endif
1692}
1693
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001694static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001695place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1696{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02001697 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001698
Peter Zijlstra2cb86002007-11-09 22:39:37 +01001699 /*
 1700	 * The 'current' period is already promised to the current tasks;
 1701	 * however, the extra weight of the new task will slow them down a
 1702	 * little. Place the new task so that it fits in the slot that
1703 * stays open at the end.
1704 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001705 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02001706 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001707
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001708 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01001709 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001710 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001711
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001712 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001713 * Halve their sleep time's effect, to allow
1714 * for a gentler effect of sleepers:
1715 */
1716 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1717 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02001718
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001719 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001720 }
1721
Mike Galbraithb5d9d732009-09-08 11:12:28 +02001722 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05301723 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001724}
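/*
 * Worked example (illustrative numbers): with the default 6ms
 * sysctl_sched_latency and GENTLE_FAIR_SLEEPERS enabled, a waking task is
 * placed at most 3ms of vruntime before min_vruntime; the max_vruntime()
 * clamp above ensures a task that slept only briefly cannot move
 * backwards and harvest extra credit.
 */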
1725
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001726static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1727
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001728static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001729enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001730{
1731 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001732 * Update the normalized vruntime before updating min_vruntime
 1733	 * through calling update_curr().
1734 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001735 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001736 se->vruntime += cfs_rq->min_vruntime;
1737
1738 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001739 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001740 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02001741 update_curr(cfs_rq);
Paul Turnerf269ae02012-10-04 13:18:31 +02001742 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001743 account_entity_enqueue(cfs_rq, se);
1744 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001745
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001746 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001747 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02001748 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02001749 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001750
Ingo Molnard2417e52007-08-09 11:16:47 +02001751 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02001752 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001753 if (se != cfs_rq->curr)
1754 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001755 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001756
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001757 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001758 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001759 check_enqueue_throttle(cfs_rq);
1760 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001761}
1762
Rik van Riel2c13c9192011-02-01 09:48:37 -05001763static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01001764{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001765 for_each_sched_entity(se) {
1766 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1767 if (cfs_rq->last == se)
1768 cfs_rq->last = NULL;
1769 else
1770 break;
1771 }
1772}
Peter Zijlstra2002c692008-11-11 11:52:33 +01001773
Rik van Riel2c13c9192011-02-01 09:48:37 -05001774static void __clear_buddies_next(struct sched_entity *se)
1775{
1776 for_each_sched_entity(se) {
1777 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1778 if (cfs_rq->next == se)
1779 cfs_rq->next = NULL;
1780 else
1781 break;
1782 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01001783}
1784
Rik van Rielac53db52011-02-01 09:51:03 -05001785static void __clear_buddies_skip(struct sched_entity *se)
1786{
1787 for_each_sched_entity(se) {
1788 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1789 if (cfs_rq->skip == se)
1790 cfs_rq->skip = NULL;
1791 else
1792 break;
1793 }
1794}
1795
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001796static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1797{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001798 if (cfs_rq->last == se)
1799 __clear_buddies_last(se);
1800
1801 if (cfs_rq->next == se)
1802 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05001803
1804 if (cfs_rq->skip == se)
1805 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001806}
1807
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07001808static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07001809
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001810static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001811dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001812{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001813 /*
1814 * Update run-time statistics of the 'current'.
1815 */
1816 update_curr(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001817 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001818
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02001819 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001820 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02001821#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001822 if (entity_is_task(se)) {
1823 struct task_struct *tsk = task_of(se);
1824
1825 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001826 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001827 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001828 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001829 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02001830#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02001831 }
1832
Peter Zijlstra2002c692008-11-11 11:52:33 +01001833 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01001834
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001835 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001836 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001837 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001838 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001839
1840 /*
1841 * Normalize the entity after updating the min_vruntime because the
1842 * update can refer to the ->curr item and we need to reflect this
1843 * movement in our normalized position.
1844 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001845 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001846 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07001847
Paul Turnerd8b49862011-07-21 09:43:41 -07001848 /* return excess runtime on last dequeue */
1849 return_cfs_rq_runtime(cfs_rq);
1850
Peter Zijlstra1e876232011-05-17 16:21:10 -07001851 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001852 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001853}
1854
1855/*
1856 * Preempt the current task with a newly woken task if needed:
1857 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02001858static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02001859check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001860{
Peter Zijlstra11697832007-09-05 14:32:49 +02001861 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001862 struct sched_entity *se;
1863 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02001864
Peter Zijlstra6d0f0ebd2007-10-15 17:00:05 +02001865 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02001866 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01001867 if (delta_exec > ideal_runtime) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001868 resched_task(rq_of(cfs_rq)->curr);
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01001869 /*
1870 * The current task ran long enough, ensure it doesn't get
1871 * re-elected due to buddy favours.
1872 */
1873 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02001874 return;
1875 }
1876
1877 /*
1878 * Ensure that a task that missed wakeup preemption by a
1879 * narrow margin doesn't have to wait for a full slice.
1880 * This also mitigates buddy induced latencies under load.
1881 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02001882 if (delta_exec < sysctl_sched_min_granularity)
1883 return;
1884
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001885 se = __pick_first_entity(cfs_rq);
1886 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02001887
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001888 if (delta < 0)
1889 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01001890
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001891 if (delta > ideal_runtime)
1892 resched_task(rq_of(cfs_rq)->curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001893}
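/*
 * Worked example (illustrative numbers): if sched_slice() yields a 4ms
 * ideal_runtime, the current task is rescheduled once it has run 4ms
 * since it was last picked, or earlier (though never before
 * sysctl_sched_min_granularity has elapsed) once its vruntime leads the
 * leftmost entity's by more than 4ms.
 */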
1894
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001895static void
Ingo Molnar8494f412007-08-09 11:16:48 +02001896set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001897{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001898 /* 'current' is not kept within the tree. */
1899 if (se->on_rq) {
1900 /*
 1901	 * Any task has to be enqueued before it gets to execute on
1902 * a CPU. So account for the time it spent waiting on the
1903 * runqueue.
1904 */
1905 update_stats_wait_end(cfs_rq, se);
1906 __dequeue_entity(cfs_rq, se);
1907 }
1908
Ingo Molnar79303e92007-08-09 11:16:47 +02001909 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02001910 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02001911#ifdef CONFIG_SCHEDSTATS
1912 /*
1913 * Track our maximum slice length, if the CPU's load is at
 1914	 * least twice that of our own weight (i.e. don't track it
1915 * when there are only lesser-weight tasks around):
1916 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02001917 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001918 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02001919 se->sum_exec_runtime - se->prev_sum_exec_runtime);
1920 }
1921#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02001922 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001923}
1924
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02001925static int
1926wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1927
Rik van Rielac53db52011-02-01 09:51:03 -05001928/*
1929 * Pick the next process, keeping these things in mind, in this order:
1930 * 1) keep things fair between processes/task groups
1931 * 2) pick the "next" process, since someone really wants that to run
1932 * 3) pick the "last" process, for cache locality
1933 * 4) do not run the "skip" process, if something else is available
1934 */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01001935static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001936{
Rik van Rielac53db52011-02-01 09:51:03 -05001937 struct sched_entity *se = __pick_first_entity(cfs_rq);
Mike Galbraithf685cea2009-10-23 23:09:22 +02001938 struct sched_entity *left = se;
Peter Zijlstraf4b67552008-11-04 21:25:07 +01001939
Rik van Rielac53db52011-02-01 09:51:03 -05001940 /*
1941 * Avoid running the skip buddy, if running something else can
1942 * be done without getting too unfair.
1943 */
1944 if (cfs_rq->skip == se) {
1945 struct sched_entity *second = __pick_next_entity(se);
1946 if (second && wakeup_preempt_entity(second, left) < 1)
1947 se = second;
1948 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001949
Mike Galbraithf685cea2009-10-23 23:09:22 +02001950 /*
1951 * Prefer last buddy, try to return the CPU to a preempted task.
1952 */
1953 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1954 se = cfs_rq->last;
1955
Rik van Rielac53db52011-02-01 09:51:03 -05001956 /*
1957 * Someone really wants this to run. If it's not unfair, run it.
1958 */
1959 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1960 se = cfs_rq->next;
1961
Mike Galbraithf685cea2009-10-23 23:09:22 +02001962 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01001963
1964 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001965}
1966
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001967static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1968
Ingo Molnarab6cde22007-08-09 11:16:48 +02001969static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001970{
1971 /*
1972 * If still on the runqueue then deactivate_task()
1973 * was not called and update_curr() has to be done:
1974 */
1975 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02001976 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001977
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001978 /* throttle cfs_rqs exceeding runtime */
1979 check_cfs_rq_runtime(cfs_rq);
1980
Peter Zijlstraddc97292007-10-15 17:00:10 +02001981 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001982 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02001983 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001984 /* Put 'current' back into the tree. */
1985 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02001986 /* in !on_rq case, update occurred at dequeue */
Paul Turner9ee474f2012-10-04 13:18:30 +02001987 update_entity_load_avg(prev, 1);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001988 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02001989 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001990}
1991
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001992static void
1993entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001994{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001995 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001996 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001997 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001998 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001999
Paul Turner43365bd2010-12-15 19:10:17 -08002000 /*
Paul Turner9d85f212012-10-04 13:18:29 +02002001 * Ensure that runnable average is periodically updated.
2002 */
Paul Turner9ee474f2012-10-04 13:18:30 +02002003 update_entity_load_avg(curr, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002004 update_cfs_rq_blocked_load(cfs_rq, 1);
Paul Turner9d85f212012-10-04 13:18:29 +02002005
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002006#ifdef CONFIG_SCHED_HRTICK
2007 /*
2008 * queued ticks are scheduled to match the slice, so don't bother
2009 * validating it and just reschedule.
2010 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07002011 if (queued) {
2012 resched_task(rq_of(cfs_rq)->curr);
2013 return;
2014 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002015 /*
2016 * don't let the period tick interfere with the hrtick preemption
2017 */
2018 if (!sched_feat(DOUBLE_TICK) &&
2019 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2020 return;
2021#endif
2022
Yong Zhang2c2efae2011-07-29 16:20:33 +08002023 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002024 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002025}
2026
Paul Turnerab84d312011-07-21 09:43:28 -07002027
2028/**************************************************
2029 * CFS bandwidth control machinery
2030 */
2031
2032#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02002033
2034#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01002035static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02002036
2037static inline bool cfs_bandwidth_used(void)
2038{
Ingo Molnarc5905af2012-02-24 08:31:31 +01002039 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002040}
2041
2042void account_cfs_bandwidth_used(int enabled, int was_enabled)
2043{
2044 /* only need to count groups transitioning between enabled/!enabled */
2045 if (enabled && !was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002046 static_key_slow_inc(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002047 else if (!enabled && was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002048 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002049}
2050#else /* HAVE_JUMP_LABEL */
2051static bool cfs_bandwidth_used(void)
2052{
2053 return true;
2054}
2055
2056void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2057#endif /* HAVE_JUMP_LABEL */
2058
Paul Turnerab84d312011-07-21 09:43:28 -07002059/*
2060 * default period for cfs group bandwidth.
2061 * default: 0.1s, units: nanoseconds
2062 */
2063static inline u64 default_cfs_period(void)
2064{
2065 return 100000000ULL;
2066}
Paul Turnerec12cb72011-07-21 09:43:30 -07002067
2068static inline u64 sched_cfs_bandwidth_slice(void)
2069{
2070 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2071}
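/*
 * Worked example (illustrative numbers): with the 100ms default period
 * above, a group given quota = 25ms may consume at most 25% of one cpu
 * per period in aggregate; the sysctl'd slice (5ms by default) is only
 * the granularity at which per-cpu runqueues draw from that pool. A
 * hypothetical helper expressing the limit as a percentage:
 */
#if 0
static u64 example_quota_pct(struct cfs_bandwidth *cfs_b)
{
	if (cfs_b->quota == RUNTIME_INF)
		return 0;	/* unconstrained */
	return div64_u64(cfs_b->quota * 100, ktime_to_ns(cfs_b->period));
}
#endif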
2072
Paul Turnera9cf55b2011-07-21 09:43:32 -07002073/*
2074 * Replenish runtime according to assigned quota and update expiration time.
2075 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2076 * additional synchronization around rq->lock.
2077 *
2078 * requires cfs_b->lock
2079 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02002080void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07002081{
2082 u64 now;
2083
2084 if (cfs_b->quota == RUNTIME_INF)
2085 return;
2086
2087 now = sched_clock_cpu(smp_processor_id());
2088 cfs_b->runtime = cfs_b->quota;
2089 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2090}
2091
Peter Zijlstra029632f2011-10-25 10:00:11 +02002092static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2093{
2094 return &tg->cfs_bandwidth;
2095}
2096
Paul Turnerf1b17282012-10-04 13:18:31 +02002097/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2098static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2099{
2100 if (unlikely(cfs_rq->throttle_count))
2101 return cfs_rq->throttled_clock_task;
2102
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002103 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02002104}
2105
Paul Turner85dac902011-07-21 09:43:33 -07002106/* returns 0 on failure to allocate runtime */
2107static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07002108{
2109 struct task_group *tg = cfs_rq->tg;
2110 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002111 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002112
2113 /* note: this is a positive sum as runtime_remaining <= 0 */
2114 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2115
2116 raw_spin_lock(&cfs_b->lock);
2117 if (cfs_b->quota == RUNTIME_INF)
2118 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07002119 else {
Paul Turnera9cf55b2011-07-21 09:43:32 -07002120 /*
2121 * If the bandwidth pool has become inactive, then at least one
2122 * period must have elapsed since the last consumption.
2123 * Refresh the global state and ensure bandwidth timer becomes
2124 * active.
2125 */
2126 if (!cfs_b->timer_active) {
2127 __refill_cfs_bandwidth_runtime(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07002128 __start_cfs_bandwidth(cfs_b);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002129 }
Paul Turner58088ad2011-07-21 09:43:31 -07002130
2131 if (cfs_b->runtime > 0) {
2132 amount = min(cfs_b->runtime, min_amount);
2133 cfs_b->runtime -= amount;
2134 cfs_b->idle = 0;
2135 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002136 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07002137 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002138 raw_spin_unlock(&cfs_b->lock);
2139
2140 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002141 /*
2142 * we may have advanced our local expiration to account for allowed
2143 * spread between our sched_clock and the one on which runtime was
2144 * issued.
2145 */
2146 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2147 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07002148
2149 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002150}
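/*
 * Worked example (illustrative numbers): if the local cfs_rq is 2ms in
 * debt (runtime_remaining == -2ms) and the slice is 5ms, min_amount is
 * 7ms: enough to clear the debt and still bank a full slice, so under
 * steady load the global lock is taken only about once per slice.
 */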
2151
2152/*
2153 * Note: This depends on the synchronization provided by sched_clock and the
2154 * fact that rq->clock snapshots this value.
2155 */
2156static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2157{
2158 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002159
2160 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002161 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07002162 return;
2163
2164 if (cfs_rq->runtime_remaining < 0)
2165 return;
2166
2167 /*
2168 * If the local deadline has passed we have to consider the
2169 * possibility that our sched_clock is 'fast' and the global deadline
2170 * has not truly expired.
2171 *
 2172	 * Fortunately we can determine whether this is the case by checking
2173 * whether the global deadline has advanced.
2174 */
2175
2176 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2177 /* extend local deadline, drift is bounded above by 2 ticks */
2178 cfs_rq->runtime_expires += TICK_NSEC;
2179 } else {
2180 /* global deadline is ahead, expiration has passed */
2181 cfs_rq->runtime_remaining = 0;
2182 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002183}
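/*
 * Worked example (illustrative numbers): if this cpu's sched_clock runs
 * ~1ms ahead, the local deadline can pass while the global one has not
 * advanced; the first branch above then nudges the local expiry forward
 * by one tick (drift is bounded by ~2 ticks) rather than prematurely
 * voiding the remaining runtime.
 */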
2184
2185static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2186 unsigned long delta_exec)
2187{
Paul Turnera9cf55b2011-07-21 09:43:32 -07002188 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07002189 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002190 expire_cfs_rq_runtime(cfs_rq);
2191
2192 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07002193 return;
2194
Paul Turner85dac902011-07-21 09:43:33 -07002195 /*
2196 * if we're unable to extend our runtime we resched so that the active
2197 * hierarchy can be throttled
2198 */
2199 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2200 resched_task(rq_of(cfs_rq)->curr);
Paul Turnerec12cb72011-07-21 09:43:30 -07002201}
2202
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002203static __always_inline
2204void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07002205{
Paul Turner56f570e2011-11-07 20:26:33 -08002206 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07002207 return;
2208
2209 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2210}
2211
Paul Turner85dac902011-07-21 09:43:33 -07002212static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2213{
Paul Turner56f570e2011-11-07 20:26:33 -08002214 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07002215}
2216
Paul Turner64660c82011-07-21 09:43:36 -07002217/* check whether cfs_rq, or any parent, is throttled */
2218static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2219{
Paul Turner56f570e2011-11-07 20:26:33 -08002220 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07002221}
2222
2223/*
2224 * Ensure that neither of the group entities corresponding to src_cpu or
2225 * dest_cpu are members of a throttled hierarchy when performing group
2226 * load-balance operations.
2227 */
2228static inline int throttled_lb_pair(struct task_group *tg,
2229 int src_cpu, int dest_cpu)
2230{
2231 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2232
2233 src_cfs_rq = tg->cfs_rq[src_cpu];
2234 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2235
2236 return throttled_hierarchy(src_cfs_rq) ||
2237 throttled_hierarchy(dest_cfs_rq);
2238}
2239
2240/* updated child weight may affect parent so we have to do this bottom up */
2241static int tg_unthrottle_up(struct task_group *tg, void *data)
2242{
2243 struct rq *rq = data;
2244 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2245
2246 cfs_rq->throttle_count--;
2247#ifdef CONFIG_SMP
2248 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02002249 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002250 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02002251 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07002252 }
2253#endif
2254
2255 return 0;
2256}
2257
2258static int tg_throttle_down(struct task_group *tg, void *data)
2259{
2260 struct rq *rq = data;
2261 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2262
Paul Turner82958362012-10-04 13:18:31 +02002263 /* group is entering throttled state, stop time */
2264 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002265 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07002266 cfs_rq->throttle_count++;
2267
2268 return 0;
2269}
2270
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002271static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07002272{
2273 struct rq *rq = rq_of(cfs_rq);
2274 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2275 struct sched_entity *se;
2276 long task_delta, dequeue = 1;
2277
2278 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2279
Paul Turnerf1b17282012-10-04 13:18:31 +02002280 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07002281 rcu_read_lock();
2282 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2283 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07002284
2285 task_delta = cfs_rq->h_nr_running;
2286 for_each_sched_entity(se) {
2287 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2288 /* throttled entity or throttle-on-deactivate */
2289 if (!se->on_rq)
2290 break;
2291
2292 if (dequeue)
2293 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2294 qcfs_rq->h_nr_running -= task_delta;
2295
2296 if (qcfs_rq->load.weight)
2297 dequeue = 0;
2298 }
2299
2300 if (!se)
2301 rq->nr_running -= task_delta;
2302
2303 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002304 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07002305 raw_spin_lock(&cfs_b->lock);
2306 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2307 raw_spin_unlock(&cfs_b->lock);
2308}
2309
Peter Zijlstra029632f2011-10-25 10:00:11 +02002310void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07002311{
2312 struct rq *rq = rq_of(cfs_rq);
2313 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2314 struct sched_entity *se;
2315 int enqueue = 1;
2316 long task_delta;
2317
Michael Wang22b958d2013-06-04 14:23:39 +08002318 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07002319
2320 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02002321
2322 update_rq_clock(rq);
2323
Paul Turner671fd9d2011-07-21 09:43:34 -07002324 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002325 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07002326 list_del_rcu(&cfs_rq->throttled_list);
2327 raw_spin_unlock(&cfs_b->lock);
2328
Paul Turner64660c82011-07-21 09:43:36 -07002329 /* update hierarchical throttle state */
2330 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2331
Paul Turner671fd9d2011-07-21 09:43:34 -07002332 if (!cfs_rq->load.weight)
2333 return;
2334
2335 task_delta = cfs_rq->h_nr_running;
2336 for_each_sched_entity(se) {
2337 if (se->on_rq)
2338 enqueue = 0;
2339
2340 cfs_rq = cfs_rq_of(se);
2341 if (enqueue)
2342 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2343 cfs_rq->h_nr_running += task_delta;
2344
2345 if (cfs_rq_throttled(cfs_rq))
2346 break;
2347 }
2348
2349 if (!se)
2350 rq->nr_running += task_delta;
2351
2352 /* determine whether we need to wake up potentially idle cpu */
2353 if (rq->curr == rq->idle && rq->cfs.nr_running)
2354 resched_task(rq->curr);
2355}
2356
2357static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2358 u64 remaining, u64 expires)
2359{
2360 struct cfs_rq *cfs_rq;
2361 u64 runtime = remaining;
2362
2363 rcu_read_lock();
2364 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2365 throttled_list) {
2366 struct rq *rq = rq_of(cfs_rq);
2367
2368 raw_spin_lock(&rq->lock);
2369 if (!cfs_rq_throttled(cfs_rq))
2370 goto next;
2371
2372 runtime = -cfs_rq->runtime_remaining + 1;
2373 if (runtime > remaining)
2374 runtime = remaining;
2375 remaining -= runtime;
2376
2377 cfs_rq->runtime_remaining += runtime;
2378 cfs_rq->runtime_expires = expires;
2379
2380 /* we check whether we're throttled above */
2381 if (cfs_rq->runtime_remaining > 0)
2382 unthrottle_cfs_rq(cfs_rq);
2383
2384next:
2385 raw_spin_unlock(&rq->lock);
2386
2387 if (!remaining)
2388 break;
2389 }
2390 rcu_read_unlock();
2391
2392 return remaining;
2393}
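/*
 * Worked example (illustrative numbers): a throttled cfs_rq sitting at
 * runtime_remaining == -3ms is handed exactly 3ms + 1ns above, the
 * minimum making it positive and eligible for unthrottling; this spreads
 * a limited refill across as many throttled runqueues as possible rather
 * than satiating the first few in the list.
 */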
2394
Paul Turner58088ad2011-07-21 09:43:31 -07002395/*
2396 * Responsible for refilling a task_group's bandwidth and unthrottling its
2397 * cfs_rqs as appropriate. If there has been no activity within the last
2398 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2399 * used to track this state.
2400 */
2401static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2402{
Paul Turner671fd9d2011-07-21 09:43:34 -07002403 u64 runtime, runtime_expires;
2404 int idle = 1, throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07002405
2406 raw_spin_lock(&cfs_b->lock);
2407 /* no need to continue the timer with no bandwidth constraint */
2408 if (cfs_b->quota == RUNTIME_INF)
2409 goto out_unlock;
2410
Paul Turner671fd9d2011-07-21 09:43:34 -07002411 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2412 /* idle depends on !throttled (for the case of a large deficit) */
2413 idle = cfs_b->idle && !throttled;
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002414 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07002415
Paul Turnera9cf55b2011-07-21 09:43:32 -07002416 /* if we're going inactive then everything else can be deferred */
2417 if (idle)
2418 goto out_unlock;
2419
2420 __refill_cfs_bandwidth_runtime(cfs_b);
2421
Paul Turner671fd9d2011-07-21 09:43:34 -07002422 if (!throttled) {
2423 /* mark as potentially idle for the upcoming period */
2424 cfs_b->idle = 1;
2425 goto out_unlock;
2426 }
Paul Turner58088ad2011-07-21 09:43:31 -07002427
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002428 /* account preceding periods in which throttling occurred */
2429 cfs_b->nr_throttled += overrun;
2430
Paul Turner671fd9d2011-07-21 09:43:34 -07002431 /*
2432 * There are throttled entities so we must first use the new bandwidth
2433 * to unthrottle them before making it generally available. This
2434 * ensures that all existing debts will be paid before a new cfs_rq is
2435 * allowed to run.
2436 */
2437 runtime = cfs_b->runtime;
2438 runtime_expires = cfs_b->runtime_expires;
2439 cfs_b->runtime = 0;
2440
2441 /*
2442 * This check is repeated as we are holding onto the new bandwidth
2443 * while we unthrottle. This can potentially race with an unthrottled
2444 * group trying to acquire new bandwidth from the global pool.
2445 */
2446 while (throttled && runtime > 0) {
2447 raw_spin_unlock(&cfs_b->lock);
2448 /* we can't nest cfs_b->lock while distributing bandwidth */
2449 runtime = distribute_cfs_runtime(cfs_b, runtime,
2450 runtime_expires);
2451 raw_spin_lock(&cfs_b->lock);
2452
2453 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2454 }
2455
2456 /* return (any) remaining runtime */
2457 cfs_b->runtime = runtime;
2458 /*
2459 * While we are ensured activity in the period following an
2460 * unthrottle, this also covers the case in which the new bandwidth is
2461 * insufficient to cover the existing bandwidth deficit. (Forcing the
2462 * timer to remain active while there are any throttled entities.)
2463 */
2464 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07002465out_unlock:
2466 if (idle)
2467 cfs_b->timer_active = 0;
2468 raw_spin_unlock(&cfs_b->lock);
2469
2470 return idle;
2471}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002472
Paul Turnerd8b49862011-07-21 09:43:41 -07002473/* a cfs_rq won't donate quota below this amount */
2474static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2475/* minimum remaining period time to redistribute slack quota */
2476static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2477/* how long we wait to gather additional slack before distributing */
2478static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2479
2480/* are we near the end of the current quota period? */
2481static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2482{
2483 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2484 u64 remaining;
2485
 2486	 /* if the call-back is running, a quota refresh is already occurring */
2487 if (hrtimer_callback_running(refresh_timer))
2488 return 1;
2489
2490 /* is a quota refresh about to occur? */
2491 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2492 if (remaining < min_expire)
2493 return 1;
2494
2495 return 0;
2496}
2497
2498static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2499{
2500 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2501
2502 /* if there's a quota refresh soon don't bother with slack */
2503 if (runtime_refresh_within(cfs_b, min_left))
2504 return;
2505
2506 start_bandwidth_timer(&cfs_b->slack_timer,
2507 ns_to_ktime(cfs_bandwidth_slack_period));
2508}
2509
2510/* we know any runtime found here is valid as update_curr() precedes return */
2511static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2512{
2513 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2514 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2515
2516 if (slack_runtime <= 0)
2517 return;
2518
2519 raw_spin_lock(&cfs_b->lock);
2520 if (cfs_b->quota != RUNTIME_INF &&
2521 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2522 cfs_b->runtime += slack_runtime;
2523
2524 /* we are under rq->lock, defer unthrottling using a timer */
2525 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2526 !list_empty(&cfs_b->throttled_cfs_rq))
2527 start_cfs_slack_bandwidth(cfs_b);
2528 }
2529 raw_spin_unlock(&cfs_b->lock);
2530
2531 /* even if it's not valid for return we don't want to try again */
2532 cfs_rq->runtime_remaining -= slack_runtime;
2533}
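/*
 * Worked example (illustrative numbers): a dequeueing cfs_rq holding 4ms
 * of runtime keeps min_cfs_rq_runtime (1ms) against a quick re-wakeup and
 * returns 3ms to the global pool; if the pool then exceeds a slice while
 * cfs_rqs remain throttled, the 5ms slack timer is armed to redistribute
 * it.
 */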
2534
2535static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2536{
Paul Turner56f570e2011-11-07 20:26:33 -08002537 if (!cfs_bandwidth_used())
2538 return;
2539
Paul Turnerfccfdc62011-11-07 20:26:34 -08002540 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07002541 return;
2542
2543 __return_cfs_rq_runtime(cfs_rq);
2544}
2545
2546/*
2547 * This is done with a timer (instead of inline with bandwidth return) since
2548 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2549 */
2550static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2551{
2552 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2553 u64 expires;
2554
2555 /* confirm we're still not at a refresh boundary */
2556 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2557 return;
2558
2559 raw_spin_lock(&cfs_b->lock);
2560 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2561 runtime = cfs_b->runtime;
2562 cfs_b->runtime = 0;
2563 }
2564 expires = cfs_b->runtime_expires;
2565 raw_spin_unlock(&cfs_b->lock);
2566
2567 if (!runtime)
2568 return;
2569
2570 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2571
2572 raw_spin_lock(&cfs_b->lock);
2573 if (expires == cfs_b->runtime_expires)
2574 cfs_b->runtime = runtime;
2575 raw_spin_unlock(&cfs_b->lock);
2576}
2577
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002578/*
2579 * When a group wakes up we want to make sure that its quota is not already
2580 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 2581 * runtime, as update_curr() throttling cannot trigger until it's on-rq.
2582 */
2583static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2584{
Paul Turner56f570e2011-11-07 20:26:33 -08002585 if (!cfs_bandwidth_used())
2586 return;
2587
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002588 /* an active group must be handled by the update_curr()->put() path */
2589 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2590 return;
2591
2592 /* ensure the group is not already throttled */
2593 if (cfs_rq_throttled(cfs_rq))
2594 return;
2595
2596 /* update runtime allocation */
2597 account_cfs_rq_runtime(cfs_rq, 0);
2598 if (cfs_rq->runtime_remaining <= 0)
2599 throttle_cfs_rq(cfs_rq);
2600}
2601
2602/* conditionally throttle active cfs_rq's from put_prev_entity() */
2603static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2604{
Paul Turner56f570e2011-11-07 20:26:33 -08002605 if (!cfs_bandwidth_used())
2606 return;
2607
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002608 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2609 return;
2610
2611 /*
2612 * it's possible for a throttled entity to be forced into a running
 2613	 * state (e.g. set_curr_task); in this case we're finished.
2614 */
2615 if (cfs_rq_throttled(cfs_rq))
2616 return;
2617
2618 throttle_cfs_rq(cfs_rq);
2619}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002620
2621static inline u64 default_cfs_period(void);
2622static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
2623static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
2624
2625static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2626{
2627 struct cfs_bandwidth *cfs_b =
2628 container_of(timer, struct cfs_bandwidth, slack_timer);
2629 do_sched_cfs_slack_timer(cfs_b);
2630
2631 return HRTIMER_NORESTART;
2632}
2633
2634static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2635{
2636 struct cfs_bandwidth *cfs_b =
2637 container_of(timer, struct cfs_bandwidth, period_timer);
2638 ktime_t now;
2639 int overrun;
2640 int idle = 0;
2641
2642 for (;;) {
2643 now = hrtimer_cb_get_time(timer);
2644 overrun = hrtimer_forward(timer, now, cfs_b->period);
2645
2646 if (!overrun)
2647 break;
2648
2649 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2650 }
2651
2652 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2653}
2654
2655void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2656{
2657 raw_spin_lock_init(&cfs_b->lock);
2658 cfs_b->runtime = 0;
2659 cfs_b->quota = RUNTIME_INF;
2660 cfs_b->period = ns_to_ktime(default_cfs_period());
2661
2662 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2663 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2664 cfs_b->period_timer.function = sched_cfs_period_timer;
2665 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2666 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2667}
2668
2669static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2670{
2671 cfs_rq->runtime_enabled = 0;
2672 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2673}
2674
2675/* requires cfs_b->lock, may release to reprogram timer */
2676void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2677{
2678 /*
2679 * The timer may be active because we're trying to set a new bandwidth
2680 * period or because we're racing with the tear-down path
2681 * (timer_active==0 becomes visible before the hrtimer call-back
2682 * terminates). In either case we ensure that it's re-programmed
2683 */
2684 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2685 raw_spin_unlock(&cfs_b->lock);
2686 /* ensure cfs_b->lock is available while we wait */
2687 hrtimer_cancel(&cfs_b->period_timer);
2688
2689 raw_spin_lock(&cfs_b->lock);
2690 /* if someone else restarted the timer then we're done */
2691 if (cfs_b->timer_active)
2692 return;
2693 }
2694
2695 cfs_b->timer_active = 1;
2696 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2697}
2698
2699static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2700{
2701 hrtimer_cancel(&cfs_b->period_timer);
2702 hrtimer_cancel(&cfs_b->slack_timer);
2703}
2704
Arnd Bergmann38dc3342013-01-25 14:14:22 +00002705static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02002706{
2707 struct cfs_rq *cfs_rq;
2708
2709 for_each_leaf_cfs_rq(rq, cfs_rq) {
2710 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2711
2712 if (!cfs_rq->runtime_enabled)
2713 continue;
2714
2715 /*
2716	 * clock_task is not advancing, so we just need to make sure
2717 * there's some valid quota amount
2718 */
2719 cfs_rq->runtime_remaining = cfs_b->quota;
2720 if (cfs_rq_throttled(cfs_rq))
2721 unthrottle_cfs_rq(cfs_rq);
2722 }
2723}
2724
2725#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02002726static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2727{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002728 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02002729}
2730
2731static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2732 unsigned long delta_exec) {}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002733static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2734static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002735static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07002736
2737static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2738{
2739 return 0;
2740}
Paul Turner64660c82011-07-21 09:43:36 -07002741
2742static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2743{
2744 return 0;
2745}
2746
2747static inline int throttled_lb_pair(struct task_group *tg,
2748 int src_cpu, int dest_cpu)
2749{
2750 return 0;
2751}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002752
2753void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2754
2755#ifdef CONFIG_FAIR_GROUP_SCHED
2756static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07002757#endif
2758
Peter Zijlstra029632f2011-10-25 10:00:11 +02002759static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2760{
2761 return NULL;
2762}
2763static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07002764static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002765
2766#endif /* CONFIG_CFS_BANDWIDTH */
2767
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002768/**************************************************
2769 * CFS operations on tasks:
2770 */
2771
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002772#ifdef CONFIG_SCHED_HRTICK
2773static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2774{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002775 struct sched_entity *se = &p->se;
2776 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2777
2778 WARN_ON(task_rq(p) != rq);
2779
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002780 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002781 u64 slice = sched_slice(cfs_rq, se);
2782 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2783 s64 delta = slice - ran;
2784
2785 if (delta < 0) {
2786 if (rq->curr == p)
2787 resched_task(p);
2788 return;
2789 }
2790
2791 /*
2792	 * Don't schedule slices shorter than 10000ns; that just
2793 * doesn't make sense. Rely on vruntime for fairness.
2794 */
Peter Zijlstra31656512008-07-18 18:01:23 +02002795 if (rq->curr != p)
Peter Zijlstra157124c2008-07-28 11:53:11 +02002796 delta = max_t(s64, 10000LL, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002797
Peter Zijlstra31656512008-07-18 18:01:23 +02002798 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002799 }
2800}
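/*
 * Worked example of the arithmetic above: with a computed slice of
 * 6000000ns and ran = 2000000ns, delta = 4000000ns and the hrtick is
 * programmed to fire 4ms out, exactly when the slice expires. If the
 * task has already overrun its slice, delta is negative and, when it
 * is the current task, it is rescheduled immediately instead.
 */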
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002801
2802/*
2803	 * Called from enqueue/dequeue; updates the hrtick when the
2804 * current task is from our class and nr_running is low enough
2805 * to matter.
2806 */
2807static void hrtick_update(struct rq *rq)
2808{
2809 struct task_struct *curr = rq->curr;
2810
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002811 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002812 return;
2813
2814 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2815 hrtick_start_fair(rq, curr);
2816}
Dhaval Giani55e12e52008-06-24 23:39:43 +05302817#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002818static inline void
2819hrtick_start_fair(struct rq *rq, struct task_struct *p)
2820{
2821}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002822
2823static inline void hrtick_update(struct rq *rq)
2824{
2825}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002826#endif
2827
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002828/*
2829 * The enqueue_task method is called before nr_running is
2830 * increased. Here we update the fair scheduling stats and
2831 * then put the task into the rbtree:
2832 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00002833static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002834enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002835{
2836 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002837 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002838
2839 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002840 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002841 break;
2842 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002843 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07002844
2845 /*
2846 * end evaluation on encountering a throttled cfs_rq
2847 *
2848 * note: in the case of encountering a throttled cfs_rq we will
2849 * post the final h_nr_running increment below.
2850 */
2851 if (cfs_rq_throttled(cfs_rq))
2852 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07002853 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07002854
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002855 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002856 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002857
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002858 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08002859 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07002860 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002861
Paul Turner85dac902011-07-21 09:43:33 -07002862 if (cfs_rq_throttled(cfs_rq))
2863 break;
2864
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002865 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02002866 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002867 }
2868
Ben Segall18bf2802012-10-04 12:51:20 +02002869 if (!se) {
2870 update_rq_runnable_avg(rq, rq->nr_running);
Paul Turner85dac902011-07-21 09:43:33 -07002871 inc_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02002872 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002873 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002874}
2875
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002876static void set_next_buddy(struct sched_entity *se);
2877
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002878/*
2879 * The dequeue_task method is called before nr_running is
2880 * decreased. We remove the task from the rbtree and
2881 * update the fair scheduling stats:
2882 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002883static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002884{
2885 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002886 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002887 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002888
2889 for_each_sched_entity(se) {
2890 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002891 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07002892
2893 /*
2894 * end evaluation on encountering a throttled cfs_rq
2895 *
2896 * note: in the case of encountering a throttled cfs_rq we will
2897 * post the final h_nr_running decrement below.
2898 */
2899 if (cfs_rq_throttled(cfs_rq))
2900 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07002901 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002902
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002903 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002904 if (cfs_rq->load.weight) {
2905 /*
2906 * Bias pick_next to pick a task from this cfs_rq, as
2907 * p is sleeping when it is within its sched_slice.
2908 */
2909 if (task_sleep && parent_entity(se))
2910 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07002911
2912 /* avoid re-evaluating load for this entity */
2913 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002914 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002915 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002916 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002917 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002918
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002919 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08002920 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07002921 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002922
Paul Turner85dac902011-07-21 09:43:33 -07002923 if (cfs_rq_throttled(cfs_rq))
2924 break;
2925
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002926 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02002927 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002928 }
2929
Ben Segall18bf2802012-10-04 12:51:20 +02002930 if (!se) {
Paul Turner85dac902011-07-21 09:43:33 -07002931 dec_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02002932 update_rq_runnable_avg(rq, 1);
2933 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002934 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002935}
2936
Gregory Haskinse7693a32008-01-25 21:08:09 +01002937#ifdef CONFIG_SMP
Peter Zijlstra029632f2011-10-25 10:00:11 +02002938/* Used instead of source_load when we know the type == 0 */
2939static unsigned long weighted_cpuload(const int cpu)
2940{
2941 return cpu_rq(cpu)->load.weight;
2942}
2943
2944/*
2945 * Return a low guess at the load of a migration-source cpu weighted
2946 * according to the scheduling class and "nice" value.
2947 *
2948 * We want to under-estimate the load of migration sources, to
2949 * balance conservatively.
2950 */
2951static unsigned long source_load(int cpu, int type)
2952{
2953 struct rq *rq = cpu_rq(cpu);
2954 unsigned long total = weighted_cpuload(cpu);
2955
2956 if (type == 0 || !sched_feat(LB_BIAS))
2957 return total;
2958
2959 return min(rq->cpu_load[type-1], total);
2960}
2961
2962/*
2963 * Return a high guess at the load of a migration-target cpu weighted
2964 * according to the scheduling class and "nice" value.
2965 */
2966static unsigned long target_load(int cpu, int type)
2967{
2968 struct rq *rq = cpu_rq(cpu);
2969 unsigned long total = weighted_cpuload(cpu);
2970
2971 if (type == 0 || !sched_feat(LB_BIAS))
2972 return total;
2973
2974 return max(rq->cpu_load[type-1], total);
2975}
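/*
 * Example of the bias: if the decayed history cpu_load[type-1] is 2048
 * but the instantaneous load.weight is 1024 (a task just left the cpu),
 * source_load() reports min(2048, 1024) = 1024, the optimistic view of
 * a migration source, while target_load() reports max(2048, 1024) =
 * 2048, the pessimistic view of a migration target. Both collapse to
 * the instantaneous value when LB_BIAS is disabled.
 */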
2976
2977static unsigned long power_of(int cpu)
2978{
2979 return cpu_rq(cpu)->cpu_power;
2980}
2981
2982static unsigned long cpu_avg_load_per_task(int cpu)
2983{
2984 struct rq *rq = cpu_rq(cpu);
2985 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
2986
2987 if (nr_running)
2988 return rq->load.weight / nr_running;
2989
2990 return 0;
2991}
2992
Ingo Molnar098fb9d2008-03-16 20:36:10 +01002993
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02002994static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002995{
2996 struct sched_entity *se = &p->se;
2997 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02002998 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002999
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003000#ifndef CONFIG_64BIT
3001 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003002
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003003 do {
3004 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3005 smp_rmb();
3006 min_vruntime = cfs_rq->min_vruntime;
3007 } while (min_vruntime != min_vruntime_copy);
3008#else
3009 min_vruntime = cfs_rq->min_vruntime;
3010#endif
3011
3012 se->vruntime -= min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003013}
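/*
 * The retry loop above is a poor man's seqlock for reading a 64-bit
 * vruntime on 32-bit machines: the writer stores the value and then,
 * behind a write barrier, the copy; the reader loads the copy, then the
 * value, and retries until the two agree. A minimal sketch of the same
 * pattern using GCC __atomic builtins (illustrative only; the struct
 * and function names are made up for the example):
 */
#if 0	/* illustrative user-space sketch, not kernel code */
#include <stdint.h>

struct split64 {
	uint64_t val;
	uint64_t copy;
};

static void writer(struct split64 *s, uint64_t v)
{
	__atomic_store_n(&s->val, v, __ATOMIC_RELAXED);
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* plays smp_wmb() */
	__atomic_store_n(&s->copy, v, __ATOMIC_RELAXED);
}

static uint64_t reader(struct split64 *s)
{
	uint64_t c, v;

	do {
		c = __atomic_load_n(&s->copy, __ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* plays smp_rmb() */
		v = __atomic_load_n(&s->val, __ATOMIC_RELAXED);
	} while (v != c);

	return v;
}
#endif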
3014
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003015#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003016/*
3017 * effective_load() calculates the load change as seen from the root_task_group
3018 *
3019 * Adding load to a group doesn't make a group heavier, but can cause movement
3020 * of group shares between cpus. Assuming the shares were perfectly aligned one
3021 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003022 *
3023 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3024 * on this @cpu and results in a total addition (subtraction) of @wg to the
3025 * total group weight.
3026 *
3027 * Given a runqueue weight distribution (rw_i) we can compute a shares
3028 * distribution (s_i) using:
3029 *
3030 * s_i = rw_i / \Sum rw_j (1)
3031 *
3032 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3033 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3034 * shares distribution (s_i):
3035 *
3036 * rw_i = { 2, 4, 1, 0 }
3037 * s_i = { 2/7, 4/7, 1/7, 0 }
3038 *
3039 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3040	 * task used to run on and the CPU the waker is running on), so we need to
3041 * compute the effect of waking a task on either CPU and, in case of a sync
3042 * wakeup, compute the effect of the current task going to sleep.
3043 *
3044 * So for a change of @wl to the local @cpu with an overall group weight change
3045	 * of @wg we can compute the new shares distribution (s'_i) using:
3046 *
3047 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3048 *
3049 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3050 * differences in waking a task to CPU 0. The additional task changes the
3051 * weight and shares distributions like:
3052 *
3053 * rw'_i = { 3, 4, 1, 0 }
3054 * s'_i = { 3/8, 4/8, 1/8, 0 }
3055 *
3056 * We can then compute the difference in effective weight by using:
3057 *
3058 * dw_i = S * (s'_i - s_i) (3)
3059 *
3060 * Where 'S' is the group weight as seen by its parent.
3061 *
3062 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3063 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3064 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003065 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003066static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003067{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003068 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003069
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003070 if (!tg->parent) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003071 return wl;
3072
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003073 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003074 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003075
Paul Turner977dda72011-01-14 17:57:50 -08003076 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003077
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003078 /*
3079 * W = @wg + \Sum rw_j
3080 */
3081 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003082
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003083 /*
3084 * w = rw_i + @wl
3085 */
3086 w = se->my_q->load.weight + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003087
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003088 /*
3089 * wl = S * s'_i; see (2)
3090 */
3091 if (W > 0 && w < W)
3092 wl = (w * tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08003093 else
3094 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003095
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003096 /*
3097 * Per the above, wl is the new se->load.weight value; since
3098 * those are clipped to [MIN_SHARES, ...) do so now. See
3099 * calc_cfs_shares().
3100 */
Paul Turner977dda72011-01-14 17:57:50 -08003101 if (wl < MIN_SHARES)
3102 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003103
3104 /*
3105 * wl = dw_i = S * (s'_i - s_i); see (3)
3106 */
Paul Turner977dda72011-01-14 17:57:50 -08003107 wl -= se->load.weight;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003108
3109 /*
3110 * Recursively apply this logic to all parent groups to compute
3111 * the final effective load change on the root group. Since
3112 * only the @tg group gets extra weight, all parent groups can
3113 * only redistribute existing shares. @wl is the shift in shares
3114 * resulting from this level per the above.
3115 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003116 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003117 }
3118
3119 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003120}
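/*
 * The worked example from the comment block above, as code: 4 CPUs,
 * rw_i = {2, 4, 1, 0}, group weight S = 1024 as seen by the parent,
 * one unit of weight added on CPU 0. This mirrors a single iteration
 * of effective_load() for a one-level group hierarchy; the small gap
 * versus the analytic 5/56 * S comes from integer truncation.
 */
#if 0	/* illustrative user-space sketch, not kernel code */
#include <stdio.h>

int main(void)
{
	long rw[4] = { 2, 4, 1, 0 };
	long S = 1024;			/* tg->shares */
	long wl = 1, wg = 1;		/* one task of weight 1 added */
	int cpu = 0;

	long sum = rw[0] + rw[1] + rw[2] + rw[3];	/* \Sum rw_j = 7 */
	long W = wg + sum;				/* = 8 */
	long w = rw[cpu] + wl;				/* = 3 */
	long s_old = S * rw[cpu] / sum;			/* S * 2/7 = 292 */
	long s_new = S * w / W;				/* S * 3/8 = 384 */

	printf("dw_%d = %ld (analytic: %ld)\n", cpu, s_new - s_old, 5 * S / 56);
	return 0;
}
#endif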
3121#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003122
Peter Zijlstra83378262008-06-27 13:41:37 +02003123static inline unsigned long effective_load(struct task_group *tg, int cpu,
3124 unsigned long wl, unsigned long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003125{
Peter Zijlstra83378262008-06-27 13:41:37 +02003126 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003127}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003128
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003129#endif
3130
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003131static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003132{
Paul Turnere37b6a72011-01-21 20:44:59 -08003133 s64 this_load, load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003134 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003135 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003136 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02003137 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003138 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003139
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003140 idx = sd->wake_idx;
3141 this_cpu = smp_processor_id();
3142 prev_cpu = task_cpu(p);
3143 load = source_load(prev_cpu, idx);
3144 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003145
3146 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003147 * If sync wakeup then subtract the (maximum possible)
3148 * effect of the currently running task from the load
3149 * of the current CPU:
3150 */
Peter Zijlstra83378262008-06-27 13:41:37 +02003151 if (sync) {
3152 tg = task_group(current);
3153 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003154
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003155 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02003156 load += effective_load(tg, prev_cpu, 0, -weight);
3157 }
3158
3159 tg = task_group(p);
3160 weight = p->se.load.weight;
3161
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003162 /*
3163 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003164 * due to the sync cause above having dropped this_load to 0, we'll
3165 * always have an imbalance, but there's really nothing you can do
3166 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003167 *
3168 * Otherwise check if either cpus are near enough in load to allow this
3169 * task to be woken on this_cpu.
3170 */
Paul Turnere37b6a72011-01-21 20:44:59 -08003171 if (this_load > 0) {
3172 s64 this_eff_load, prev_eff_load;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02003173
3174 this_eff_load = 100;
3175 this_eff_load *= power_of(prev_cpu);
3176 this_eff_load *= this_load +
3177 effective_load(tg, this_cpu, weight, weight);
3178
3179 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3180 prev_eff_load *= power_of(this_cpu);
3181 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3182
3183 balanced = this_eff_load <= prev_eff_load;
3184 } else
3185 balanced = true;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003186
3187 /*
3188 * If the currently running task will sleep within
3189 * a reasonable amount of time then attract this newly
3190 * woken task:
3191 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02003192 if (sync && balanced)
3193 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003194
Lucas De Marchi41acab82010-03-10 23:37:45 -03003195 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003196 tl_per_task = cpu_avg_load_per_task(this_cpu);
3197
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003198 if (balanced ||
3199 (this_load <= load &&
3200 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003201 /*
3202 * This domain has SD_WAKE_AFFINE and
3203 * p is cache cold in this domain, and
3204 * there is no bad imbalance.
3205 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003206 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003207 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003208
3209 return 1;
3210 }
3211 return 0;
3212}
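/*
 * Worked example of the comparison above, with equal cpu_power (1024)
 * on both cpus and the common imbalance_pct of 125: the prev_cpu side
 * gets a multiplier of 100 + (125 - 100) / 2 = 112. With an effective
 * this-side load of 1000 and a prev-side load of 900 we compare
 * 100 * 1000 = 100000 against 112 * 900 = 100800 and declare the wakeup
 * balanced; the ~12% slack deliberately biases tasks toward the waker.
 */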
3213
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003214/*
3215 * find_idlest_group finds and returns the least busy CPU group within the
3216 * domain.
3217 */
3218static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02003219find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003220 int this_cpu, int load_idx)
Gregory Haskinse7693a32008-01-25 21:08:09 +01003221{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07003222 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003223 unsigned long min_load = ULONG_MAX, this_load = 0;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003224 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003225
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003226 do {
3227 unsigned long load, avg_load;
3228 int local_group;
3229 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003230
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003231 /* Skip over this group if it has no CPUs allowed */
3232 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003233 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003234 continue;
3235
3236 local_group = cpumask_test_cpu(this_cpu,
3237 sched_group_cpus(group));
3238
3239 /* Tally up the load of all CPUs in the group */
3240 avg_load = 0;
3241
3242 for_each_cpu(i, sched_group_cpus(group)) {
3243 /* Bias balancing toward cpus of our domain */
3244 if (local_group)
3245 load = source_load(i, load_idx);
3246 else
3247 load = target_load(i, load_idx);
3248
3249 avg_load += load;
3250 }
3251
3252 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02003253 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003254
3255 if (local_group) {
3256 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003257 } else if (avg_load < min_load) {
3258 min_load = avg_load;
3259 idlest = group;
3260 }
3261 } while (group = group->next, group != sd->groups);
3262
3263 if (!idlest || 100*this_load < imbalance*min_load)
3264 return NULL;
3265 return idlest;
3266}
3267
3268/*
3269 * find_idlest_cpu - find the idlest cpu among the cpus in group.
3270 */
3271static int
3272find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3273{
3274 unsigned long load, min_load = ULONG_MAX;
3275 int idlest = -1;
3276 int i;
3277
3278 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003279 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003280 load = weighted_cpuload(i);
3281
3282 if (load < min_load || (load == min_load && i == this_cpu)) {
3283 min_load = load;
3284 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003285 }
3286 }
3287
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003288 return idlest;
3289}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003290
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003291/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003292 * Try to locate an idle CPU in the sched_domain.
3293 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003294static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003295{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003296 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07003297 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003298 int i = task_cpu(p);
3299
3300 if (idle_cpu(target))
3301 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003302
3303 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003304	 * If the previous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003305 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003306 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3307 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003308
3309 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07003310	 * Otherwise, iterate the domains and find an eligible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003311 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01003312 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08003313 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07003314 sg = sd->groups;
3315 do {
3316 if (!cpumask_intersects(sched_group_cpus(sg),
3317 tsk_cpus_allowed(p)))
3318 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02003319
Linus Torvalds37407ea2012-09-16 12:29:43 -07003320 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003321 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07003322 goto next;
3323 }
3324
3325 target = cpumask_first_and(sched_group_cpus(sg),
3326 tsk_cpus_allowed(p));
3327 goto done;
3328next:
3329 sg = sg->next;
3330 } while (sg != sd->groups);
3331 }
3332done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003333 return target;
3334}
3335
3336/*
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003337 * select_task_rq_fair: balance the current task (running on cpu) in domains
3338 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_FORK and
3339 * SD_BALANCE_EXEC.
3340 *
3341 * Balance, ie. select the least loaded group.
3342 *
3343 * Returns the target CPU number, or the same CPU if no balancing is needed.
3344 *
3345 * preempt must be disabled.
3346 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01003347static int
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003348select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003349{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003350 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003351 int cpu = smp_processor_id();
3352 int prev_cpu = task_cpu(p);
3353 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003354 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003355 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003356
Peter Zijlstra29baa742012-04-23 12:11:21 +02003357 if (p->nr_cpus_allowed == 1)
Mike Galbraith76854c72011-11-22 15:18:24 +01003358 return prev_cpu;
3359
Peter Zijlstra0763a662009-09-14 19:37:39 +02003360 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003361 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003362 want_affine = 1;
3363 new_cpu = prev_cpu;
3364 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01003365
Peter Zijlstradce840a2011-04-07 14:09:50 +02003366 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003367 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f428882009-12-16 18:04:34 +01003368 if (!(tmp->flags & SD_LOAD_BALANCE))
3369 continue;
3370
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003371 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003372 * If both cpu and prev_cpu are part of this domain,
3373 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01003374 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003375 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3376 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3377 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08003378 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003379 }
3380
Alex Shif03542a2012-07-26 08:55:34 +08003381 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003382 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003383 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003384
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003385 if (affine_sd) {
Alex Shif03542a2012-07-26 08:55:34 +08003386 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
Peter Zijlstradce840a2011-04-07 14:09:50 +02003387 prev_cpu = cpu;
3388
3389 new_cpu = select_idle_sibling(p, prev_cpu);
3390 goto unlock;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003391 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02003392
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003393 while (sd) {
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003394 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003395 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003396 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003397
Peter Zijlstra0763a662009-09-14 19:37:39 +02003398 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003399 sd = sd->child;
3400 continue;
3401 }
3402
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003403 if (sd_flag & SD_BALANCE_WAKE)
3404 load_idx = sd->wake_idx;
3405
3406 group = find_idlest_group(sd, p, cpu, load_idx);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003407 if (!group) {
3408 sd = sd->child;
3409 continue;
3410 }
3411
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02003412 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003413 if (new_cpu == -1 || new_cpu == cpu) {
3414 /* Now try balancing at a lower domain level of cpu */
3415 sd = sd->child;
3416 continue;
3417 }
3418
3419 /* Now try balancing at a lower domain level of new_cpu */
3420 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003421 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003422 sd = NULL;
3423 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003424 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003425 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02003426 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003427 sd = tmp;
3428 }
3429 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01003430 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02003431unlock:
3432 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01003433
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003434 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003435}
Paul Turner0a74bef2012-10-04 13:18:30 +02003436
3437/*
Paul Turnerf4e26b12012-10-04 13:18:32 +02003438 * Load tracking only depends on SMP; the FAIR_GROUP_SCHED dependency below
3439 * may be removed once load tracking is useful beyond shares distribution
3440 * (e.g. load-balance).
3441 */
3442#ifdef CONFIG_FAIR_GROUP_SCHED
3443/*
Paul Turner0a74bef2012-10-04 13:18:30 +02003444 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3445 * cfs_rq_of(p) references at time of call are still valid and identify the
3446 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3447 * other assumptions, including the state of rq->lock, should be made.
3448 */
3449static void
3450migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3451{
Paul Turneraff3e492012-10-04 13:18:30 +02003452 struct sched_entity *se = &p->se;
3453 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3454
3455 /*
3456 * Load tracking: accumulate removed load so that it can be processed
3457 * when we next update owning cfs_rq under rq->lock. Tasks contribute
3458 * to blocked load iff they have a positive decay-count. It can never
3459 * be negative here since on-rq tasks have decay-count == 0.
3460 */
3461 if (se->avg.decay_count) {
3462 se->avg.decay_count = -__synchronize_entity_decay(se);
3463 atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
3464 }
Paul Turner0a74bef2012-10-04 13:18:30 +02003465}
Paul Turnerf4e26b12012-10-04 13:18:32 +02003466#endif
Gregory Haskinse7693a32008-01-25 21:08:09 +01003467#endif /* CONFIG_SMP */
3468
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003469static unsigned long
3470wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003471{
3472 unsigned long gran = sysctl_sched_wakeup_granularity;
3473
3474 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003475	 * Since it's curr running now, convert the gran from real-time
3476	 * to virtual-time in its units.
Mike Galbraith13814d42010-03-11 17:17:04 +01003477 *
3478 * By using 'se' instead of 'curr' we penalize light tasks, so
3479	 * they get preempted more easily. That is, if 'se' < 'curr' then
3480 * the resulting gran will be larger, therefore penalizing the
3481 * lighter, if otoh 'se' > 'curr' then the resulting gran will
3482 * be smaller, again penalizing the lighter task.
3483 *
3484 * This is especially important for buddies when the leftmost
3485 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003486 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08003487 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003488}
3489
3490/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02003491 * Should 'se' preempt 'curr'?
3492 *
3493 * |s1
3494 * |s2
3495 * |s3
3496 * g
3497 * |<--->|c
3498 *
3499 * w(c, s1) = -1
3500 * w(c, s2) = 0
3501 * w(c, s3) = 1
3502 *
3503 */
3504static int
3505wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3506{
3507 s64 gran, vdiff = curr->vruntime - se->vruntime;
3508
3509 if (vdiff <= 0)
3510 return -1;
3511
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003512 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02003513 if (vdiff > gran)
3514 return 1;
3515
3516 return 0;
3517}
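/*
 * Concretely: if curr->vruntime leads se->vruntime by 3ms of virtual
 * time and wakeup_gran() scales the wakeup granularity to 2ms for se's
 * weight, vdiff > gran and we return 1 (preempt; s3 in the diagram).
 * A lead between 0 and 2ms returns 0 (s2: ahead, but not by enough),
 * and vdiff <= 0 returns -1 (s1: 'se' has consumed at least as much
 * virtual time as 'curr' and has no claim to preempt).
 */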
3518
Peter Zijlstra02479092008-11-04 21:25:10 +01003519static void set_last_buddy(struct sched_entity *se)
3520{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003521 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3522 return;
3523
3524 for_each_sched_entity(se)
3525 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003526}
3527
3528static void set_next_buddy(struct sched_entity *se)
3529{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003530 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3531 return;
3532
3533 for_each_sched_entity(se)
3534 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003535}
3536
Rik van Rielac53db52011-02-01 09:51:03 -05003537static void set_skip_buddy(struct sched_entity *se)
3538{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003539 for_each_sched_entity(se)
3540 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05003541}
3542
Peter Zijlstra464b7522008-10-24 11:06:15 +02003543/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003544 * Preempt the current task with a newly woken task if needed:
3545 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02003546static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003547{
3548 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02003549 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003550 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02003551 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003552 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003553
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01003554 if (unlikely(se == pse))
3555 return;
3556
Paul Turner5238cdd2011-07-21 09:43:37 -07003557 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003558	 * This is possible from callers such as move_task(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07003559	 * unconditionally call check_preempt_curr() after an enqueue (which may
3560	 * have led to a throttle). This both saves work and prevents false
3561 * next-buddy nomination below.
3562 */
3563 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3564 return;
3565
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003566 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02003567 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003568 next_buddy_marked = 1;
3569 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02003570
Bharata B Raoaec0a512008-08-28 14:42:49 +05303571 /*
3572	 * We can come here with TIF_NEED_RESCHED already set from the new-task
3573	 * wakeup path.
Paul Turner5238cdd2011-07-21 09:43:37 -07003574 *
3575 * Note: this also catches the edge-case of curr being in a throttled
3576 * group (e.g. via set_curr_task), since update_curr() (in the
3577 * enqueue of curr) will have resulted in resched being set. This
3578 * prevents us from potentially nominating it as a false LAST_BUDDY
3579 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05303580 */
3581 if (test_tsk_need_resched(curr))
3582 return;
3583
Darren Harta2f5c9a2011-02-22 13:04:33 -08003584 /* Idle tasks are by definition preempted by non-idle tasks. */
3585 if (unlikely(curr->policy == SCHED_IDLE) &&
3586 likely(p->policy != SCHED_IDLE))
3587 goto preempt;
3588
Ingo Molnar91c234b2007-10-15 17:00:18 +02003589 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08003590 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3591 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02003592 */
Ingo Molnar8ed92e512012-10-14 14:28:50 +02003593 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02003594 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003595
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003596 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07003597 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003598 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003599 if (wakeup_preempt_entity(se, pse) == 1) {
3600 /*
3601 * Bias pick_next to pick the sched entity that is
3602 * triggering this preemption.
3603 */
3604 if (!next_buddy_marked)
3605 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003606 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003607 }
Jupyung Leea65ac742009-11-17 18:51:40 +09003608
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003609 return;
3610
3611preempt:
3612 resched_task(curr);
3613 /*
3614 * Only set the backward buddy when the current task is still
3615 * on the rq. This can happen when a wakeup gets interleaved
3616 * with schedule on the ->pre_schedule() or idle_balance()
3617	 * point, either of which can drop the rq lock.
3618 *
3619	 * Also, during early boot the idle thread is in the fair class;
3620	 * for obvious reasons it's a bad idea to schedule back to it.
3621 */
3622 if (unlikely(!se->on_rq || curr == rq->idle))
3623 return;
3624
3625 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3626 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003627}
3628
Ingo Molnarfb8d4722007-08-09 11:16:48 +02003629static struct task_struct *pick_next_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003630{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003631 struct task_struct *p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003632 struct cfs_rq *cfs_rq = &rq->cfs;
3633 struct sched_entity *se;
3634
Tim Blechmann36ace272009-11-24 11:55:45 +01003635 if (!cfs_rq->nr_running)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003636 return NULL;
3637
3638 do {
Ingo Molnar9948f4b2007-08-09 11:16:48 +02003639 se = pick_next_entity(cfs_rq);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01003640 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003641 cfs_rq = group_cfs_rq(se);
3642 } while (cfs_rq);
3643
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003644 p = task_of(se);
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003645 if (hrtick_enabled(rq))
3646 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003647
3648 return p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003649}
3650
3651/*
3652 * Account for a descheduled task:
3653 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02003654static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003655{
3656 struct sched_entity *se = &prev->se;
3657 struct cfs_rq *cfs_rq;
3658
3659 for_each_sched_entity(se) {
3660 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02003661 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003662 }
3663}
3664
Rik van Rielac53db52011-02-01 09:51:03 -05003665/*
3666 * sched_yield() is very simple
3667 *
3668 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3669 */
3670static void yield_task_fair(struct rq *rq)
3671{
3672 struct task_struct *curr = rq->curr;
3673 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3674 struct sched_entity *se = &curr->se;
3675
3676 /*
3677 * Are we the only task in the tree?
3678 */
3679 if (unlikely(rq->nr_running == 1))
3680 return;
3681
3682 clear_buddies(cfs_rq, se);
3683
3684 if (curr->policy != SCHED_BATCH) {
3685 update_rq_clock(rq);
3686 /*
3687 * Update run-time statistics of the 'current'.
3688 */
3689 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01003690 /*
3691 * Tell update_rq_clock() that we've just updated,
3692	 * so we don't do a microscopic update in schedule()
3693 * and double the fastpath cost.
3694 */
3695 rq->skip_clock_update = 1;
Rik van Rielac53db52011-02-01 09:51:03 -05003696 }
3697
3698 set_skip_buddy(se);
3699}
3700
Mike Galbraithd95f4122011-02-01 09:50:51 -05003701static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3702{
3703 struct sched_entity *se = &p->se;
3704
Paul Turner5238cdd2011-07-21 09:43:37 -07003705 /* throttled hierarchies are not runnable */
3706 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05003707 return false;
3708
3709 /* Tell the scheduler that we'd really like pse to run next. */
3710 set_next_buddy(se);
3711
Mike Galbraithd95f4122011-02-01 09:50:51 -05003712 yield_task_fair(rq);
3713
3714 return true;
3715}
3716
Peter Williams681f3e62007-10-24 18:23:51 +02003717#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003718/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02003719 * Fair scheduling class load-balancing methods.
3720 *
3721 * BASICS
3722 *
3723 * The purpose of load-balancing is to achieve the same basic fairness the
3724 * per-cpu scheduler provides, namely provide a proportional amount of compute
3725 * time to each task. This is expressed in the following equation:
3726 *
3727 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
3728 *
3729 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3730 * W_i,0 is defined as:
3731 *
3732 * W_i,0 = \Sum_j w_i,j (2)
3733 *
3734 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3735 * is derived from the nice value as per prio_to_weight[].
3736 *
3737 * The weight average is an exponential decay average of the instantaneous
3738 * weight:
3739 *
3740 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
3741 *
3742 * P_i is the cpu power (or compute capacity) of cpu i; typically it is the
3743 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3744 * can also include other factors [XXX].
3745 *
3746 * To achieve this balance we define a measure of imbalance which follows
3747 * directly from (1):
3748 *
3749 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
3750 *
3751 * We then move tasks around to minimize the imbalance. In the continuous
3752 * function space it is obvious this converges; in the discrete case we get
3753 * a few fun cases generally called infeasible weight scenarios.
3754 *
3755 * [XXX expand on:
3756 * - infeasible weights;
3757 * - local vs global optima in the discrete case. ]
3758 *
3759 *
3760 * SCHED DOMAINS
3761 *
3762 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3763 * for all i,j solution, we create a tree of cpus that follows the hardware
3764 * topology where each level pairs two lower groups (or better). This results
3765 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3766 * tree to only the first of the previous level and we decrease the frequency
3767 * of load-balance at each level inversely proportional to the number of cpus in
3768 * the groups.
3769 *
3770 * This yields:
3771 *
3772 * log_2 n 1 n
3773 * \Sum { --- * --- * 2^i } = O(n) (5)
3774 * i = 0 2^i 2^i
3775 * `- size of each group
3776 * | | `- number of cpus doing load-balance
3777 * | `- freq
3778 * `- sum over all levels
3779 *
3780 * Coupled with a limit on how many tasks we can migrate every balance pass,
3781 * this makes (5) the runtime complexity of the balancer.
3782 *
3783 * An important property here is that each CPU is still (indirectly) connected
3784 * to every other cpu in at most O(log n) steps:
3785 *
3786 * The adjacency matrix of the resulting graph is given by:
3787 *
3788 * log_2 n
3789 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
3790 * k = 0
3791 *
3792 * And you'll find that:
3793 *
3794 * A^(log_2 n)_i,j != 0 for all i,j (7)
3795 *
3796 * Showing there's indeed a path between every cpu in at most O(log n) steps.
3797 * The task movement gives a factor of O(m), giving a convergence complexity
3798 * of:
3799 *
3800 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
3801 *
3802 *
3803 * WORK CONSERVING
3804 *
3805 * In order to avoid CPUs going idle while there's still work to do, new idle
3806 * balancing is more aggressive and has the newly idle cpu iterate up the domain
3807 * tree itself instead of relying on other CPUs to bring it work.
3808 *
3809 * This adds some complexity to both (5) and (8) but it reduces the total idle
3810 * time.
3811 *
3812 * [XXX more?]
3813 *
3814 *
3815 * CGROUPS
3816 *
3817 * Cgroups make a horror show out of (2), instead of a simple sum we get:
3818 *
3819 * s_k,i
3820 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
3821 * S_k
3822 *
3823 * Where
3824 *
3825 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
3826 *
3827 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
3828 *
3829 * The big problem is S_k: it's a global sum needed to compute a local (W_i)
3830 * property.
3831 *
3832 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
3833 * rewrite all of this once again.]
3834 */
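/*
 * Equation (3) above in code: per update, the weight average keeps
 * (2^n - 1)/2^n of its old value and blends in 1/2^n of the
 * instantaneous weight, so it converges geometrically toward W_i,0.
 * A minimal sketch with n = 3 (the coefficients and weight are
 * illustrative choices, not the kernel's):
 */
#if 0	/* illustrative user-space sketch, not kernel code */
#include <stdio.h>

int main(void)
{
	double W = 0.0;		/* running average W_i,n */
	double W0 = 2048.0;	/* instantaneous weight W_i,0 */
	int n = 3, step;
	double keep = ((1 << n) - 1) / (double)(1 << n);	/* 7/8 */

	for (step = 1; step <= 32; step++)
		W = keep * W + (1.0 - keep) * W0;

	printf("average after 32 updates: %.1f (converging to %.1f)\n", W, W0);
	return 0;
}
#endif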
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003835
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09003836static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3837
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003838#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01003839#define LBF_NEED_BREAK 0x02
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303840#define LBF_SOME_PINNED 0x04
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003841
3842struct lb_env {
3843 struct sched_domain *sd;
3844
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003845 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05303846 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003847
3848 int dst_cpu;
3849 struct rq *dst_rq;
3850
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303851 struct cpumask *dst_grpmask;
3852 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003853 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02003854 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08003855 /* The set of CPUs under consideration for load-balancing */
3856 struct cpumask *cpus;
3857
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003858 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01003859
3860 unsigned int loop;
3861 unsigned int loop_break;
3862 unsigned int loop_max;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003863};
3864
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003865/*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003866 * move_task - move a task from one runqueue to another runqueue.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003867 * Both runqueues must be locked.
3868 */
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003869static void move_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003870{
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003871 deactivate_task(env->src_rq, p, 0);
3872 set_task_cpu(p, env->dst_cpu);
3873 activate_task(env->dst_rq, p, 0);
3874 check_preempt_curr(env->dst_rq, p, 0);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003875}
3876
3877/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02003878 * Is this task likely cache-hot:
3879 */
3880static int
3881task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3882{
3883 s64 delta;
3884
3885 if (p->sched_class != &fair_sched_class)
3886 return 0;
3887
3888 if (unlikely(p->policy == SCHED_IDLE))
3889 return 0;
3890
3891 /*
3892 * Buddy candidates are cache hot:
3893 */
3894 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3895 (&p->se == cfs_rq_of(&p->se)->next ||
3896 &p->se == cfs_rq_of(&p->se)->last))
3897 return 1;
3898
3899 if (sysctl_sched_migration_cost == -1)
3900 return 1;
3901 if (sysctl_sched_migration_cost == 0)
3902 return 0;
3903
3904 delta = now - p->se.exec_start;
3905
3906 return delta < (s64)sysctl_sched_migration_cost;
3907}
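/*
 * With the default sysctl_sched_migration_cost of 500000ns, a fair
 * task whose exec_start is less than 0.5ms in the past is considered
 * cache-hot and, short of repeated balance failures, is left in place;
 * setting the sysctl to -1 marks every task hot, and 0 marks none.
 */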
3908
3909/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003910 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3911 */
3912static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003913int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003914{
3915 int tsk_cache_hot = 0;
3916 /*
3917	 * We do not migrate tasks that:
Joonsoo Kimd3198082013-04-23 17:27:40 +09003918	 * 1) are part of a throttled hierarchy (throttled_lb_pair), or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003919	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09003920	 * 3) are running (obviously), or
3921	 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003922 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09003923 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3924 return 0;
3925
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003926 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003927 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303928
Lucas De Marchi41acab82010-03-10 23:37:45 -03003929 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303930
3931 /*
3932 * Remember if this task can be migrated to any other cpu in
3933 * our sched_group. We may want to revisit it if we couldn't
3934 * meet load balance goals by pulling other tasks on src_cpu.
3935 *
3936 * Also avoid computing new_dst_cpu if we have already computed
3937		 * one in the current iteration.
3938 */
3939 if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
3940 return 0;
3941
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003942		/* Prevent re-selecting dst_cpu via env's cpus */
3943 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
3944 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
3945 env->flags |= LBF_SOME_PINNED;
3946 env->new_dst_cpu = cpu;
3947 break;
3948 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303949 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003950
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003951 return 0;
3952 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303953
3954	/* Record that we found at least one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003955 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003956
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003957 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03003958 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003959 return 0;
3960 }
3961
3962 /*
3963 * Aggressive migration if:
3964 * 1) task is cache cold, or
3965 * 2) too many balance attempts have failed.
3966 */
3967
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003968 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003969 if (!tsk_cache_hot ||
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003970 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003971
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003972 if (tsk_cache_hot) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003973 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003974 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003975 }
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003976
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003977 return 1;
3978 }
3979
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003980 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
3981 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003982}
3983
Peter Zijlstra897c3952009-12-17 17:45:42 +01003984/*
3985 * move_one_task tries to move exactly one task from busiest to this_rq, as
3986 * part of active balancing operations within "domain".
3987 * Returns 1 if successful and 0 otherwise.
3988 *
3989 * Called with both runqueues locked.
3990 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003991static int move_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01003992{
3993 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01003994
Peter Zijlstra367456c2012-02-20 21:49:09 +01003995 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01003996 if (!can_migrate_task(p, env))
3997 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01003998
Peter Zijlstra367456c2012-02-20 21:49:09 +01003999 move_task(p, env);
4000 /*
4001 * Right now, this is only the second place move_task()
4002 * is called, so we can safely collect move_task()
4003 * stats here rather than inside move_task().
4004 */
4005 schedstat_inc(env->sd, lb_gained[env->idle]);
4006 return 1;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004007 }
Peter Zijlstra897c3952009-12-17 17:45:42 +01004008 return 0;
4009}
4010
Peter Zijlstra367456c2012-02-20 21:49:09 +01004011static unsigned long task_h_load(struct task_struct *p);
4012
Peter Zijlstraeb953082012-04-17 13:38:40 +02004013static const unsigned int sched_nr_migrate_break = 32;
4014
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004015/*
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004016 * move_tasks tries to move up to imbalance weighted load from busiest to
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004017 * this_rq, as part of a balancing operation within domain "sd".
4018 * Returns 1 if successful and 0 otherwise.
4019 *
4020 * Called with both runqueues locked.
4021 */
4022static int move_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004023{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004024 struct list_head *tasks = &env->src_rq->cfs_tasks;
4025 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004026 unsigned long load;
4027 int pulled = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004028
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004029 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004030 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004031
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004032 while (!list_empty(tasks)) {
4033 p = list_first_entry(tasks, struct task_struct, se.group_node);
4034
Peter Zijlstra367456c2012-02-20 21:49:09 +01004035 env->loop++;
4036 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004037 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004038 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004039
4040 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01004041 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02004042 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004043 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01004044 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02004045 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004046
Joonsoo Kimd3198082013-04-23 17:27:40 +09004047 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01004048 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004049
Peter Zijlstra367456c2012-02-20 21:49:09 +01004050 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004051
Peter Zijlstraeb953082012-04-17 13:38:40 +02004052 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004053 goto next;
4054
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004055 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004056 goto next;
4057
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004058 move_task(p, env);
Peter Zijlstraee00e662009-12-17 17:25:20 +01004059 pulled++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004060 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004061
4062#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01004063 /*
4064 * NEWIDLE balancing is a source of latency, so preemptible
4065 * kernels will stop after the first task is pulled to minimize
4066 * the critical section.
4067 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004068 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004069 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004070#endif
4071
Peter Zijlstraee00e662009-12-17 17:25:20 +01004072 /*
4073 * We only want to steal up to the prescribed amount of
4074 * weighted load.
4075 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004076 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004077 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004078
Peter Zijlstra367456c2012-02-20 21:49:09 +01004079 continue;
4080next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004081 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004082 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004083
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004084 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004085 * Right now, this is one of only two places move_task() is called,
4086 * so we can safely collect move_task() stats here rather than
4087 * inside move_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004088 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004089 schedstat_add(env->sd, lb_gained[env->idle], pulled);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004090
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004091 return pulled;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004092}
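/*
 * Example, assuming the default sysctl_sched_nr_migrate of 32:
 * move_tasks() scans at most loop_max = min(32, busiest->nr_running)
 * tasks per pass, taking an LBF_NEED_BREAK breather every
 * sched_nr_migrate_break (32) iterations. The (load / 2) test above
 * also means a task of h_load 1024 is no longer pulled once the
 * remaining imbalance drops below 512, so a single heavy task cannot
 * overshoot the target by more than half its own weight.
 */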
4093
Peter Zijlstra230059de2009-12-17 17:47:12 +01004094#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004095/*
4096 * update tg->load_weight by folding this cpu's load_avg
4097 */
Paul Turner48a16752012-10-04 13:18:31 +02004098static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004099{
Paul Turner48a16752012-10-04 13:18:31 +02004100 struct sched_entity *se = tg->se[cpu];
4101 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004102
Paul Turner48a16752012-10-04 13:18:31 +02004103 /* throttled entities do not contribute to load */
4104 if (throttled_hierarchy(cfs_rq))
4105 return;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004106
Paul Turneraff3e492012-10-04 13:18:30 +02004107 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004108
Paul Turner82958362012-10-04 13:18:31 +02004109 if (se) {
4110 update_entity_load_avg(se, 1);
4111 /*
4112 * We pivot on our runnable average having decayed to zero for
4113 * list removal. This generally implies that all our children
4114 * have also been removed (modulo rounding error or bandwidth
4115 * control); however, such cases are rare and we can fix these
4116 * at enqueue.
4117 *
4118 * TODO: fix up out-of-order children on enqueue.
4119 */
4120 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4121 list_del_leaf_cfs_rq(cfs_rq);
4122 } else {
Paul Turner48a16752012-10-04 13:18:31 +02004123 struct rq *rq = rq_of(cfs_rq);
Paul Turner82958362012-10-04 13:18:31 +02004124 update_rq_runnable_avg(rq, rq->nr_running);
4125 }
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004126}
4127
Paul Turner48a16752012-10-04 13:18:31 +02004128static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004129{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004130 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02004131 struct cfs_rq *cfs_rq;
4132 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004133
Paul Turner48a16752012-10-04 13:18:31 +02004134 raw_spin_lock_irqsave(&rq->lock, flags);
4135 update_rq_clock(rq);
Peter Zijlstra9763b672011-07-13 13:09:25 +02004136 /*
4137 * Iterates the task_group tree in a bottom up fashion, see
4138 * list_add_leaf_cfs_rq() for details.
4139 */
Paul Turner64660c82011-07-21 09:43:36 -07004140 for_each_leaf_cfs_rq(rq, cfs_rq) {
Paul Turner48a16752012-10-04 13:18:31 +02004141 /*
4142 * Note: We may want to consider periodically releasing
	4143	 * rq->lock around these updates so that creating many task
4144 * groups does not result in continually extending hold time.
4145 */
4146 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
Paul Turner64660c82011-07-21 09:43:36 -07004147 }
Paul Turner48a16752012-10-04 13:18:31 +02004148
4149 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004150}
4151
Peter Zijlstra9763b672011-07-13 13:09:25 +02004152/*
4153 * Compute the cpu's hierarchical load factor for each task group.
4154 * This needs to be done in a top-down fashion because the load of a child
	4155	 * group is a fraction of its parent's load.
4156 */
4157static int tg_load_down(struct task_group *tg, void *data)
4158{
4159 unsigned long load;
4160 long cpu = (long)data;
4161
4162 if (!tg->parent) {
4163 load = cpu_rq(cpu)->load.weight;
4164 } else {
4165 load = tg->parent->cfs_rq[cpu]->h_load;
4166 load *= tg->se[cpu]->load.weight;
4167 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
4168 }
4169
4170 tg->cfs_rq[cpu]->h_load = load;
4171
4172 return 0;
4173}
4174
4175static void update_h_load(long cpu)
4176{
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004177 struct rq *rq = cpu_rq(cpu);
4178 unsigned long now = jiffies;
4179
4180 if (rq->h_load_throttle == now)
4181 return;
4182
4183 rq->h_load_throttle = now;
4184
Peter Zijlstra367456c2012-02-20 21:49:09 +01004185 rcu_read_lock();
Peter Zijlstra9763b672011-07-13 13:09:25 +02004186 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstra367456c2012-02-20 21:49:09 +01004187 rcu_read_unlock();
Peter Zijlstra9763b672011-07-13 13:09:25 +02004188}
4189
Peter Zijlstra367456c2012-02-20 21:49:09 +01004190static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004191{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004192 struct cfs_rq *cfs_rq = task_cfs_rq(p);
4193 unsigned long load;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004194
Peter Zijlstra367456c2012-02-20 21:49:09 +01004195 load = p->se.load.weight;
4196 load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004197
Peter Zijlstra367456c2012-02-20 21:49:09 +01004198 return load;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004199}
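/*
 * Worked example, assuming a NICE_0 weight of 1024: if a group
 * entity holds 1024 of its parent cfs_rq's 2048 total weight on a
 * cpu with load.weight 2048, tg_load_down() gives the group's cfs_rq
 * h_load = 2048 * 1024 / (2048 + 1) ~= 1023. A nice-0 task owning
 * 1024 of that group's 4096 weight then contributes
 * task_h_load(p) = 1024 * 1023 / (4096 + 1) ~= 255. The "+ 1" in
 * both divisions merely guards against an empty cfs_rq.
 */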
4200#else
Paul Turner48a16752012-10-04 13:18:31 +02004201static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004202{
4203}
4204
Peter Zijlstra367456c2012-02-20 21:49:09 +01004205static inline void update_h_load(long cpu)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004206{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004207}
4208
4209static unsigned long task_h_load(struct task_struct *p)
4210{
4211 return p->se.load.weight;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004212}
4213#endif
4214
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004215/********** Helpers for find_busiest_group ************************/
4216/*
4217 * sd_lb_stats - Structure to store the statistics of a sched_domain
4218 * during load balancing.
4219 */
4220struct sd_lb_stats {
4221 struct sched_group *busiest; /* Busiest group in this sd */
4222 struct sched_group *this; /* Local group in this sd */
4223 unsigned long total_load; /* Total load of all groups in sd */
4224 unsigned long total_pwr; /* Total power of all groups in sd */
4225 unsigned long avg_load; /* Average load across all groups in sd */
4226
4227 /** Statistics of this group */
4228 unsigned long this_load;
4229 unsigned long this_load_per_task;
4230 unsigned long this_nr_running;
Nikhil Raofab47622010-10-15 13:12:29 -07004231 unsigned long this_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004232 unsigned int this_idle_cpus;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004233
4234 /* Statistics of the busiest group */
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004235 unsigned int busiest_idle_cpus;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004236 unsigned long max_load;
4237 unsigned long busiest_load_per_task;
4238 unsigned long busiest_nr_running;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004239 unsigned long busiest_group_capacity;
Nikhil Raofab47622010-10-15 13:12:29 -07004240 unsigned long busiest_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004241 unsigned int busiest_group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004242
	4243	int group_imb; /* Is there an imbalance in this sd */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004244};
4245
4246/*
4247 * sg_lb_stats - stats of a sched_group required for load_balancing
4248 */
4249struct sg_lb_stats {
	4250	unsigned long avg_load; /* Avg load across the CPUs of the group */
4251 unsigned long group_load; /* Total load over the CPUs of the group */
4252 unsigned long sum_nr_running; /* Nr tasks running in the group */
4253 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
4254 unsigned long group_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004255 unsigned long idle_cpus;
4256 unsigned long group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004257	int group_imb; /* Is there an imbalance in the group? */
Nikhil Raofab47622010-10-15 13:12:29 -07004258 int group_has_capacity; /* Is there extra capacity in the group? */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004259};
4260
4261/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004262 * get_sd_load_idx - Obtain the load index for a given sched domain.
4263 * @sd: The sched_domain whose load_idx is to be obtained.
	4264	 * @idle: The idle status of the CPU for whose sd the load_idx is obtained.
4265 */
4266static inline int get_sd_load_idx(struct sched_domain *sd,
4267 enum cpu_idle_type idle)
4268{
4269 int load_idx;
4270
4271 switch (idle) {
4272 case CPU_NOT_IDLE:
4273 load_idx = sd->busy_idx;
4274 break;
4275
4276 case CPU_NEWLY_IDLE:
4277 load_idx = sd->newidle_idx;
4278 break;
4279 default:
4280 load_idx = sd->idle_idx;
4281 break;
4282 }
4283
4284 return load_idx;
4285}
4286
Li Zefan15f803c2013-03-05 16:07:11 +08004287static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004288{
Nikhil Rao1399fa72011-05-18 10:09:39 -07004289 return SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004290}
4291
4292unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4293{
4294 return default_scale_freq_power(sd, cpu);
4295}
4296
Li Zefan15f803c2013-03-05 16:07:11 +08004297static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004298{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004299 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004300 unsigned long smt_gain = sd->smt_gain;
4301
4302 smt_gain /= weight;
4303
4304 return smt_gain;
4305}
4306
4307unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4308{
4309 return default_scale_smt_power(sd, cpu);
4310}
4311
Li Zefan15f803c2013-03-05 16:07:11 +08004312static unsigned long scale_rt_power(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004313{
4314 struct rq *rq = cpu_rq(cpu);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004315 u64 total, available, age_stamp, avg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004316
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004317 /*
	4318	 * Since we're reading these variables without serialization, make sure
4319 * we read them once before doing sanity checks on them.
4320 */
4321 age_stamp = ACCESS_ONCE(rq->age_stamp);
4322 avg = ACCESS_ONCE(rq->rt_avg);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004323
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004324 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004325
4326 if (unlikely(total < avg)) {
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004327 /* Ensures that power won't end up being negative */
4328 available = 0;
4329 } else {
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004330 available = total - avg;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004331 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004332
Nikhil Rao1399fa72011-05-18 10:09:39 -07004333 if (unlikely((s64)total < SCHED_POWER_SCALE))
4334 total = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004335
Nikhil Rao1399fa72011-05-18 10:09:39 -07004336 total >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004337
4338 return div_u64(available, total);
4339}
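/*
 * Worked example, assuming SCHED_POWER_SCALE == 1024: if rt tasks
 * and irqs consumed an average 25% of the sampling window
 * (avg == total / 4), then available == 3 * total / 4 and, because
 * total was pre-shifted by SCHED_POWER_SHIFT, the function returns
 * roughly 0.75 * 1024 == 768 -- the fraction of this cpu left for
 * CFS, in fixed point.
 */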
4340
4341static void update_cpu_power(struct sched_domain *sd, int cpu)
4342{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004343 unsigned long weight = sd->span_weight;
Nikhil Rao1399fa72011-05-18 10:09:39 -07004344 unsigned long power = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004345 struct sched_group *sdg = sd->groups;
4346
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004347 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4348 if (sched_feat(ARCH_POWER))
4349 power *= arch_scale_smt_power(sd, cpu);
4350 else
4351 power *= default_scale_smt_power(sd, cpu);
4352
Nikhil Rao1399fa72011-05-18 10:09:39 -07004353 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004354 }
4355
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004356 sdg->sgp->power_orig = power;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004357
4358 if (sched_feat(ARCH_POWER))
4359 power *= arch_scale_freq_power(sd, cpu);
4360 else
4361 power *= default_scale_freq_power(sd, cpu);
4362
Nikhil Rao1399fa72011-05-18 10:09:39 -07004363 power >>= SCHED_POWER_SHIFT;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004364
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004365 power *= scale_rt_power(cpu);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004366 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004367
4368 if (!power)
4369 power = 1;
4370
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004371 cpu_rq(cpu)->cpu_power = power;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004372 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004373}
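/*
 * Worked example with illustrative figures: each step multiplies by
 * a factor and shifts right by SCHED_POWER_SHIFT, so the factors
 * compose as fixed-point fractions of SCHED_POWER_SCALE (1024).
 * An SMT sibling whose smt_gain of 1178 is split over 2 threads
 * starts from 1024 * 589 >> 10 = 589; at full frequency (factor
 * 1024) that stays 589, and with scale_rt_power() returning 768 the
 * final cpu_power becomes 589 * 768 >> 10 = 441.
 */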
4374
Peter Zijlstra029632f2011-10-25 10:00:11 +02004375void update_group_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004376{
4377 struct sched_domain *child = sd->child;
4378 struct sched_group *group, *sdg = sd->groups;
4379 unsigned long power;
Vincent Guittot4ec44122011-12-12 20:21:08 +01004380 unsigned long interval;
4381
4382 interval = msecs_to_jiffies(sd->balance_interval);
4383 interval = clamp(interval, 1UL, max_load_balance_interval);
4384 sdg->sgp->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004385
4386 if (!child) {
4387 update_cpu_power(sd, cpu);
4388 return;
4389 }
4390
4391 power = 0;
4392
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004393 if (child->flags & SD_OVERLAP) {
4394 /*
4395 * SD_OVERLAP domains cannot assume that child groups
4396 * span the current group.
4397 */
4398
4399 for_each_cpu(cpu, sched_group_cpus(sdg))
4400 power += power_of(cpu);
4401 } else {
4402 /*
4403 * !SD_OVERLAP domains can assume that child groups
4404 * span the current group.
4405 */
4406
4407 group = child->groups;
4408 do {
4409 power += group->sgp->power;
4410 group = group->next;
4411 } while (group != child->groups);
4412 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004413
Peter Zijlstrac3decf02012-05-31 12:05:32 +02004414 sdg->sgp->power_orig = sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004415}
4416
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004417/*
	4418 * Try and fix up capacity for tiny siblings; this is needed when
4419 * things like SD_ASYM_PACKING need f_b_g to select another sibling
4420 * which on its own isn't powerful enough.
4421 *
4422 * See update_sd_pick_busiest() and check_asym_packing().
4423 */
4424static inline int
4425fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4426{
4427 /*
Nikhil Rao1399fa72011-05-18 10:09:39 -07004428 * Only siblings can have significantly less than SCHED_POWER_SCALE
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004429 */
Peter Zijlstraa6c75f22011-04-07 14:09:52 +02004430 if (!(sd->flags & SD_SHARE_CPUPOWER))
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004431 return 0;
4432
4433 /*
4434 * If ~90% of the cpu_power is still there, we're good.
4435 */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004436 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004437 return 1;
4438
4439 return 0;
4440}
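/*
 * The 32/29 test above encodes the ~90% threshold exactly:
 * power * 32 > power_orig * 29 means power / power_orig > 29/32
 * = 0.90625. E.g. for an SMT sibling with power_orig = 589 the
 * group keeps a capacity of 1 while power stays at or above 534.
 */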
4441
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004442/**
4443 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004444 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004445 * @group: sched_group whose statistics are to be updated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004446 * @load_idx: Load index of sched_domain of this_cpu for load calc.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004447 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004448 * @balance: Should we balance.
4449 * @sgs: variable to hold the statistics for this group.
4450 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004451static inline void update_sg_lb_stats(struct lb_env *env,
4452 struct sched_group *group, int load_idx,
Michael Wangb94031302012-07-12 16:10:13 +08004453 int local_group, int *balance, struct sg_lb_stats *sgs)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004454{
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004455 unsigned long nr_running, max_nr_running, min_nr_running;
4456 unsigned long load, max_cpu_load, min_cpu_load;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004457 unsigned int balance_cpu = -1, first_idle_cpu = 0;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004458 unsigned long avg_load_per_task = 0;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004459 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004460
Gautham R Shenoy871e35b2010-01-20 14:02:44 -06004461 if (local_group)
Peter Zijlstrac1174872012-05-31 14:47:33 +02004462 balance_cpu = group_balance_cpu(group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004463
4464 /* Tally up the load of all CPUs in the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004465 max_cpu_load = 0;
4466 min_cpu_load = ~0UL;
Nikhil Rao2582f0e2010-10-13 12:09:36 -07004467 max_nr_running = 0;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004468 min_nr_running = ~0UL;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004469
Michael Wangb94031302012-07-12 16:10:13 +08004470 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004471 struct rq *rq = cpu_rq(i);
4472
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004473 nr_running = rq->nr_running;
4474
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004475 /* Bias balancing toward cpus of our domain */
4476 if (local_group) {
Peter Zijlstrac1174872012-05-31 14:47:33 +02004477 if (idle_cpu(i) && !first_idle_cpu &&
4478 cpumask_test_cpu(i, sched_group_mask(group))) {
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004479 first_idle_cpu = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004480 balance_cpu = i;
4481 }
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004482
4483 load = target_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004484 } else {
4485 load = source_load(i, load_idx);
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004486 if (load > max_cpu_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004487 max_cpu_load = load;
4488 if (min_cpu_load > load)
4489 min_cpu_load = load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004490
4491 if (nr_running > max_nr_running)
4492 max_nr_running = nr_running;
4493 if (min_nr_running > nr_running)
4494 min_nr_running = nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004495 }
4496
4497 sgs->group_load += load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004498 sgs->sum_nr_running += nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004499 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004500 if (idle_cpu(i))
4501 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004502 }
4503
4504 /*
	4505	 * First idle cpu or the first cpu (busiest) in this sched group
	4506	 * is eligible for doing load balancing at this and above
	4507	 * domains. In the newly idle case, we will allow all the cpus
4508 * to do the newly idle load balance.
4509 */
Vincent Guittot4ec44122011-12-12 20:21:08 +01004510 if (local_group) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004511 if (env->idle != CPU_NEWLY_IDLE) {
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004512 if (balance_cpu != env->dst_cpu) {
Vincent Guittot4ec44122011-12-12 20:21:08 +01004513 *balance = 0;
4514 return;
4515 }
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004516 update_group_power(env->sd, env->dst_cpu);
Vincent Guittot4ec44122011-12-12 20:21:08 +01004517 } else if (time_after_eq(jiffies, group->sgp->next_update))
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004518 update_group_power(env->sd, env->dst_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004519 }
4520
4521 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004522 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004523
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004524 /*
4525 * Consider the group unbalanced when the imbalance is larger
Peter Zijlstra866ab432011-02-21 18:56:47 +01004526 * than the average weight of a task.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004527 *
4528 * APZ: with cgroup the avg task weight can vary wildly and
4529 * might not be a suitable number - should we keep a
4530 * normalized nr_running number somewhere that negates
4531 * the hierarchy?
4532 */
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004533 if (sgs->sum_nr_running)
4534 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004535
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004536 if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
4537 (max_nr_running - min_nr_running) > 1)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004538 sgs->group_imb = 1;
4539
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004540 sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
Nikhil Rao1399fa72011-05-18 10:09:39 -07004541 SCHED_POWER_SCALE);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004542 if (!sgs->group_capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004543 sgs->group_capacity = fix_small_capacity(env->sd, group);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004544 sgs->group_weight = group->group_weight;
Nikhil Raofab47622010-10-15 13:12:29 -07004545
4546 if (sgs->group_capacity > sgs->sum_nr_running)
4547 sgs->group_has_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004548}
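/*
 * Worked example: group_capacity = DIV_ROUND_CLOSEST(power, 1024),
 * so a two-cpu group at full power (2048) gets capacity 2, while a
 * weakened SMT pair at power 589 rounds to 1. The group_imb flag
 * only trips when both conditions hold, e.g. per-cpu loads
 * {3072, 1024} from {3, 1} nice-0 tasks: the load spread (2048)
 * reaches the average task weight (4096 / 4 = 1024) and nr_running
 * differs by more than one.
 */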
4549
4550/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10004551 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07004552 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004553 * @sds: sched_domain statistics
4554 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10004555 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10004556 *
4557 * Determine if @sg is a busier group than the previously selected
4558 * busiest group.
4559 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004560static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10004561 struct sd_lb_stats *sds,
4562 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004563 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004564{
4565 if (sgs->avg_load <= sds->max_load)
4566 return false;
4567
4568 if (sgs->sum_nr_running > sgs->group_capacity)
4569 return true;
4570
4571 if (sgs->group_imb)
4572 return true;
4573
4574 /*
4575 * ASYM_PACKING needs to move all the work to the lowest
4576 * numbered CPUs in the group, therefore mark all groups
	4577	 * higher than ourselves as busy.
4578 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004579 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4580 env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10004581 if (!sds->busiest)
4582 return true;
4583
4584 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4585 return true;
4586 }
4587
4588 return false;
4589}
4590
4591/**
Hui Kang461819a2011-10-11 23:00:59 -04004592 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004593 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004594 * @balance: Should we balance.
4595 * @sds: variable to hold the statistics for this sched_domain.
4596 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004597static inline void update_sd_lb_stats(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08004598 int *balance, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004599{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004600 struct sched_domain *child = env->sd->child;
4601 struct sched_group *sg = env->sd->groups;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004602 struct sg_lb_stats sgs;
4603 int load_idx, prefer_sibling = 0;
4604
4605 if (child && child->flags & SD_PREFER_SIBLING)
4606 prefer_sibling = 1;
4607
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004608 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004609
4610 do {
4611 int local_group;
4612
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004613 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004614 memset(&sgs, 0, sizeof(sgs));
Michael Wangb94031302012-07-12 16:10:13 +08004615 update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004616
Peter Zijlstra8f190fb2009-12-24 14:18:21 +01004617 if (local_group && !(*balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004618 return;
4619
4620 sds->total_load += sgs.group_load;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004621 sds->total_pwr += sg->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004622
4623 /*
4624 * In case the child domain prefers tasks go to siblings
Michael Neuling532cb4c2010-06-08 14:57:02 +10004625 * first, lower the sg capacity to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07004626 * and move all the excess tasks away. We lower the capacity
4627 * of a group only if the local group has the capacity to fit
4628 * these excess tasks, i.e. nr_running < group_capacity. The
4629 * extra check prevents the case where you always pull from the
4630 * heaviest group when it is already under-utilized (possible
4631 * with a large weight task outweighs the tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004632 */
Nikhil Rao75dd3212010-10-15 13:12:30 -07004633 if (prefer_sibling && !local_group && sds->this_has_capacity)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004634 sgs.group_capacity = min(sgs.group_capacity, 1UL);
4635
4636 if (local_group) {
4637 sds->this_load = sgs.avg_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10004638 sds->this = sg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004639 sds->this_nr_running = sgs.sum_nr_running;
4640 sds->this_load_per_task = sgs.sum_weighted_load;
Nikhil Raofab47622010-10-15 13:12:29 -07004641 sds->this_has_capacity = sgs.group_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004642 sds->this_idle_cpus = sgs.idle_cpus;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004643 } else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004644 sds->max_load = sgs.avg_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10004645 sds->busiest = sg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004646 sds->busiest_nr_running = sgs.sum_nr_running;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004647 sds->busiest_idle_cpus = sgs.idle_cpus;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004648 sds->busiest_group_capacity = sgs.group_capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004649 sds->busiest_load_per_task = sgs.sum_weighted_load;
Nikhil Raofab47622010-10-15 13:12:29 -07004650 sds->busiest_has_capacity = sgs.group_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004651 sds->busiest_group_weight = sgs.group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004652 sds->group_imb = sgs.group_imb;
4653 }
4654
Michael Neuling532cb4c2010-06-08 14:57:02 +10004655 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004656 } while (sg != env->sd->groups);
Michael Neuling532cb4c2010-06-08 14:57:02 +10004657}
4658
Michael Neuling532cb4c2010-06-08 14:57:02 +10004659/**
4660 * check_asym_packing - Check to see if the group is packed into the
	4661 * sched domain.
4662 *
	4663 * This is primarily intended to be used at the sibling level. Some
4664 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4665 * case of POWER7, it can move to lower SMT modes only when higher
4666 * threads are idle. When in lower SMT modes, the threads will
4667 * perform better since they share less core resources. Hence when we
4668 * have idle threads, we want them to be the higher ones.
4669 *
4670 * This packing function is run on idle threads. It checks to see if
4671 * the busiest CPU in this domain (core in the P7 case) has a higher
4672 * CPU number than the packing function is being run on. Here we are
	4673 * assuming a lower CPU number will be equivalent to a lower SMT thread
4674 * number.
4675 *
Michael Neulingb6b12292010-06-10 12:06:21 +10004676 * Returns 1 when packing is required and a task should be moved to
	4677 * this CPU. The amount of the imbalance is returned in env->imbalance.
4678 *
Randy Dunlapcd968912012-06-08 13:18:33 -07004679 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004680 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10004681 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004682static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004683{
4684 int busiest_cpu;
4685
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004686 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10004687 return 0;
4688
4689 if (!sds->busiest)
4690 return 0;
4691
4692 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004693 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004694 return 0;
4695
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004696 env->imbalance = DIV_ROUND_CLOSEST(
4697 sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
4698
Michael Neuling532cb4c2010-06-08 14:57:02 +10004699 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004700}
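/*
 * Worked example, assuming SCHED_POWER_SCALE == 1024: packing only
 * applies when dst_cpu is numbered below the busiest group's first
 * cpu. The group's average load is then converted back to plain
 * weighted load, e.g. max_load = 1536 on a group of power 512 gives
 * env->imbalance = DIV_ROUND_CLOSEST(1536 * 512, 1024) = 768.
 */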
4701
4702/**
4703 * fix_small_imbalance - Calculate the minor imbalance that exists
4704 * amongst the groups of a sched_domain, during
4705 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004706 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004707 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004708 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004709static inline
4710void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004711{
4712 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4713 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004714 unsigned long scaled_busy_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004715
4716 if (sds->this_nr_running) {
4717 sds->this_load_per_task /= sds->this_nr_running;
4718 if (sds->busiest_load_per_task >
4719 sds->this_load_per_task)
4720 imbn = 1;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004721 } else {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004722 sds->this_load_per_task =
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004723 cpu_avg_load_per_task(env->dst_cpu);
4724 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004725
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004726 scaled_busy_load_per_task = sds->busiest_load_per_task
Nikhil Rao1399fa72011-05-18 10:09:39 -07004727 * SCHED_POWER_SCALE;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004728 scaled_busy_load_per_task /= sds->busiest->sgp->power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004729
4730 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4731 (scaled_busy_load_per_task * imbn)) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004732 env->imbalance = sds->busiest_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004733 return;
4734 }
4735
4736 /*
	4737	 * OK, we don't have enough imbalance to justify moving tasks;
4738 * however we may be able to increase total CPU power used by
4739 * moving them.
4740 */
4741
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004742 pwr_now += sds->busiest->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004743 min(sds->busiest_load_per_task, sds->max_load);
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004744 pwr_now += sds->this->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004745 min(sds->this_load_per_task, sds->this_load);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004746 pwr_now /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004747
4748 /* Amount of load we'd subtract */
Nikhil Rao1399fa72011-05-18 10:09:39 -07004749 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004750 sds->busiest->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004751 if (sds->max_load > tmp)
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004752 pwr_move += sds->busiest->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004753 min(sds->busiest_load_per_task, sds->max_load - tmp);
4754
4755 /* Amount of load we'd add */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004756 if (sds->max_load * sds->busiest->sgp->power <
Nikhil Rao1399fa72011-05-18 10:09:39 -07004757 sds->busiest_load_per_task * SCHED_POWER_SCALE)
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004758 tmp = (sds->max_load * sds->busiest->sgp->power) /
4759 sds->this->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004760 else
Nikhil Rao1399fa72011-05-18 10:09:39 -07004761 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004762 sds->this->sgp->power;
4763 pwr_move += sds->this->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004764 min(sds->this_load_per_task, sds->this_load + tmp);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004765 pwr_move /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004766
4767 /* Move if we gain throughput */
4768 if (pwr_move > pwr_now)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004769 env->imbalance = sds->busiest_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004770}
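/*
 * Here pwr_now and pwr_move estimate, in SCHED_POWER_SCALE fixed
 * point, how much of the two groups' compute power is actually
 * covered by runnable load before and after hypothetically moving
 * one busiest task. The min() terms cap each side at what it can
 * consume, so with asymmetric cpu powers a move can raise total
 * utilization even though the nominal imbalance is below one task's
 * load; only then (pwr_move > pwr_now) is env->imbalance bumped to
 * exactly busiest_load_per_task.
 */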
4771
4772/**
4773 * calculate_imbalance - Calculate the amount of imbalance present within the
4774 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004775 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004776 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004777 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004778static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004779{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004780 unsigned long max_pull, load_above_capacity = ~0UL;
4781
4782 sds->busiest_load_per_task /= sds->busiest_nr_running;
4783 if (sds->group_imb) {
4784 sds->busiest_load_per_task =
4785 min(sds->busiest_load_per_task, sds->avg_load);
4786 }
4787
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004788 /*
4789 * In the presence of smp nice balancing, certain scenarios can have
	4790	 * max load less than avg load (as we skip the groups at or below
	4791	 * their cpu_power while calculating max_load).
4792 */
4793 if (sds->max_load < sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004794 env->imbalance = 0;
4795 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004796 }
4797
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004798 if (!sds->group_imb) {
4799 /*
4800 * Don't want to pull so many tasks that a group would go idle.
4801 */
4802 load_above_capacity = (sds->busiest_nr_running -
4803 sds->busiest_group_capacity);
4804
Nikhil Rao1399fa72011-05-18 10:09:39 -07004805 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004806
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004807 load_above_capacity /= sds->busiest->sgp->power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004808 }
4809
4810 /*
4811 * We're trying to get all the cpus to the average_load, so we don't
4812 * want to push ourselves above the average load, nor do we wish to
4813 * reduce the max loaded cpu below the average load. At the same time,
4814 * we also don't want to reduce the group load below the group capacity
4815 * (so that we can implement power-savings policies etc). Thus we look
4816 * for the minimum possible imbalance.
4817 * Be careful of negative numbers as they'll appear as very large values
4818 * with unsigned longs.
4819 */
4820 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004821
4822 /* How much load to actually move to equalise the imbalance */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004823 env->imbalance = min(max_pull * sds->busiest->sgp->power,
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004824 (sds->avg_load - sds->this_load) * sds->this->sgp->power)
Nikhil Rao1399fa72011-05-18 10:09:39 -07004825 / SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004826
4827 /*
	4828	 * If *imbalance is less than the average load per runnable task,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004829	 * there is no guarantee that any tasks will be moved, so we consider
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004830	 * bumping its value to force at least one task to be
	4831	 * moved.
4832 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004833 if (env->imbalance < sds->busiest_load_per_task)
4834 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004835
4836}
Nikhil Raofab47622010-10-15 13:12:29 -07004837
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004838/******* find_busiest_group() helpers end here *********************/
4839
4840/**
4841 * find_busiest_group - Returns the busiest group within the sched_domain
4842 * if there is an imbalance. If there isn't an imbalance, and
4843 * the user has opted for power-savings, it returns a group whose
4844 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4845 * such a group exists.
4846 *
4847 * Also calculates the amount of weighted load which should be moved
4848 * to restore balance.
4849 *
Randy Dunlapcd968912012-06-08 13:18:33 -07004850 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004851 * @balance: Pointer to a variable indicating if this_cpu
4852 * is the appropriate cpu to perform load balancing at this_level.
4853 *
4854 * Returns: - the busiest group if imbalance exists.
4855 * - If no imbalance and user has opted for power-savings balance,
4856 * return the least loaded group whose CPUs can be
4857 * put to idle by rebalancing its tasks onto our group.
4858 */
4859static struct sched_group *
Michael Wangb94031302012-07-12 16:10:13 +08004860find_busiest_group(struct lb_env *env, int *balance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004861{
4862 struct sd_lb_stats sds;
4863
4864 memset(&sds, 0, sizeof(sds));
4865
4866 /*
	4867	 * Compute the various statistics relevant for load balancing at
4868 * this level.
4869 */
Michael Wangb94031302012-07-12 16:10:13 +08004870 update_sd_lb_stats(env, balance, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004871
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004872 /*
4873 * this_cpu is not the appropriate cpu to perform load balancing at
4874 * this level.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004875 */
Peter Zijlstra8f190fb2009-12-24 14:18:21 +01004876 if (!(*balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004877 goto ret;
4878
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004879 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4880 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10004881 return sds.busiest;
4882
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004883 /* There is no busy sibling group to pull tasks from */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004884 if (!sds.busiest || sds.busiest_nr_running == 0)
4885 goto out_balanced;
4886
Nikhil Rao1399fa72011-05-18 10:09:39 -07004887 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
Ken Chenb0432d82011-04-07 17:23:22 -07004888
Peter Zijlstra866ab432011-02-21 18:56:47 +01004889 /*
4890 * If the busiest group is imbalanced the below checks don't
	4891	 * work because they assume all things are equal, which typically
4892 * isn't true due to cpus_allowed constraints and the like.
4893 */
4894 if (sds.group_imb)
4895 goto force_balance;
4896
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004897 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004898 if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
Nikhil Raofab47622010-10-15 13:12:29 -07004899 !sds.busiest_has_capacity)
4900 goto force_balance;
4901
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004902 /*
4903 * If the local group is more busy than the selected busiest group
4904 * don't try and pull any tasks.
4905 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004906 if (sds.this_load >= sds.max_load)
4907 goto out_balanced;
4908
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004909 /*
4910 * Don't pull any tasks if this group is already above the domain
4911 * average load.
4912 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004913 if (sds.this_load >= sds.avg_load)
4914 goto out_balanced;
4915
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004916 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004917 /*
	4918		 * This cpu is idle. If the busiest group doesn't have
	4919		 * more tasks than the number of available cpus and
	4920		 * there is no imbalance between this and the busiest group
	4921		 * w.r.t. idle cpus, it is balanced.
4922 */
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004923 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004924 sds.busiest_nr_running <= sds.busiest_group_weight)
4925 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004926 } else {
4927 /*
4928 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4929 * imbalance_pct to be conservative.
4930 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004931 if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004932 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004933 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004934
Nikhil Raofab47622010-10-15 13:12:29 -07004935force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004936 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004937 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004938 return sds.busiest;
4939
4940out_balanced:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004941ret:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004942 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004943 return NULL;
4944}
4945
4946/*
4947 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4948 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004949static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08004950 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004951{
4952 struct rq *busiest = NULL, *rq;
4953 unsigned long max_load = 0;
4954 int i;
4955
4956 for_each_cpu(i, sched_group_cpus(group)) {
4957 unsigned long power = power_of(i);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004958 unsigned long capacity = DIV_ROUND_CLOSEST(power,
4959 SCHED_POWER_SCALE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004960 unsigned long wl;
4961
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004962 if (!capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004963 capacity = fix_small_capacity(env->sd, group);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004964
Michael Wangb94031302012-07-12 16:10:13 +08004965 if (!cpumask_test_cpu(i, env->cpus))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004966 continue;
4967
4968 rq = cpu_rq(i);
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004969 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004970
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004971 /*
4972 * When comparing with imbalance, use weighted_cpuload()
4973 * which is not scaled with the cpu power.
4974 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004975 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004976 continue;
4977
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004978 /*
4979 * For the load comparisons with the other cpu's, consider
4980 * the weighted_cpuload() scaled with the cpu power, so that
4981 * the load can be moved away from the cpu that is potentially
4982 * running at a lower capacity.
4983 */
Nikhil Rao1399fa72011-05-18 10:09:39 -07004984 wl = (wl * SCHED_POWER_SCALE) / power;
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004985
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004986 if (wl > max_load) {
4987 max_load = wl;
4988 busiest = rq;
4989 }
4990 }
4991
4992 return busiest;
4993}
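/*
 * Worked example: the comparison load is weighted_cpuload() scaled
 * by 1024 / power, so a cpu carrying load 1536 at power 512 scores
 * wl = 3072 and is picked over a cpu carrying load 2048 at full
 * power 1024 (wl = 2048) -- the former runs further beyond its
 * capacity.
 */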
4994
4995/*
	4996 * Max backoff if we encounter pinned tasks. Pretty arbitrary value,
	4997 * but it works so long as it is large enough.
4998 */
4999#define MAX_PINNED_INTERVAL 512
5000
5001/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09005002DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005003
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005004static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005005{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005006 struct sched_domain *sd = env->sd;
5007
5008 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005009
5010 /*
5011 * ASYM_PACKING needs to force migrate tasks from busy but
5012 * higher numbered CPUs in order to pack all tasks in the
5013 * lowest numbered CPUs.
5014 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005015 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005016 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005017 }
5018
5019 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5020}
5021
Tejun Heo969c7922010-05-06 18:49:21 +02005022static int active_load_balance_cpu_stop(void *data);
5023
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005024/*
5025 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5026 * tasks if there is an imbalance.
5027 */
5028static int load_balance(int this_cpu, struct rq *this_rq,
5029 struct sched_domain *sd, enum cpu_idle_type idle,
5030 int *balance)
5031{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305032 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005033 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005034 struct rq *busiest;
5035 unsigned long flags;
Joonsoo Kime6252c32013-04-23 17:27:41 +09005036 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005037
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005038 struct lb_env env = {
5039 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005040 .dst_cpu = this_cpu,
5041 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305042 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005043 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02005044 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08005045 .cpus = cpus,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005046 };
5047
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005048 /*
	5049	 * For NEWLY_IDLE load balancing, we don't need to consider
5050 * other cpus in our group
5051 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005052 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005053 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005054
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005055 cpumask_copy(cpus, cpu_active_mask);
5056
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005057 schedstat_inc(sd, lb_count[idle]);
5058
5059redo:
Michael Wangb94031302012-07-12 16:10:13 +08005060 group = find_busiest_group(&env, balance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005061
5062 if (*balance == 0)
5063 goto out_balanced;
5064
5065 if (!group) {
5066 schedstat_inc(sd, lb_nobusyg[idle]);
5067 goto out_balanced;
5068 }
5069
Michael Wangb94031302012-07-12 16:10:13 +08005070 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005071 if (!busiest) {
5072 schedstat_inc(sd, lb_nobusyq[idle]);
5073 goto out_balanced;
5074 }
5075
Michael Wang78feefc2012-08-06 16:41:59 +08005076 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005077
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005078 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005079
5080 ld_moved = 0;
5081 if (busiest->nr_running > 1) {
5082 /*
5083 * Attempt to move tasks. If find_busiest_group has found
5084 * an imbalance but busiest->nr_running <= 1, the group is
5085 * still unbalanced. ld_moved simply stays zero, so it is
5086 * correctly treated as an imbalance.
5087 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005088 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02005089 env.src_cpu = busiest->cpu;
5090 env.src_rq = busiest;
5091 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005092
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005093 update_h_load(env.src_cpu);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005094more_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005095 local_irq_save(flags);
Michael Wang78feefc2012-08-06 16:41:59 +08005096 double_rq_lock(env.dst_rq, busiest);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305097
5098 /*
5099 * cur_ld_moved - load moved in current iteration
5100 * ld_moved - cumulative load moved across iterations
5101 */
5102 cur_ld_moved = move_tasks(&env);
5103 ld_moved += cur_ld_moved;
Michael Wang78feefc2012-08-06 16:41:59 +08005104 double_rq_unlock(env.dst_rq, busiest);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005105 local_irq_restore(flags);
5106
5107 /*
5108 * some other cpu did the load balance for us.
5109 */
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305110 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5111 resched_cpu(env.dst_cpu);
5112
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09005113 if (env.flags & LBF_NEED_BREAK) {
5114 env.flags &= ~LBF_NEED_BREAK;
5115 goto more_balance;
5116 }
5117
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305118 /*
5119 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5120 * us and move them to an alternate dst_cpu in our sched_group
5121 * where they can run. The upper limit on how many times we
5122 * iterate on same src_cpu is dependent on number of cpus in our
5123 * sched_group.
5124 *
5125 * This changes load balance semantics a bit on who can move
5126 * load to a given_cpu. In addition to the given_cpu itself
	5127		 * (or an ilb_cpu acting on its behalf where given_cpu is
5128 * nohz-idle), we now have balance_cpu in a position to move
5129 * load to given_cpu. In rare situations, this may cause
5130 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
	5131		 * _independently_ and at the _same_ time to move some load to
	5132		 * given_cpu) causing excess load to be moved to given_cpu.
5133 * This however should not happen so much in practice and
5134 * moreover subsequent load balance cycles should correct the
5135 * excess load moved.
5136 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005137 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305138
Michael Wang78feefc2012-08-06 16:41:59 +08005139 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305140 env.dst_cpu = env.new_dst_cpu;
5141 env.flags &= ~LBF_SOME_PINNED;
5142 env.loop = 0;
5143 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005144
	5145			/* Prevent re-selecting dst_cpu via env's cpus */
5146 cpumask_clear_cpu(env.dst_cpu, env.cpus);
5147
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305148 /*
5149 * Go back to "more_balance" rather than "redo" since we
5150 * need to continue with same src_cpu.
5151 */
5152 goto more_balance;
5153 }

                /* All tasks on this runqueue were pinned by CPU affinity */
                if (unlikely(env.flags & LBF_ALL_PINNED)) {
                        cpumask_clear_cpu(cpu_of(busiest), cpus);
                        if (!cpumask_empty(cpus)) {
                                env.loop = 0;
                                env.loop_break = sched_nr_migrate_break;
                                goto redo;
                        }
                        goto out_balanced;
                }
        }

        if (!ld_moved) {
                schedstat_inc(sd, lb_failed[idle]);
                /*
                 * Increment the failure counter only on periodic balance.
                 * We do not want newidle balance, which can be very
                 * frequent, to pollute the failure counter and cause
                 * excessive cache_hot migrations and active balances.
                 */
                if (idle != CPU_NEWLY_IDLE)
                        sd->nr_balance_failed++;

                if (need_active_balance(&env)) {
                        raw_spin_lock_irqsave(&busiest->lock, flags);

                        /*
                         * Don't kick the active_load_balance_cpu_stop if
                         * the curr task on the busiest cpu can't be moved
                         * to this_cpu.
                         */
                        if (!cpumask_test_cpu(this_cpu,
                                        tsk_cpus_allowed(busiest->curr))) {
                                raw_spin_unlock_irqrestore(&busiest->lock,
                                                           flags);
                                env.flags |= LBF_ALL_PINNED;
                                goto out_one_pinned;
                        }

                        /*
                         * ->active_balance synchronizes accesses to
                         * ->active_balance_work. Once set, it's cleared
                         * only after active load balance is finished.
                         */
                        if (!busiest->active_balance) {
                                busiest->active_balance = 1;
                                busiest->push_cpu = this_cpu;
                                active_balance = 1;
                        }
                        raw_spin_unlock_irqrestore(&busiest->lock, flags);

                        if (active_balance) {
                                stop_one_cpu_nowait(cpu_of(busiest),
                                        active_load_balance_cpu_stop, busiest,
                                        &busiest->active_balance_work);
                        }

                        /*
                         * We've kicked active balancing, reset the failure
                         * counter.
                         */
                        sd->nr_balance_failed = sd->cache_nice_tries + 1;
                }
        } else
                sd->nr_balance_failed = 0;

        if (likely(!active_balance)) {
                /* We were unbalanced, so reset the balancing interval */
                sd->balance_interval = sd->min_interval;
        } else {
                /*
                 * If we've begun active balancing, start to back off. This
                 * case may not be covered by the all_pinned logic if there
                 * is only 1 task on the busy runqueue (because we don't call
                 * move_tasks).
                 */
                if (sd->balance_interval < sd->max_interval)
                        sd->balance_interval *= 2;
        }

        goto out;

out_balanced:
        schedstat_inc(sd, lb_balanced[idle]);

        sd->nr_balance_failed = 0;

out_one_pinned:
        /* tune up the balancing interval */
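        /*
         * Example: a domain interval of 8ms grows to 16, 32, 64ms on
         * successive passes here, until it reaches the relevant cap
         * (MAX_PINNED_INTERVAL for the all-pinned case, sd->max_interval
         * otherwise). The 8ms starting point is illustrative; the real
         * bounds come from the sched_domain setup.
         */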
        if (((env.flags & LBF_ALL_PINNED) &&
                        sd->balance_interval < MAX_PINNED_INTERVAL) ||
                        (sd->balance_interval < sd->max_interval))
                sd->balance_interval *= 2;

        ld_moved = 0;
out:
        return ld_moved;
}

/*
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 */
void idle_balance(int this_cpu, struct rq *this_rq)
{
        struct sched_domain *sd;
        int pulled_task = 0;
        unsigned long next_balance = jiffies + HZ;

        this_rq->idle_stamp = rq_clock(this_rq);

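        /*
         * If this CPU's average idle period is shorter than the cost of
         * migrating a task over (sysctl_sched_migration_cost, 0.5ms by
         * default), a newidle balance is expected to be a net loss, so
         * bail out early.
         */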
        if (this_rq->avg_idle < sysctl_sched_migration_cost)
                return;

        /*
         * Drop the rq->lock, but keep IRQ/preempt disabled.
         */
        raw_spin_unlock(&this_rq->lock);

        update_blocked_averages(this_cpu);
        rcu_read_lock();
        for_each_domain(this_cpu, sd) {
                unsigned long interval;
                int balance = 1;

                if (!(sd->flags & SD_LOAD_BALANCE))
                        continue;

                if (sd->flags & SD_BALANCE_NEWIDLE) {
                        /* If we've pulled tasks over stop searching: */
                        pulled_task = load_balance(this_cpu, this_rq,
                                                   sd, CPU_NEWLY_IDLE, &balance);
                }

                interval = msecs_to_jiffies(sd->balance_interval);
                if (time_after(next_balance, sd->last_balance + interval))
                        next_balance = sd->last_balance + interval;
                if (pulled_task) {
                        this_rq->idle_stamp = 0;
                        break;
                }
        }
        rcu_read_unlock();

        raw_spin_lock(&this_rq->lock);

        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
                 * We are going idle. next_balance may be set based on
                 * a busy processor. So reset next_balance.
                 */
                this_rq->next_balance = next_balance;
        }
}

/*
 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
 * running tasks off the busiest CPU onto idle CPUs. It requires at
 * least 1 task to be running on each physical CPU where possible, and
 * avoids physical / logical imbalances.
 */
static int active_load_balance_cpu_stop(void *data)
{
        struct rq *busiest_rq = data;
        int busiest_cpu = cpu_of(busiest_rq);
        int target_cpu = busiest_rq->push_cpu;
        struct rq *target_rq = cpu_rq(target_cpu);
        struct sched_domain *sd;

        raw_spin_lock_irq(&busiest_rq->lock);

        /* make sure the requested cpu hasn't gone down in the meantime */
        if (unlikely(busiest_cpu != smp_processor_id() ||
                     !busiest_rq->active_balance))
                goto out_unlock;

        /* Is there any task to move? */
        if (busiest_rq->nr_running <= 1)
                goto out_unlock;

        /*
         * This condition is "impossible"; if it occurs
         * we need to fix it. Originally reported by
         * Bjorn Helgaas on a 128-cpu setup.
         */
        BUG_ON(busiest_rq == target_rq);

        /* move a task from busiest_rq to target_rq */
        double_lock_balance(busiest_rq, target_rq);

        /* Search for an sd spanning us and the target CPU. */
        rcu_read_lock();
        for_each_domain(target_cpu, sd) {
                if ((sd->flags & SD_LOAD_BALANCE) &&
                    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
                        break;
        }

        if (likely(sd)) {
                struct lb_env env = {
                        .sd      = sd,
                        .dst_cpu = target_cpu,
                        .dst_rq  = target_rq,
                        .src_cpu = busiest_rq->cpu,
                        .src_rq  = busiest_rq,
                        .idle    = CPU_IDLE,
                };

                schedstat_inc(sd, alb_count);

                if (move_one_task(&env))
                        schedstat_inc(sd, alb_pushed);
                else
                        schedstat_inc(sd, alb_failed);
        }
        rcu_read_unlock();
        double_unlock_balance(busiest_rq, target_rq);
out_unlock:
        busiest_rq->active_balance = 0;
        raw_spin_unlock_irq(&busiest_rq->lock);
        return 0;
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * idle load balancing details
 * - When one of the busy CPUs notices that there may be an idle
 *   rebalancing needed, it kicks the idle load balancer, which then
 *   does idle load balancing for all the idle CPUs.
 */
static struct {
        cpumask_var_t idle_cpus_mask;
        atomic_t nr_cpus;
        unsigned long next_balance;     /* in jiffy units */
} nohz ____cacheline_aligned;
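
/*
 * The nohz state above is updated by every CPU as it enters and leaves
 * tickless idle; ____cacheline_aligned keeps this hot shared data on
 * its own cache line to limit false sharing with its neighbours.
 */
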
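/*
 * Pick the first idle CPU in nohz.idle_cpus_mask to act as the idle
 * load balancer; nr_cpu_ids is returned when no suitable CPU exists.
 */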
static inline int find_new_ilb(int call_cpu)
{
        int ilb = cpumask_first(nohz.idle_cpus_mask);

        if (ilb < nr_cpu_ids && idle_cpu(ilb))
                return ilb;

        return nr_cpu_ids;
}

/*
 * Kick a CPU to do the nohz balancing, if it is time for it. We pick
 * the nohz_load_balancer CPU (if there is one), otherwise we fall back
 * to any idle CPU (if there is one).
 */
static void nohz_balancer_kick(int cpu)
{
        int ilb_cpu;

        nohz.next_balance++;

        ilb_cpu = find_new_ilb(cpu);

        if (ilb_cpu >= nr_cpu_ids)
                return;

        if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
                return;
        /*
         * Use smp_send_reschedule() instead of resched_cpu().
         * This way we generate a sched IPI on the target cpu which
         * is idle. And the softirq performing nohz idle load balance
         * will be run before returning from the IPI.
         */
        smp_send_reschedule(ilb_cpu);
        return;
}

static inline void nohz_balance_exit_idle(int cpu)
{
        if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
                cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
                atomic_dec(&nohz.nr_cpus);
                clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
        }
}

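/*
 * set_cpu_sd_state_busy()/set_cpu_sd_state_idle() keep the per-group
 * nr_busy_cpus counters in sync as this CPU's tick starts and stops;
 * the sd->nohz_idle flag ensures each transition is accounted exactly
 * once up the domain hierarchy.
 */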
static inline void set_cpu_sd_state_busy(void)
{
        struct sched_domain *sd;

        rcu_read_lock();
        sd = rcu_dereference_check_sched_domain(this_rq()->sd);

        if (!sd || !sd->nohz_idle)
                goto unlock;
        sd->nohz_idle = 0;

        for (; sd; sd = sd->parent)
                atomic_inc(&sd->groups->sgp->nr_busy_cpus);
unlock:
        rcu_read_unlock();
}

void set_cpu_sd_state_idle(void)
{
        struct sched_domain *sd;

        rcu_read_lock();
        sd = rcu_dereference_check_sched_domain(this_rq()->sd);

        if (!sd || sd->nohz_idle)
                goto unlock;
        sd->nohz_idle = 1;

        for (; sd; sd = sd->parent)
                atomic_dec(&sd->groups->sgp->nr_busy_cpus);
unlock:
        rcu_read_unlock();
}

/*
 * This routine will record that the cpu is going idle with tick stopped.
 * This info will be used in performing idle load balancing in the future.
 */
void nohz_balance_enter_idle(int cpu)
{
        /*
         * If this cpu is going down, then nothing needs to be done.
         */
        if (!cpu_active(cpu))
                return;

        if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
                return;

        cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
        atomic_inc(&nohz.nr_cpus);
        set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
}

static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
{
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DYING:
                nohz_balance_exit_idle(smp_processor_id());
                return NOTIFY_OK;
        default:
                return NOTIFY_DONE;
        }
}
#endif

static DEFINE_SPINLOCK(balancing);

/*
 * Scale the max load_balance interval with the number of CPUs in the system.
 * This trades load-balance latency on larger machines for less cross talk.
 */
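/*
 * Example: with HZ=1000 and 16 CPUs online the cap works out to
 * 1000 * 16 / 10 = 1600 jiffies, i.e. 1.6 seconds.
 */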
void update_max_interval(void)
{
        max_load_balance_interval = HZ*num_online_cpus()/10;
}

/*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
 *
 * Balancing parameters are set up in init_sched_domains.
 */
static void rebalance_domains(int cpu, enum cpu_idle_type idle)
{
        int balance = 1;
        struct rq *rq = cpu_rq(cpu);
        unsigned long interval;
        struct sched_domain *sd;
        /* Earliest time when we have to do rebalance again */
        unsigned long next_balance = jiffies + 60*HZ;
        int update_next_balance = 0;
        int need_serialize;

        update_blocked_averages(cpu);

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (!(sd->flags & SD_LOAD_BALANCE))
                        continue;

                interval = sd->balance_interval;
                if (idle != CPU_IDLE)
                        interval *= sd->busy_factor;

                /* scale ms to jiffies */
                interval = msecs_to_jiffies(interval);
                interval = clamp(interval, 1UL, max_load_balance_interval);

                need_serialize = sd->flags & SD_SERIALIZE;

                if (need_serialize) {
                        if (!spin_trylock(&balancing))
                                goto out;
                }

                if (time_after_eq(jiffies, sd->last_balance + interval)) {
                        if (load_balance(cpu, rq, sd, idle, &balance)) {
                                /*
                                 * The LBF_SOME_PINNED logic could have changed
                                 * env->dst_cpu, so we can't know our idle
                                 * state even if we migrated tasks. Update it.
                                 */
                                idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
                        }
                        sd->last_balance = jiffies;
                }
                if (need_serialize)
                        spin_unlock(&balancing);
out:
                if (time_after(next_balance, sd->last_balance + interval)) {
                        next_balance = sd->last_balance + interval;
                        update_next_balance = 1;
                }

                /*
                 * Stop the load balance at this level. There is another
                 * CPU in our sched group which is doing load balancing more
                 * actively.
                 */
                if (!balance)
                        break;
        }
        rcu_read_unlock();

        /*
         * next_balance will be updated only when there is a need.
         * When the cpu is attached to a null domain, for example, it
         * will not be updated.
         */
        if (likely(update_next_balance))
                rq->next_balance = next_balance;
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
 * rebalancing for all the cpus for whom scheduler ticks are stopped.
 */
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
{
        struct rq *this_rq = cpu_rq(this_cpu);
        struct rq *rq;
        int balance_cpu;

        if (idle != CPU_IDLE ||
            !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
                goto end;

        for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
                if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
                        continue;

                /*
                 * If this cpu gets work to do, stop the load balancing
                 * work being done for other cpus. The next load
                 * balancing owner will pick it up.
                 */
                if (need_resched())
                        break;

                rq = cpu_rq(balance_cpu);

                raw_spin_lock_irq(&rq->lock);
                update_rq_clock(rq);
                update_idle_cpu_load(rq);
                raw_spin_unlock_irq(&rq->lock);

                rebalance_domains(balance_cpu, CPU_IDLE);

                if (time_after(this_rq->next_balance, rq->next_balance))
                        this_rq->next_balance = rq->next_balance;
        }
        nohz.next_balance = this_rq->next_balance;
end:
        clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
}

/*
 * Current heuristic for kicking the idle load balancer in the presence
 * of an idle cpu in the system:
 * - This rq has more than one task.
 * - At any scheduler domain level, this cpu's scheduler group has multiple
 *   busy cpus exceeding the group's power.
 * - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
 *   domain span are idle.
 */
static inline int nohz_kick_needed(struct rq *rq, int cpu)
{
        unsigned long now = jiffies;
        struct sched_domain *sd;

        if (unlikely(idle_cpu(cpu)))
                return 0;

        /*
         * We may have been in ticked or tickless idle mode recently.
         * At the first busy tick after returning from idle, we will
         * update the busy stats.
         */
        set_cpu_sd_state_busy();
        nohz_balance_exit_idle(cpu);

        /*
         * None are in tickless mode and hence no need for NOHZ idle load
         * balancing.
         */
        if (likely(!atomic_read(&nohz.nr_cpus)))
                return 0;

        if (time_before(now, nohz.next_balance))
                return 0;

        if (rq->nr_running >= 2)
                goto need_kick;

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                struct sched_group *sg = sd->groups;
                struct sched_group_power *sgp = sg->sgp;
                int nr_busy = atomic_read(&sgp->nr_busy_cpus);

                if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
                        goto need_kick_unlock;

                if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
                    && (cpumask_first_and(nohz.idle_cpus_mask,
                                          sched_domain_span(sd)) < cpu))
                        goto need_kick_unlock;

                if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
                        break;
        }
        rcu_read_unlock();
        return 0;

need_kick_unlock:
        rcu_read_unlock();
need_kick:
        return 1;
}
#else
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
#endif

/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
static void run_rebalance_domains(struct softirq_action *h)
{
        int this_cpu = smp_processor_id();
        struct rq *this_rq = cpu_rq(this_cpu);
        enum cpu_idle_type idle = this_rq->idle_balance ?
                                                CPU_IDLE : CPU_NOT_IDLE;

        rebalance_domains(this_cpu, idle);

        /*
         * If this cpu has a pending nohz_balance_kick, then do the
         * balancing on behalf of the other idle cpus whose ticks are
         * stopped.
         */
        nohz_idle_balance(this_cpu, idle);
}

static inline int on_null_domain(int cpu)
{
        return !rcu_dereference_sched(cpu_rq(cpu)->sd);
}

/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
void trigger_load_balance(struct rq *rq, int cpu)
{
        /* Don't need to rebalance while attached to NULL domain */
        if (time_after_eq(jiffies, rq->next_balance) &&
            likely(!on_null_domain(cpu)))
                raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ_COMMON
        if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
                nohz_balancer_kick(cpu);
#endif
}

static void rq_online_fair(struct rq *rq)
{
        update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
        update_sysctl();

        /* Ensure any throttled groups are reachable by pick_next_task */
        unthrottle_offline_cfs_rqs(rq);
}

#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &curr->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                entity_tick(cfs_rq, se, queued);
        }

        if (sched_feat_numa(NUMA))
                task_tick_numa(rq, curr);

        update_rq_runnable_avg(rq, 1);
}

/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se, *curr;
        int this_cpu = smp_processor_id();
        struct rq *rq = this_rq();
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);

        update_rq_clock(rq);

        cfs_rq = task_cfs_rq(current);
        curr = cfs_rq->curr;

        if (unlikely(task_cpu(p) != this_cpu)) {
                rcu_read_lock();
                __set_task_cpu(p, this_cpu);
                rcu_read_unlock();
        }

        update_curr(cfs_rq);

        if (curr)
                se->vruntime = curr->vruntime;
        place_entity(cfs_rq, se, 1);

        if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
                /*
                 * Upon rescheduling, sched_class::put_prev_task() will place
                 * 'current' within the tree based on its new key value.
                 */
                swap(curr->vruntime, se->vruntime);
                resched_task(rq->curr);
        }

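        /*
         * Make the child's vruntime relative to this runqueue's
         * min_vruntime: the child may be placed on a different CPU
         * before it first runs, and the offset is re-added against the
         * destination runqueue's min_vruntime at enqueue time.
         */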
        se->vruntime -= cfs_rq->min_vruntime;

        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
        if (!p->se.on_rq)
                return;

        /*
         * Reschedule if we are currently running on this runqueue and
         * our priority decreased, or if we are not currently running on
         * this runqueue and our priority is higher than the current's.
         */
        if (rq->curr == p) {
                if (p->prio > oldprio)
                        resched_task(rq->curr);
        } else
                check_preempt_curr(rq, p, 0);
}

static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        /*
         * Ensure the task's vruntime is normalized, so that when it's
         * switched back to the fair class the enqueue_entity(.flags=0) will
         * do the right thing.
         *
         * If it was on_rq, then the dequeue_entity(.flags=0) will already
         * have normalized the vruntime; if it was !on_rq, then only when
         * the task is sleeping will it still have non-normalized vruntime.
         */
        if (!se->on_rq && p->state != TASK_RUNNING) {
                /*
                 * Fix up our vruntime so that the current sleep doesn't
                 * cause 'unlimited' sleep bonus.
                 */
                place_entity(cfs_rq, se, 0);
                se->vruntime -= cfs_rq->min_vruntime;
        }

#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
        /*
         * Remove our load from contribution when we leave sched_fair
         * and ensure we don't carry in an old decay_count if we
         * switch back.
         */
        if (p->se.avg.decay_count) {
                struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
                __synchronize_entity_decay(&p->se);
                subtract_blocked_load_contrib(cfs_rq,
                                p->se.avg.load_avg_contrib);
        }
#endif
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
        if (!p->se.on_rq)
                return;

        /*
         * We were most likely switched from sched_rt, so
         * kick off the schedule if running, otherwise just see
         * if we can still preempt the current task.
         */
        if (rq->curr == p)
                resched_task(rq->curr);
        else
                check_preempt_curr(rq, p, 0);
}

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
        struct sched_entity *se = &rq->curr->se;

        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);

                set_next_entity(cfs_rq, se);
                /* ensure bandwidth has been allocated on our new cfs_rq */
                account_cfs_rq_runtime(cfs_rq, 0);
        }
}

void init_cfs_rq(struct cfs_rq *cfs_rq)
{
        cfs_rq->tasks_timeline = RB_ROOT;
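        /*
         * min_vruntime starts 2^20 units short of the u64 wrap point,
         * which exercises the wraparound-safe vruntime comparisons
         * early in a runqueue's life instead of after long uptimes.
         */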
        cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
        atomic64_set(&cfs_rq->decay_counter, 1);
        atomic64_set(&cfs_rq->removed_load, 0);
#endif
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
        struct cfs_rq *cfs_rq;
        /*
         * If the task was not on the rq at the time of this cgroup movement
         * it must have been asleep; sleeping tasks keep their ->vruntime
         * absolute on their old rq until wakeup (needed for the fair sleeper
         * bonus in place_entity()).
         *
         * If it was on the rq, we've just 'preempted' it, which does convert
         * ->vruntime to a relative base.
         *
         * Make sure both cases convert their relative position when migrating
         * to another cgroup's rq. This does somewhat interfere with the
         * fair sleeper stuff for the first placement, but who cares.
         */
        /*
         * When !on_rq, the vruntime of the task has usually NOT been
         * normalized. But there are some cases where it has already been
         * normalized:
         *
         * - Moving a forked child which is waiting for being woken up by
         *   wake_up_new_task().
         * - Moving a task which has been woken up by try_to_wake_up() and
         *   is waiting to actually be woken up by sched_ttwu_pending().
         *
         * To prevent boost or penalty in the new cfs_rq caused by delta
         * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
         */
        if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
                on_rq = 1;

        if (!on_rq)
                p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
        set_task_rq(p, task_cpu(p));
        if (!on_rq) {
                cfs_rq = cfs_rq_of(&p->se);
                p->se.vruntime += cfs_rq->min_vruntime;
#ifdef CONFIG_SMP
                /*
                 * migrate_task_rq_fair() will have removed our previous
                 * contribution, but we must synchronize for ongoing future
                 * decay.
                 */
                p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
                cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
#endif
        }
}

void free_fair_sched_group(struct task_group *tg)
{
        int i;

        destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

        for_each_possible_cpu(i) {
                if (tg->cfs_rq)
                        kfree(tg->cfs_rq[i]);
                if (tg->se)
                        kfree(tg->se[i]);
        }

        kfree(tg->cfs_rq);
        kfree(tg->se);
}

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se;
        int i;

        tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->cfs_rq)
                goto err;
        tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
        if (!tg->se)
                goto err;

        tg->shares = NICE_0_LOAD;

        init_cfs_bandwidth(tg_cfs_bandwidth(tg));

        for_each_possible_cpu(i) {
                cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
                                      GFP_KERNEL, cpu_to_node(i));
                if (!cfs_rq)
                        goto err;

                se = kzalloc_node(sizeof(struct sched_entity),
                                  GFP_KERNEL, cpu_to_node(i));
                if (!se)
                        goto err_free_rq;

                init_cfs_rq(cfs_rq);
                init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
        }

        return 1;

err_free_rq:
        kfree(cfs_rq);
err:
        return 0;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        /*
         * Only empty task groups can be destroyed; so we can speculatively
         * check on_list without danger of it being re-added.
         */
        if (!tg->cfs_rq[cpu]->on_list)
                return;

        raw_spin_lock_irqsave(&rq->lock, flags);
        list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
                        struct sched_entity *se, int cpu,
                        struct sched_entity *parent)
{
        struct rq *rq = cpu_rq(cpu);

        cfs_rq->tg = tg;
        cfs_rq->rq = rq;
        init_cfs_rq_runtime(cfs_rq);

        tg->cfs_rq[cpu] = cfs_rq;
        tg->se[cpu] = se;

        /* se could be NULL for root_task_group */
        if (!se)
                return;

        if (!parent)
                se->cfs_rq = &rq->cfs;
        else
                se->cfs_rq = parent->my_q;

        se->my_q = cfs_rq;
        update_load_set(&se->load, 0);
        se->parent = parent;
}

static DEFINE_MUTEX(shares_mutex);

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
        int i;
        unsigned long flags;

        /*
         * We can't change the weight of the root cgroup.
         */
        if (!tg->se[0])
                return -EINVAL;

        shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
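        /*
         * shares is now clamped to [MIN_SHARES, MAX_SHARES] (assumed to
         * be 2 and 1 << 18 in this kernel), with scale_load() adjusting
         * for the configured load resolution.
         */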

        mutex_lock(&shares_mutex);
        if (tg->shares == shares)
                goto done;

        tg->shares = shares;
        for_each_possible_cpu(i) {
                struct rq *rq = cpu_rq(i);
                struct sched_entity *se;

                se = tg->se[i];
                /* Propagate contribution to hierarchy */
                raw_spin_lock_irqsave(&rq->lock, flags);

                /* Possible calls to update_curr() need rq clock */
                update_rq_clock(rq);
                for_each_sched_entity(se)
                        update_cfs_shares(group_cfs_rq(se));
                raw_spin_unlock_irqrestore(&rq->lock, flags);
        }

done:
        mutex_unlock(&shares_mutex);
        return 0;
}
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
        return 1;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */

static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
        struct sched_entity *se = &task->se;
        unsigned int rr_interval = 0;

        /*
         * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
         * idle runqueue:
         */
        if (rq->cfs.load.weight)
                rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));

        return rr_interval;
}

/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class = {
        .next                   = &idle_sched_class,
        .enqueue_task           = enqueue_task_fair,
        .dequeue_task           = dequeue_task_fair,
        .yield_task             = yield_task_fair,
        .yield_to_task          = yield_to_task_fair,

        .check_preempt_curr     = check_preempt_wakeup,

        .pick_next_task         = pick_next_task_fair,
        .put_prev_task          = put_prev_task_fair,

#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_fair,
#ifdef CONFIG_FAIR_GROUP_SCHED
        .migrate_task_rq        = migrate_task_rq_fair,
#endif
        .rq_online              = rq_online_fair,
        .rq_offline             = rq_offline_fair,

        .task_waking            = task_waking_fair,
#endif

        .set_curr_task          = set_curr_task_fair,
        .task_tick              = task_tick_fair,
        .task_fork              = task_fork_fair,

        .prio_changed           = prio_changed_fair,
        .switched_from          = switched_from_fair,
        .switched_to            = switched_to_fair,

        .get_rr_interval        = get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
        .task_move_group        = task_move_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
void print_cfs_stats(struct seq_file *m, int cpu)
{
        struct cfs_rq *cfs_rq;

        rcu_read_lock();
        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
                print_cfs_rq(m, cpu, cfs_rq);
        rcu_read_unlock();
}
#endif

__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
        open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ_COMMON
        nohz.next_balance = jiffies;
        zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
        cpu_notifier(sched_ilb_notifier, 0);
#endif
#endif /* SMP */

}