/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
        = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
        lw->weight += inc;
        lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
        lw->weight -= dec;
        lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
        lw->weight = w;
        lw->inv_weight = 0;
}

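/*
 * Note: any change to ->weight invalidates the cached fixed-point
 * inverse (->inv_weight); calc_delta_mine() below recomputes it lazily
 * the next time the weight is actually used for scaling.
 */
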
/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
        unsigned int cpus = min_t(int, num_online_cpus(), 8);
        unsigned int factor;

        switch (sysctl_sched_tunable_scaling) {
        case SCHED_TUNABLESCALING_NONE:
                factor = 1;
                break;
        case SCHED_TUNABLESCALING_LINEAR:
                factor = cpus;
                break;
        case SCHED_TUNABLESCALING_LOG:
        default:
                factor = 1 + ilog2(cpus);
                break;
        }

        return factor;
}

static void update_sysctl(void)
{
        unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
        (sysctl_##name = (factor) * normalized_sysctl_##name)
        SET_SYSCTL(sched_min_granularity);
        SET_SYSCTL(sched_latency);
        SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
        update_sysctl();
}

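/*
 * Worked example (illustrative): with the default LOG scaling on an
 * 8-CPU machine, factor = 1 + ilog2(8) = 4, so the effective values
 * become sched_min_granularity = 4 * 750us = 3ms, sched_latency =
 * 4 * 6ms = 24ms and sched_wakeup_granularity = 4 * 1ms = 4ms. Boxes
 * with more than 8 CPUs are clamped to the 8-CPU factor above.
 */
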
#if BITS_PER_LONG == 32
# define WMULT_CONST    (~0UL)
#else
# define WMULT_CONST    (1UL << 32)
#endif

#define WMULT_SHIFT     32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
                struct load_weight *lw)
{
        u64 tmp;

        /*
         * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
         * entities since MIN_SHARES = 2. Treat weight as 1 if less than
         * 2^SCHED_LOAD_RESOLUTION.
         */
        if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
                tmp = (u64)delta_exec * scale_load_down(weight);
        else
                tmp = (u64)delta_exec;

        if (!lw->inv_weight) {
                unsigned long w = scale_load_down(lw->weight);

                if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
                        lw->inv_weight = 1;
                else if (unlikely(!w))
                        lw->inv_weight = WMULT_CONST;
                else
                        lw->inv_weight = WMULT_CONST / w;
        }

        /*
         * Check whether we'd overflow the 64-bit multiplication:
         */
        if (unlikely(tmp > WMULT_CONST))
                tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
                        WMULT_SHIFT/2);
        else
                tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

        return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}

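/*
 * Worked example (illustrative, assuming SCHED_LOAD_RESOLUTION = 0 so
 * scale_load_down() is a no-op): scaling delta_exec = 1000000ns for a
 * nice-0 entity (weight 1024) against a queue weight of 2048 gives
 * inv_weight = 2^32 / 2048, and then
 *   SRR(1000000 * 1024 * inv_weight, 32) ~= 500000,
 * the fixed-point equivalent of 1000000 * 1024 / 2048.
 */
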
const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!entity_is_task(se));
#endif
        return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
                                       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (!cfs_rq->on_list) {
                /*
                 * Ensure we either appear before our parent (if already
                 * enqueued) or force our parent to appear after us when it is
                 * enqueued. The fact that we always enqueue bottom-up
                 * reduces this to two cases.
                 */
                if (cfs_rq->tg->parent &&
                    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
                        list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                } else {
                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                }

                cfs_rq->on_list = 1;
                /* We should have no load, but we need to update last_decay. */
                update_cfs_rq_blocked_load(cfs_rq, 0);
        }
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->on_list) {
                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
                cfs_rq->on_list = 0;
        }
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return 1;

        return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
        int depth = 0;

        for_each_sched_entity(se)
                depth++;

        return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
        int se_depth, pse_depth;

        /*
         * The preemption test can only be made between sibling entities
         * that are in the same cfs_rq, i.e. that have a common parent.
         * Walk up the hierarchy of both tasks until we find their
         * ancestors that are siblings of a common parent.
         */

        /* First walk up until both entities are at same depth */
        se_depth = depth_se(*se);
        pse_depth = depth_se(*pse);

        while (se_depth > pse_depth) {
                se_depth--;
                *se = parent_entity(*se);
        }

        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = parent_entity(*pse);
        }

        while (!is_same_group(*se, *pse)) {
                *se = parent_entity(*se);
                *pse = parent_entity(*pse);
        }
}

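/*
 * Illustration (hypothetical hierarchy): with groups /A/B and /A/C,
 * task t1 in B and task t2 in C, find_matching_se(&t1_se, &t2_se)
 * walks both entities upwards until they share a cfs_rq, leaving the
 * group entities of B and C (siblings within A) - the level at which
 * a preemption decision between t1 and t2 is actually meaningful.
 */
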
#else   /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - max_vruntime);
        if (delta > 0)
                max_vruntime = vruntime;

        return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
                                struct sched_entity *b)
{
        return (s64)(a->vruntime - b->vruntime) < 0;
}

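/*
 * Note on the signed-delta idiom above: vruntime is an unsigned 64-bit
 * clock that is allowed to wrap, so orderings are decided on the signed
 * difference. E.g. with a = 10 and b = ULLONG_MAX (b just wrapped),
 * (s64)(a - b) == 11 > 0, correctly treating a as the later vruntime.
 */
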
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
        u64 vruntime = cfs_rq->min_vruntime;

        if (cfs_rq->curr)
                vruntime = cfs_rq->curr->vruntime;

        if (cfs_rq->rb_leftmost) {
                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
                                                   struct sched_entity,
                                                   run_node);

                if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
        }

        /* ensure we never gain time by being placed backwards. */
        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
        smp_wmb();
        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (entity_before(se, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;

                next_node = rb_next(&se->run_node);
                cfs_rq->rb_leftmost = next_node;
        }

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *left = cfs_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
        struct rb_node *next = rb_next(&se->run_node);

        if (!next)
                return NULL;

        return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

        if (!last)
                return NULL;

        return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int factor = get_update_sysctl_factor();

        if (ret || !write)
                return ret;

        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
                                        sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
        (normalized_sysctl_##name = sysctl_##name / (factor))
        WRT_SYSCTL(sched_min_granularity);
        WRT_SYSCTL(sched_latency);
        WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

        return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

        return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period = sysctl_sched_min_granularity;
                period *= nr_running;
        }

        return period;
}

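/*
 * Worked example (with the unscaled defaults: l = 6ms, nl = 8,
 * min_granularity = 0.75ms): 4 runnable tasks share the 6ms period,
 * while 16 runnable tasks stretch it to 16 * 0.75ms = 12ms so that no
 * slice drops below the minimum granularity.
 */
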
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

        for_each_sched_entity(se) {
                struct load_weight *load;
                struct load_weight lw;

                cfs_rq = cfs_rq_of(se);
                load = &cfs_rq->load;

                if (unlikely(!se->on_rq)) {
                        lw = cfs_rq->load;

                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
                slice = calc_delta_mine(slice, se->load.weight, load);
        }
        return slice;
}

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP
static inline void __update_task_entity_contrib(struct sched_entity *se);

/*
 * Give a new task initial runnable-average values so that it is seen
 * as fully loaded from the start instead of ramping up from zero.
 */
void init_task_runnable_average(struct task_struct *p)
{
        u32 slice;

        p->se.avg.decay_count = 0;
        slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
        p->se.avg.runnable_avg_sum = slice;
        p->se.avg.runnable_avg_period = slice;
        __update_task_entity_contrib(&p->se);
}
#else
void init_task_runnable_average(struct task_struct *p)
{
}
#endif

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
{
        unsigned long delta_exec_weighted;

        schedstat_set(curr->statistics.exec_max,
                      max((u64)delta_exec, curr->statistics.exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);
        delta_exec_weighted = calc_delta_fair(delta_exec, curr);

        curr->vruntime += delta_exec_weighted;
        update_min_vruntime(cfs_rq);
}

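/*
 * Worked example (illustrative): a nice-0 task (weight 1024) running
 * for 1ms advances its vruntime by exactly 1ms, while a nice -5 task
 * (weight 3121) advances by roughly 1ms * 1024/3121 ~= 0.33ms.
 * Heavier tasks thus accumulate vruntime more slowly and hold the
 * leftmost spot in the timeline longer.
 */
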
static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_clock_task(rq_of(cfs_rq));
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);
        if (!delta_exec)
                return;

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }

        account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
        schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
        schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
        if (entity_is_task(se)) {
                trace_sched_stat_wait(task_of(se),
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
        }
#endif
        schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * numa task sample period in ms
 */
unsigned int sysctl_numa_balancing_scan_period_min = 100;
unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static void task_numa_placement(struct task_struct *p)
{
        int seq;

        if (!p->mm) /* for example, ksmd faulting in a user's mm */
                return;
        seq = ACCESS_ONCE(p->mm->numa_scan_seq);
        if (p->numa_scan_seq == seq)
                return;
        p->numa_scan_seq = seq;

        /* FIXME: Scheduling placement policy hints go here */
}

/*
 * Got a PROT_NONE fault for a page on @node.
 */
void task_numa_fault(int node, int pages, bool migrated)
{
        struct task_struct *p = current;

        if (!sched_feat_numa(NUMA))
                return;

        /* FIXME: Allocate task-specific structure for placement policy here */

        /*
         * If pages are properly placed (did not migrate) then scan slower.
         * This is reset periodically in case of phase changes.
         */
        if (!migrated)
                p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
                        p->numa_scan_period + jiffies_to_msecs(10));

        task_numa_placement(p);
}

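/*
 * Worked example (illustrative, assuming jiffies_to_msecs(10) ~= 10ms):
 * each fault batch whose pages were already properly placed backs the
 * scan period off by ~10ms, from the 100ms minimum up to the 5000ms
 * maximum above, so a task with stable placement ends up sampled 50x
 * less often than one whose pages are still migrating.
 */
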
static void reset_ptenuma_scan(struct task_struct *p)
{
        ACCESS_ONCE(p->mm->numa_scan_seq)++;
        p->mm->numa_scan_offset = 0;
}

/*
 * The expensive part of numa migration is done from task_work context.
 * Triggered from task_tick_numa().
 */
void task_numa_work(struct callback_head *work)
{
        unsigned long migrate, next_scan, now = jiffies;
        struct task_struct *p = current;
        struct mm_struct *mm = p->mm;
        struct vm_area_struct *vma;
        unsigned long start, end;
        long pages;

        WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));

        work->next = work; /* protect against double add */
        /*
         * Who cares about NUMA placement when they're dying.
         *
         * NOTE: make sure not to dereference p->mm before this check,
         * exit_task_work() happens _after_ exit_mm() so we could be called
         * without p->mm even though we still had it when we enqueued this
         * work.
         */
        if (p->flags & PF_EXITING)
                return;

        /*
         * We do not care about task placement until a task runs on a node
         * other than the first one used by the address space. This is
         * largely because migrations are driven by what CPU the task
         * is running on. If it's never scheduled on another node, it'll
         * not migrate so why bother trapping the fault.
         */
        if (mm->first_nid == NUMA_PTE_SCAN_INIT)
                mm->first_nid = numa_node_id();
        if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
                /* Are we running on a new node yet? */
                if (numa_node_id() == mm->first_nid &&
                    !sched_feat_numa(NUMA_FORCE))
                        return;

                mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
        }

        /*
         * Reset the scan period if enough time has gone by. Objective is that
         * scanning will be reduced if pages are properly placed. As tasks
         * can enter different phases this needs to be re-examined. Lacking
         * proper tracking of reference behaviour, this blunt hammer is used.
         */
        migrate = mm->numa_next_reset;
        if (time_after(now, migrate)) {
                p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
                next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
                xchg(&mm->numa_next_reset, next_scan);
        }

        /*
         * Enforce maximal scan/migration frequency..
         */
        migrate = mm->numa_next_scan;
        if (time_before(now, migrate))
                return;

        if (p->numa_scan_period == 0)
                p->numa_scan_period = sysctl_numa_balancing_scan_period_min;

        next_scan = now + msecs_to_jiffies(p->numa_scan_period);
        if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
                return;

        /*
         * Do not set pte_numa if the current running node is rate-limited.
         * This loses statistics on the fault but if we are unwilling to
         * migrate to this node, it is less likely we can do useful work.
         */
        if (migrate_ratelimited(numa_node_id()))
                return;

        start = mm->numa_scan_offset;
        pages = sysctl_numa_balancing_scan_size;
        pages <<= 20 - PAGE_SHIFT; /* MB in pages */
        if (!pages)
                return;

        down_read(&mm->mmap_sem);
        vma = find_vma(mm, start);
        if (!vma) {
                reset_ptenuma_scan(p);
                start = 0;
                vma = mm->mmap;
        }
        for (; vma; vma = vma->vm_next) {
                if (!vma_migratable(vma))
                        continue;

                /* Skip small VMAs. They are not likely to be of relevance */
                if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
                        continue;

                do {
                        start = max(start, vma->vm_start);
                        end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
                        end = min(end, vma->vm_end);
                        pages -= change_prot_numa(vma, start, end);

                        start = end;
                        if (pages <= 0)
                                goto out;
                } while (end != vma->vm_end);
        }

out:
        /*
         * It is possible to reach the end of the VMA list, but the last few
         * VMAs are not guaranteed to be migratable. If they are not, we would
         * find the !migratable VMA on the next scan but not reset the scanner
         * to the start, so check it now.
         */
        if (vma)
                mm->numa_scan_offset = start;
        else
                reset_ptenuma_scan(p);
        up_read(&mm->mmap_sem);
}

/*
 * Drive the periodic memory faults..
 */
void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
        struct callback_head *work = &curr->numa_work;
        u64 period, now;

        /*
         * We don't care about NUMA placement if we don't have memory.
         */
        if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
                return;

        /*
         * Using runtime rather than walltime has the dual advantage that
         * we (mostly) drive the selection from busy threads and that the
         * task needs to have done some actual work before we bother with
         * NUMA placement.
         */
        now = curr->se.sum_exec_runtime;
        period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;

        if (now - curr->node_stamp > period) {
                if (!curr->node_stamp)
                        curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
                curr->node_stamp = now;

                if (!time_before(jiffies, curr->mm->numa_next_scan)) {
                        init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
                        task_work_add(curr, work, true);
                }
        }
}
#else
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
}
#endif /* CONFIG_NUMA_BALANCING */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_add(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
        if (entity_is_task(se))
                list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
#endif
        cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_sub(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
        if (entity_is_task(se))
                list_del_init(&se->group_node);
        cfs_rq->nr_running--;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
{
        long tg_weight;

        /*
         * Use this CPU's actual weight instead of the last load_contribution
         * to gain a more accurate current total weight. See
         * update_cfs_rq_load_contribution().
         */
        tg_weight = atomic64_read(&tg->load_avg);
        tg_weight -= cfs_rq->tg_load_contrib;
        tg_weight += cfs_rq->load.weight;

        return tg_weight;
}

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
        long tg_weight, load, shares;

        tg_weight = calc_tg_weight(tg, cfs_rq);
        load = cfs_rq->load.weight;

        shares = (tg->shares * load);
        if (tg_weight)
                shares /= tg_weight;

        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
        if (shares > tg->shares)
                shares = tg->shares;

        return shares;
}
# else /* CONFIG_SMP */
static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
        return tg->shares;
}
# endif /* CONFIG_SMP */
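/*
 * Worked example for calc_cfs_shares() above (illustrative): a group
 * with tg->shares = 1024 whose weight summed over all CPUs is 3072,
 * 1024 of which is queued here, gets shares = 1024 * 1024 / 3072 ~= 341
 * on this CPU - a third of the group's shares, matching this CPU's
 * share of the group's load.
 */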
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
{
        if (se->on_rq) {
                /* commit outstanding execution time */
                if (cfs_rq->curr == se)
                        update_curr(cfs_rq);
                account_entity_dequeue(cfs_rq, se);
        }

        update_load_set(&se->load, weight);

        if (se->on_rq)
                account_entity_enqueue(cfs_rq, se);
}

static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
        struct task_group *tg;
        struct sched_entity *se;
        long shares;

        tg = cfs_rq->tg;
        se = tg->se[cpu_of(rq_of(cfs_rq))];
        if (!se || throttled_hierarchy(cfs_rq))
                return;
#ifndef CONFIG_SMP
        if (likely(se->load.weight == tg->shares))
                return;
#endif
        shares = calc_cfs_shares(cfs_rq, tg);

        reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We choose a half-life close to 1 scheduling period.
 * Note: The tables below are dependent on this value.
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */

/* Precomputed fixed inverse multiplies for multiplication by y^n */
static const u32 runnable_avg_yN_inv[] = {
        0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
        0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
        0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
        0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
        0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
        0x85aac367, 0x82cd8698,
};

/*
 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
 * over-estimates when re-combining.
 */
static const u32 runnable_avg_yN_sum[] = {
            0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
         9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
        17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
};

/*
 * Approximate:
 *   val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
 */
static __always_inline u64 decay_load(u64 val, u64 n)
{
        unsigned int local_n;

        if (!n)
                return val;
        else if (unlikely(n > LOAD_AVG_PERIOD * 63))
                return 0;

        /* after bounds checking we can collapse to 32-bit */
        local_n = n;

        /*
         * As y^PERIOD = 1/2, we can combine
         *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
         * with a look-up table which covers y^n (n < PERIOD)
         *
         * to achieve a constant-time decay_load.
         */
        if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
                val >>= local_n / LOAD_AVG_PERIOD;
                local_n %= LOAD_AVG_PERIOD;
        }

        val *= runnable_avg_yN_inv[local_n];
        /* We don't use SRR here since we always want to round down. */
        return val >> 32;
}

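/*
 * Worked example (illustrative): decay_load(1024, 32) crosses exactly
 * one full period, so the value is halved by the shift (the round-down
 * multiply by runnable_avg_yN_inv[0] then yields 511), while
 * decay_load(1024, 16) uses the table directly:
 *   1024 * 0xb504f333 >> 32 ~= 724, i.e. 1024 * y^16 = 1024 / sqrt(2).
 */
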
/*
 * For updates fully spanning n periods, the contribution to runnable
 * average will be: \Sum 1024*y^n
 *
 * We can compute this reasonably efficiently by combining:
 *   y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n < PERIOD}
 */
static u32 __compute_runnable_contrib(u64 n)
{
        u32 contrib = 0;

        if (likely(n <= LOAD_AVG_PERIOD))
                return runnable_avg_yN_sum[n];
        else if (unlikely(n >= LOAD_AVG_MAX_N))
                return LOAD_AVG_MAX;

        /* Compute \Sum y^n combining precomputed values for y^i, \Sum y^j */
        do {
                contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
                contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];

                n -= LOAD_AVG_PERIOD;
        } while (n > LOAD_AVG_PERIOD);

        contrib = decay_load(contrib, n);
        return contrib + runnable_avg_yN_sum[n];
}

/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series. To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int __update_entity_runnable_avg(u64 now,
                                                        struct sched_avg *sa,
                                                        int runnable)
{
        u64 delta, periods;
        u32 runnable_contrib;
        int delta_w, decayed = 0;

        delta = now - sa->last_runnable_update;
        /*
         * This should only happen when time goes backwards, which it
         * unfortunately does during sched clock init when we swap over to TSC.
         */
        if ((s64)delta < 0) {
                sa->last_runnable_update = now;
                return 0;
        }

        /*
         * Use 1024ns as the unit of measurement since it's a reasonable
         * approximation of 1us and fast to compute.
         */
        delta >>= 10;
        if (!delta)
                return 0;
        sa->last_runnable_update = now;

        /* delta_w is the amount already accumulated against our next period */
        delta_w = sa->runnable_avg_period % 1024;
        if (delta + delta_w >= 1024) {
                /* period roll-over */
                decayed = 1;

                /*
                 * Now that we know we're crossing a period boundary, figure
                 * out how much from delta we need to complete the current
                 * period and accrue it.
                 */
                delta_w = 1024 - delta_w;
                if (runnable)
                        sa->runnable_avg_sum += delta_w;
                sa->runnable_avg_period += delta_w;

                delta -= delta_w;

                /* Figure out how many additional periods this update spans */
                periods = delta / 1024;
                delta %= 1024;

                sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
                                                  periods + 1);
                sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
                                                     periods + 1);

                /* Efficiently calculate \Sum (1..n_period) 1024*y^i */
                runnable_contrib = __compute_runnable_contrib(periods);
                if (runnable)
                        sa->runnable_avg_sum += runnable_contrib;
                sa->runnable_avg_period += runnable_contrib;
        }

        /* Remainder of delta accrued against u_0` */
        if (runnable)
                sa->runnable_avg_sum += delta;
        sa->runnable_avg_period += delta;

        return decayed;
}

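/*
 * Worked example (illustrative): an entity whose current period already
 * holds 512us (delta_w = 512) that now accrues delta = 2048us first
 * adds 512us to close out the current period, decays both sums by the
 * periods crossed, adds the precomputed geometric contribution for the
 * single full period spanned (periods = 1536/1024 = 1), and accrues
 * the trailing 512us against the new current period u_0`.
 */
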
/* Synchronize an entity's decay with its parenting cfs_rq. */
static inline u64 __synchronize_entity_decay(struct sched_entity *se)
{
        struct cfs_rq *cfs_rq = cfs_rq_of(se);
        u64 decays = atomic64_read(&cfs_rq->decay_counter);

        decays -= se->avg.decay_count;
        if (!decays)
                return 0;

        se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
        se->avg.decay_count = 0;

        return decays;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
                                                   int force_update)
{
        struct task_group *tg = cfs_rq->tg;
        s64 tg_contrib;

        tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
        tg_contrib -= cfs_rq->tg_load_contrib;

        if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
                atomic64_add(tg_contrib, &tg->load_avg);
                cfs_rq->tg_load_contrib += tg_contrib;
        }
}

/*
 * Aggregate cfs_rq runnable averages into an equivalent task_group
 * representation for computing load contributions.
 */
static inline void __update_tg_runnable_avg(struct sched_avg *sa,
                                            struct cfs_rq *cfs_rq)
{
        struct task_group *tg = cfs_rq->tg;
        long contrib;

        /* The fraction of a cpu used by this cfs_rq */
        contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
                          sa->runnable_avg_period + 1);
        contrib -= cfs_rq->tg_runnable_contrib;

        if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
                atomic_add(contrib, &tg->runnable_avg);
                cfs_rq->tg_runnable_contrib += contrib;
        }
}

Paul Turner8165e142012-10-04 13:18:31 +02001391static inline void __update_group_entity_contrib(struct sched_entity *se)
1392{
1393 struct cfs_rq *cfs_rq = group_cfs_rq(se);
1394 struct task_group *tg = cfs_rq->tg;
Paul Turnerbb17f652012-10-04 13:18:31 +02001395 int runnable_avg;
1396
Paul Turner8165e142012-10-04 13:18:31 +02001397 u64 contrib;
1398
1399 contrib = cfs_rq->tg_load_contrib * tg->shares;
1400 se->avg.load_avg_contrib = div64_u64(contrib,
1401 atomic64_read(&tg->load_avg) + 1);
Paul Turnerbb17f652012-10-04 13:18:31 +02001402
1403 /*
1404 * For group entities we need to compute a correction term in the case
1405 * that they are consuming <1 cpu so that we would contribute the same
1406 * load as a task of equal weight.
1407 *
1408 * Explicitly co-ordinating this measurement would be expensive, but
 1409 * fortunately the sum of each cpu's contribution forms a usable
1410 * lower-bound on the true value.
1411 *
1412 * Consider the aggregate of 2 contributions. Either they are disjoint
 1413 * (and the sum represents the true value) or they overlap and we are
1414 * understating by the aggregate of their overlap.
1415 *
1416 * Extending this to N cpus, for a given overlap, the maximum amount we
 1417 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
1418 * cpus that overlap for this interval and w_i is the interval width.
1419 *
 1420 * On a small machine, the first term is well-bounded, which bounds the
 1421 * total error since w_i is a subset of the period. Whereas on a
 1422 * larger machine, while this first term can be larger, if w_i is of
 1423 * consequential size, n_i*w_i is guaranteed to quickly converge to
 1424 * our upper bound of 1-cpu.
1425 */
1426 runnable_avg = atomic_read(&tg->runnable_avg);
1427 if (runnable_avg < NICE_0_LOAD) {
1428 se->avg.load_avg_contrib *= runnable_avg;
1429 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1430 }
Paul Turner8165e142012-10-04 13:18:31 +02001431}
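
/*
 * Numeric sketch of the correction above (userspace, assumed values;
 * NICE_0_LOAD == 1024 as on a 32-bit build, not part of this file's
 * build). A group entity's base contribution -- its share of
 * tg->shares weighted by this cpu's portion of the group load -- is
 * scaled down by the group-wide runnable fraction when the whole
 * group consumes less than one cpu.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_NICE_0_LOAD         1024
#define TOY_NICE_0_SHIFT        10

int main(void)
{
        uint64_t tg_load_contrib = 512; /* this cpu's share of group load */
        uint64_t shares = 2048;         /* configured group weight */
        uint64_t tg_load_avg = 1024;    /* group load summed over all cpus */
        long runnable_avg = 256;        /* group runnable ~25% of one cpu */
        uint64_t contrib;

        contrib = tg_load_contrib * shares / (tg_load_avg + 1);
        printf("base contrib:      %llu\n", (unsigned long long)contrib);

        /* consuming <1 cpu: contribute like a task of equal weight */
        if (runnable_avg < TOY_NICE_0_LOAD) {
                contrib *= runnable_avg;
                contrib >>= TOY_NICE_0_SHIFT;
        }
        printf("corrected contrib: %llu\n", (unsigned long long)contrib);
        return 0;
}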
Paul Turnerc566e8e2012-10-04 13:18:30 +02001432#else
1433static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1434 int force_update) {}
Paul Turnerbb17f652012-10-04 13:18:31 +02001435static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1436 struct cfs_rq *cfs_rq) {}
Paul Turner8165e142012-10-04 13:18:31 +02001437static inline void __update_group_entity_contrib(struct sched_entity *se) {}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001438#endif
1439
Paul Turner8165e142012-10-04 13:18:31 +02001440static inline void __update_task_entity_contrib(struct sched_entity *se)
1441{
1442 u32 contrib;
1443
1444 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1445 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1446 contrib /= (se->avg.runnable_avg_period + 1);
1447 se->avg.load_avg_contrib = scale_load(contrib);
1448}
1449
Paul Turner2dac7542012-10-04 13:18:30 +02001450/* Compute the current contribution to load_avg by se, return any delta */
1451static long __update_entity_load_avg_contrib(struct sched_entity *se)
1452{
1453 long old_contrib = se->avg.load_avg_contrib;
1454
Paul Turner8165e142012-10-04 13:18:31 +02001455 if (entity_is_task(se)) {
1456 __update_task_entity_contrib(se);
1457 } else {
Paul Turnerbb17f652012-10-04 13:18:31 +02001458 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
Paul Turner8165e142012-10-04 13:18:31 +02001459 __update_group_entity_contrib(se);
1460 }
Paul Turner2dac7542012-10-04 13:18:30 +02001461
1462 return se->avg.load_avg_contrib - old_contrib;
1463}
1464
Paul Turner9ee474f2012-10-04 13:18:30 +02001465static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1466 long load_contrib)
1467{
1468 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1469 cfs_rq->blocked_load_avg -= load_contrib;
1470 else
1471 cfs_rq->blocked_load_avg = 0;
1472}
1473
Paul Turnerf1b17282012-10-04 13:18:31 +02001474static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1475
Paul Turner9d85f212012-10-04 13:18:29 +02001476/* Update a sched_entity's runnable average */
Paul Turner9ee474f2012-10-04 13:18:30 +02001477static inline void update_entity_load_avg(struct sched_entity *se,
1478 int update_cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02001479{
Paul Turner2dac7542012-10-04 13:18:30 +02001480 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1481 long contrib_delta;
Paul Turnerf1b17282012-10-04 13:18:31 +02001482 u64 now;
Paul Turner2dac7542012-10-04 13:18:30 +02001483
Paul Turnerf1b17282012-10-04 13:18:31 +02001484 /*
 1485 * For a group entity we need to use its own cfs_rq_clock_task() in
 1486 * case it is the parent of a throttled hierarchy.
1487 */
1488 if (entity_is_task(se))
1489 now = cfs_rq_clock_task(cfs_rq);
1490 else
1491 now = cfs_rq_clock_task(group_cfs_rq(se));
1492
1493 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
Paul Turner2dac7542012-10-04 13:18:30 +02001494 return;
1495
1496 contrib_delta = __update_entity_load_avg_contrib(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02001497
1498 if (!update_cfs_rq)
1499 return;
1500
Paul Turner2dac7542012-10-04 13:18:30 +02001501 if (se->on_rq)
1502 cfs_rq->runnable_load_avg += contrib_delta;
Paul Turner9ee474f2012-10-04 13:18:30 +02001503 else
1504 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1505}
1506
1507/*
1508 * Decay the load contributed by all blocked children and account this so that
 1509 * their contribution may be appropriately discounted when they wake up.
1510 */
Paul Turneraff3e492012-10-04 13:18:30 +02001511static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001512{
Paul Turnerf1b17282012-10-04 13:18:31 +02001513 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001514 u64 decays;
1515
1516 decays = now - cfs_rq->last_decay;
Paul Turneraff3e492012-10-04 13:18:30 +02001517 if (!decays && !force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001518 return;
1519
Paul Turneraff3e492012-10-04 13:18:30 +02001520 if (atomic64_read(&cfs_rq->removed_load)) {
1521 u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0);
1522 subtract_blocked_load_contrib(cfs_rq, removed_load);
1523 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001524
Paul Turneraff3e492012-10-04 13:18:30 +02001525 if (decays) {
1526 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1527 decays);
1528 atomic64_add(decays, &cfs_rq->decay_counter);
1529 cfs_rq->last_decay = now;
1530 }
Paul Turnerc566e8e2012-10-04 13:18:30 +02001531
1532 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
Paul Turner9d85f212012-10-04 13:18:29 +02001533}
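
/*
 * Userspace sketch (assumed values; compile with -lm) of the batching
 * above: the clock is shifted down by 20 bits so one "decay" is a
 * 2^20ns (~1ms) period, and the blocked load is decayed once for
 * however many whole periods elapsed since last_decay, rather than on
 * every call.
 */
#include <stdio.h>
#include <stdint.h>
#include <math.h>

static uint64_t toy_decay_load(uint64_t load, uint64_t n)
{
        /* the kernel uses a fixed-point table; pow() keeps this short */
        return (uint64_t)(load * pow(0.97857206, (double)n));
}

int main(void)
{
        uint64_t blocked_load = 4096;
        uint64_t last_decay = 100;              /* in 2^20ns units */
        uint64_t now_ns = 110 * (1ULL << 20);   /* 10 periods later */
        uint64_t now = now_ns >> 20;
        uint64_t decays = now - last_decay;

        if (decays) {
                blocked_load = toy_decay_load(blocked_load, decays);
                last_decay = now;
        }
        printf("blocked load after %llu decays: %llu\n",
               (unsigned long long)decays,
               (unsigned long long)blocked_load);
        return 0;
}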
Ben Segall18bf2802012-10-04 12:51:20 +02001534
1535static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1536{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001537 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
Paul Turnerbb17f652012-10-04 13:18:31 +02001538 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
Ben Segall18bf2802012-10-04 12:51:20 +02001539}
Paul Turner2dac7542012-10-04 13:18:30 +02001540
1541/* Add the load generated by se into cfs_rq's child load-average */
1542static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001543 struct sched_entity *se,
1544 int wakeup)
Paul Turner2dac7542012-10-04 13:18:30 +02001545{
Paul Turneraff3e492012-10-04 13:18:30 +02001546 /*
 1547 * We track migrations using entity decay_count <= 0; on a wake-up
1548 * migration we use a negative decay count to track the remote decays
1549 * accumulated while sleeping.
Alex Shia75cdaa2013-06-20 10:18:47 +08001550 *
1551 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
1552 * are seen by enqueue_entity_load_avg() as a migration with an already
1553 * constructed load_avg_contrib.
Paul Turneraff3e492012-10-04 13:18:30 +02001554 */
1555 if (unlikely(se->avg.decay_count <= 0)) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001556 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
Paul Turneraff3e492012-10-04 13:18:30 +02001557 if (se->avg.decay_count) {
1558 /*
1559 * In a wake-up migration we have to approximate the
1560 * time sleeping. This is because we can't synchronize
1561 * clock_task between the two cpus, and it is not
1562 * guaranteed to be read-safe. Instead, we can
1563 * approximate this using our carried decays, which are
1564 * explicitly atomically readable.
1565 */
1566 se->avg.last_runnable_update -= (-se->avg.decay_count)
1567 << 20;
1568 update_entity_load_avg(se, 0);
1569 /* Indicate that we're now synchronized and on-rq */
1570 se->avg.decay_count = 0;
1571 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001572 wakeup = 0;
1573 } else {
1574 __synchronize_entity_decay(se);
1575 }
1576
Paul Turneraff3e492012-10-04 13:18:30 +02001577 /* migrated tasks did not contribute to our blocked load */
1578 if (wakeup) {
Paul Turner9ee474f2012-10-04 13:18:30 +02001579 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turneraff3e492012-10-04 13:18:30 +02001580 update_entity_load_avg(se, 0);
1581 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001582
Paul Turner2dac7542012-10-04 13:18:30 +02001583 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02001584 /* we force update consideration on load-balancer moves */
1585 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
Paul Turner2dac7542012-10-04 13:18:30 +02001586}
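
/*
 * Userspace illustration (made-up numbers, not part of this file's
 * build) of the wake-up-migration approximation above: the remote
 * cpu's clock can't be read safely, but each decay carried in
 * se->avg.decay_count corresponds to one 2^20ns (~1ms) period, so
 * rewinding last_runnable_update by decays << 20 charges roughly the
 * right amount of sleep time.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t decay_count = -7;               /* 7 periods decayed remotely */
        uint64_t now = 1000000000ULL;           /* destination rq clock_task */
        uint64_t last_runnable_update = now;    /* re-based on this cpu */

        /* approximate the sleep: one 2^20ns period per carried decay */
        last_runnable_update -= (uint64_t)(-decay_count) << 20;

        printf("approximated sleep: %llu ns (~%llu ms)\n",
               (unsigned long long)(now - last_runnable_update),
               (unsigned long long)((now - last_runnable_update) >> 20));
        return 0;
}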
1587
Paul Turner9ee474f2012-10-04 13:18:30 +02001588/*
 1589 * Remove se's load from this cfs_rq child load-average; if the entity is
1590 * transitioning to a blocked state we track its projected decay using
1591 * blocked_load_avg.
1592 */
Paul Turner2dac7542012-10-04 13:18:30 +02001593static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001594 struct sched_entity *se,
1595 int sleep)
Paul Turner2dac7542012-10-04 13:18:30 +02001596{
Paul Turner9ee474f2012-10-04 13:18:30 +02001597 update_entity_load_avg(se, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02001598 /* we force update consideration on load-balancer moves */
1599 update_cfs_rq_blocked_load(cfs_rq, !sleep);
Paul Turner9ee474f2012-10-04 13:18:30 +02001600
Paul Turner2dac7542012-10-04 13:18:30 +02001601 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
Paul Turner9ee474f2012-10-04 13:18:30 +02001602 if (sleep) {
1603 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1604 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1605 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
Paul Turner2dac7542012-10-04 13:18:30 +02001606}
Vincent Guittot642dbc32013-04-18 18:34:26 +02001607
1608/*
1609 * Update the rq's load with the elapsed running time before entering
 1610 * idle. If the last scheduled task is not a CFS task, idle_enter will
1611 * be the only way to update the runnable statistic.
1612 */
1613void idle_enter_fair(struct rq *this_rq)
1614{
1615 update_rq_runnable_avg(this_rq, 1);
1616}
1617
1618/*
1619 * Update the rq's load with the elapsed idle time before a task is
 1620 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
1621 * be the only way to update the runnable statistic.
1622 */
1623void idle_exit_fair(struct rq *this_rq)
1624{
1625 update_rq_runnable_avg(this_rq, 0);
1626}
1627
Paul Turner9d85f212012-10-04 13:18:29 +02001628#else
Paul Turner9ee474f2012-10-04 13:18:30 +02001629static inline void update_entity_load_avg(struct sched_entity *se,
1630 int update_cfs_rq) {}
Ben Segall18bf2802012-10-04 12:51:20 +02001631static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001632static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001633 struct sched_entity *se,
1634 int wakeup) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001635static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001636 struct sched_entity *se,
1637 int sleep) {}
Paul Turneraff3e492012-10-04 13:18:30 +02001638static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1639 int force_update) {}
Paul Turner9d85f212012-10-04 13:18:29 +02001640#endif
1641
Ingo Molnar2396af62007-08-09 11:16:48 +02001642static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001643{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001644#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02001645 struct task_struct *tsk = NULL;
1646
1647 if (entity_is_task(se))
1648 tsk = task_of(se);
1649
Lucas De Marchi41acab82010-03-10 23:37:45 -03001650 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001651 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001652
1653 if ((s64)delta < 0)
1654 delta = 0;
1655
Lucas De Marchi41acab82010-03-10 23:37:45 -03001656 if (unlikely(delta > se->statistics.sleep_max))
1657 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001658
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001659 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001660 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01001661
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001662 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02001663 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001664 trace_sched_stat_sleep(tsk, delta);
1665 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001666 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03001667 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001668 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001669
1670 if ((s64)delta < 0)
1671 delta = 0;
1672
Lucas De Marchi41acab82010-03-10 23:37:45 -03001673 if (unlikely(delta > se->statistics.block_max))
1674 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001675
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001676 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001677 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02001678
Peter Zijlstrae4143142009-07-23 20:13:26 +02001679 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001680 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001681 se->statistics.iowait_sum += delta;
1682 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001683 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001684 }
1685
Andrew Vaginb781a602011-11-28 12:03:35 +03001686 trace_sched_stat_blocked(tsk, delta);
1687
Peter Zijlstrae4143142009-07-23 20:13:26 +02001688 /*
1689 * Blocking time is in units of nanosecs, so shift by
1690 * 20 to get a milliseconds-range estimation of the
1691 * amount of time that the task spent sleeping:
1692 */
1693 if (unlikely(prof_on == SLEEP_PROFILING)) {
1694 profile_hits(SLEEP_PROFILING,
1695 (void *)get_wchan(tsk),
1696 delta >> 20);
1697 }
1698 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02001699 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001700 }
1701#endif
1702}
1703
Peter Zijlstraddc97292007-10-15 17:00:10 +02001704static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1705{
1706#ifdef CONFIG_SCHED_DEBUG
1707 s64 d = se->vruntime - cfs_rq->min_vruntime;
1708
1709 if (d < 0)
1710 d = -d;
1711
1712 if (d > 3*sysctl_sched_latency)
1713 schedstat_inc(cfs_rq, nr_spread_over);
1714#endif
1715}
1716
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001717static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001718place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1719{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02001720 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001721
Peter Zijlstra2cb86002007-11-09 22:39:37 +01001722 /*
 1723 * The 'current' period is already promised to the current tasks;
 1724 * however, the extra weight of the new task will slow them down a
 1725 * little. Place the new task so that it fits in the slot that
 1726 * stays open at the end.
1727 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001728 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02001729 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001730
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001731 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01001732 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001733 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001734
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001735 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001736 * Halve their sleep time's effect, to allow
1737 * for a gentler effect of sleepers:
1738 */
1739 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1740 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02001741
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001742 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001743 }
1744
Mike Galbraithb5d9d732009-09-08 11:12:28 +02001745 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05301746 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001747}
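
/*
 * Userspace sketch of the placement policy above (assumed values,
 * not part of this file's build): forks are debited one vslice past
 * min_vruntime (START_DEBIT), wakers are credited at most half the
 * latency target (GENTLE_FAIR_SLEEPERS), and the max_vruntime() clamp
 * ensures an entity can never gain time by being placed backwards.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t toy_max_vruntime(uint64_t max_v, uint64_t v)
{
        return (int64_t)(v - max_v) > 0 ? v : max_v;
}

static uint64_t toy_place(uint64_t min_vruntime, uint64_t se_vruntime,
                          uint64_t vslice, uint64_t latency, int initial)
{
        uint64_t vruntime = min_vruntime;

        if (initial)                    /* START_DEBIT: end of open slot */
                vruntime += vslice;
        else                            /* gentle sleeper credit */
                vruntime -= latency >> 1;

        return toy_max_vruntime(se_vruntime, vruntime);
}

int main(void)
{
        uint64_t min_vr = 10000000, vslice = 1500000, latency = 6000000;

        printf("fork:   %llu\n", (unsigned long long)
               toy_place(min_vr, 0, vslice, latency, 1));
        printf("wakeup: %llu\n", (unsigned long long)
               toy_place(min_vr, 9500000, vslice, latency, 0));
        return 0;
}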
1748
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001749static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1750
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001751static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001752enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001753{
1754 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001755 * Update the normalized vruntime before updating min_vruntime
 1756 * through calling update_curr().
1757 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001758 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001759 se->vruntime += cfs_rq->min_vruntime;
1760
1761 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001762 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001763 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02001764 update_curr(cfs_rq);
Paul Turnerf269ae02012-10-04 13:18:31 +02001765 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001766 account_entity_enqueue(cfs_rq, se);
1767 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001768
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001769 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001770 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02001771 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02001772 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001773
Ingo Molnard2417e52007-08-09 11:16:47 +02001774 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02001775 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001776 if (se != cfs_rq->curr)
1777 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001778 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001779
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001780 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001781 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001782 check_enqueue_throttle(cfs_rq);
1783 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001784}
1785
Rik van Riel2c13c9192011-02-01 09:48:37 -05001786static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01001787{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001788 for_each_sched_entity(se) {
1789 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1790 if (cfs_rq->last == se)
1791 cfs_rq->last = NULL;
1792 else
1793 break;
1794 }
1795}
Peter Zijlstra2002c692008-11-11 11:52:33 +01001796
Rik van Riel2c13c9192011-02-01 09:48:37 -05001797static void __clear_buddies_next(struct sched_entity *se)
1798{
1799 for_each_sched_entity(se) {
1800 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1801 if (cfs_rq->next == se)
1802 cfs_rq->next = NULL;
1803 else
1804 break;
1805 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01001806}
1807
Rik van Rielac53db52011-02-01 09:51:03 -05001808static void __clear_buddies_skip(struct sched_entity *se)
1809{
1810 for_each_sched_entity(se) {
1811 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1812 if (cfs_rq->skip == se)
1813 cfs_rq->skip = NULL;
1814 else
1815 break;
1816 }
1817}
1818
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001819static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1820{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001821 if (cfs_rq->last == se)
1822 __clear_buddies_last(se);
1823
1824 if (cfs_rq->next == se)
1825 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05001826
1827 if (cfs_rq->skip == se)
1828 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001829}
1830
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07001831static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07001832
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001833static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001834dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001835{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001836 /*
1837 * Update run-time statistics of the 'current'.
1838 */
1839 update_curr(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001840 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001841
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02001842 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001843 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02001844#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001845 if (entity_is_task(se)) {
1846 struct task_struct *tsk = task_of(se);
1847
1848 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001849 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001850 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001851 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001852 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02001853#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02001854 }
1855
Peter Zijlstra2002c692008-11-11 11:52:33 +01001856 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01001857
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001858 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001859 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001860 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001861 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001862
1863 /*
1864 * Normalize the entity after updating the min_vruntime because the
1865 * update can refer to the ->curr item and we need to reflect this
1866 * movement in our normalized position.
1867 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001868 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001869 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07001870
Paul Turnerd8b49862011-07-21 09:43:41 -07001871 /* return excess runtime on last dequeue */
1872 return_cfs_rq_runtime(cfs_rq);
1873
Peter Zijlstra1e876232011-05-17 16:21:10 -07001874 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001875 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001876}
1877
1878/*
1879 * Preempt the current task with a newly woken task if needed:
1880 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02001881static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02001882check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001883{
Peter Zijlstra11697832007-09-05 14:32:49 +02001884 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001885 struct sched_entity *se;
1886 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02001887
Peter Zijlstra6d0f0ebd2007-10-15 17:00:05 +02001888 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02001889 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01001890 if (delta_exec > ideal_runtime) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001891 resched_task(rq_of(cfs_rq)->curr);
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01001892 /*
1893 * The current task ran long enough, ensure it doesn't get
1894 * re-elected due to buddy favours.
1895 */
1896 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02001897 return;
1898 }
1899
1900 /*
1901 * Ensure that a task that missed wakeup preemption by a
1902 * narrow margin doesn't have to wait for a full slice.
1903 * This also mitigates buddy induced latencies under load.
1904 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02001905 if (delta_exec < sysctl_sched_min_granularity)
1906 return;
1907
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001908 se = __pick_first_entity(cfs_rq);
1909 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02001910
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001911 if (delta < 0)
1912 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01001913
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001914 if (delta > ideal_runtime)
1915 resched_task(rq_of(cfs_rq)->curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001916}
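
/*
 * Decision table for the tick-preemption logic above, as a userspace
 * sketch with assumed numbers (not part of this file's build). Three
 * checks, in order: over the slice => resched; ran under
 * min_granularity => keep running; otherwise resched only if the
 * leftmost waiter is more than a slice behind in vruntime.
 */
#include <stdio.h>
#include <stdint.h>

static int toy_should_resched(uint64_t delta_exec, uint64_t ideal_runtime,
                              uint64_t min_gran, int64_t vruntime_delta)
{
        if (delta_exec > ideal_runtime)
                return 1;       /* slice exhausted */
        if (delta_exec < min_gran)
                return 0;       /* protect against over-scheduling */
        return vruntime_delta > (int64_t)ideal_runtime;
}

int main(void)
{
        uint64_t ideal = 3000000, min_gran = 750000;

        /* ran 4ms of a 3ms slice: preempt */
        printf("%d\n", toy_should_resched(4000000, ideal, min_gran, 0));
        /* ran 0.5ms: too little, keep running */
        printf("%d\n", toy_should_resched(500000, ideal, min_gran, 5000000));
        /* ran 1ms but the leftmost task is 4ms behind in vruntime: preempt */
        printf("%d\n", toy_should_resched(1000000, ideal, min_gran, 4000000));
        return 0;
}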
1917
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001918static void
Ingo Molnar8494f412007-08-09 11:16:48 +02001919set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001920{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001921 /* 'current' is not kept within the tree. */
1922 if (se->on_rq) {
1923 /*
 1924 * Any task has to be enqueued before it gets to execute on
1925 * a CPU. So account for the time it spent waiting on the
1926 * runqueue.
1927 */
1928 update_stats_wait_end(cfs_rq, se);
1929 __dequeue_entity(cfs_rq, se);
1930 }
1931
Ingo Molnar79303e92007-08-09 11:16:47 +02001932 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02001933 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02001934#ifdef CONFIG_SCHEDSTATS
1935 /*
1936 * Track our maximum slice length, if the CPU's load is at
 1937 * least twice that of our own weight (i.e. don't track it
1938 * when there are only lesser-weight tasks around):
1939 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02001940 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001941 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02001942 se->sum_exec_runtime - se->prev_sum_exec_runtime);
1943 }
1944#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02001945 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001946}
1947
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02001948static int
1949wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1950
Rik van Rielac53db52011-02-01 09:51:03 -05001951/*
1952 * Pick the next process, keeping these things in mind, in this order:
1953 * 1) keep things fair between processes/task groups
1954 * 2) pick the "next" process, since someone really wants that to run
1955 * 3) pick the "last" process, for cache locality
1956 * 4) do not run the "skip" process, if something else is available
1957 */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01001958static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001959{
Rik van Rielac53db52011-02-01 09:51:03 -05001960 struct sched_entity *se = __pick_first_entity(cfs_rq);
Mike Galbraithf685cea2009-10-23 23:09:22 +02001961 struct sched_entity *left = se;
Peter Zijlstraf4b67552008-11-04 21:25:07 +01001962
Rik van Rielac53db52011-02-01 09:51:03 -05001963 /*
1964 * Avoid running the skip buddy, if running something else can
1965 * be done without getting too unfair.
1966 */
1967 if (cfs_rq->skip == se) {
1968 struct sched_entity *second = __pick_next_entity(se);
1969 if (second && wakeup_preempt_entity(second, left) < 1)
1970 se = second;
1971 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001972
Mike Galbraithf685cea2009-10-23 23:09:22 +02001973 /*
1974 * Prefer last buddy, try to return the CPU to a preempted task.
1975 */
1976 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1977 se = cfs_rq->last;
1978
Rik van Rielac53db52011-02-01 09:51:03 -05001979 /*
1980 * Someone really wants this to run. If it's not unfair, run it.
1981 */
1982 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1983 se = cfs_rq->next;
1984
Mike Galbraithf685cea2009-10-23 23:09:22 +02001985 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01001986
1987 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001988}
1989
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001990static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1991
Ingo Molnarab6cde22007-08-09 11:16:48 +02001992static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001993{
1994 /*
1995 * If still on the runqueue then deactivate_task()
1996 * was not called and update_curr() has to be done:
1997 */
1998 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02001999 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002000
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002001 /* throttle cfs_rqs exceeding runtime */
2002 check_cfs_rq_runtime(cfs_rq);
2003
Peter Zijlstraddc97292007-10-15 17:00:10 +02002004 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002005 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02002006 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002007 /* Put 'current' back into the tree. */
2008 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02002009 /* in !on_rq case, update occurred at dequeue */
Paul Turner9ee474f2012-10-04 13:18:30 +02002010 update_entity_load_avg(prev, 1);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002011 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02002012 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002013}
2014
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002015static void
2016entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002017{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002018 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002019 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002020 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002021 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002022
Paul Turner43365bd2010-12-15 19:10:17 -08002023 /*
Paul Turner9d85f212012-10-04 13:18:29 +02002024 * Ensure that runnable average is periodically updated.
2025 */
Paul Turner9ee474f2012-10-04 13:18:30 +02002026 update_entity_load_avg(curr, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002027 update_cfs_rq_blocked_load(cfs_rq, 1);
Paul Turner9d85f212012-10-04 13:18:29 +02002028
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002029#ifdef CONFIG_SCHED_HRTICK
2030 /*
2031 * queued ticks are scheduled to match the slice, so don't bother
2032 * validating it and just reschedule.
2033 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07002034 if (queued) {
2035 resched_task(rq_of(cfs_rq)->curr);
2036 return;
2037 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002038 /*
2039 * don't let the period tick interfere with the hrtick preemption
2040 */
2041 if (!sched_feat(DOUBLE_TICK) &&
2042 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2043 return;
2044#endif
2045
Yong Zhang2c2efae2011-07-29 16:20:33 +08002046 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002047 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002048}
2049
Paul Turnerab84d312011-07-21 09:43:28 -07002050
2051/**************************************************
2052 * CFS bandwidth control machinery
2053 */
2054
2055#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02002056
2057#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01002058static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02002059
2060static inline bool cfs_bandwidth_used(void)
2061{
Ingo Molnarc5905af2012-02-24 08:31:31 +01002062 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002063}
2064
2065void account_cfs_bandwidth_used(int enabled, int was_enabled)
2066{
2067 /* only need to count groups transitioning between enabled/!enabled */
2068 if (enabled && !was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002069 static_key_slow_inc(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002070 else if (!enabled && was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002071 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002072}
2073#else /* HAVE_JUMP_LABEL */
2074static bool cfs_bandwidth_used(void)
2075{
2076 return true;
2077}
2078
2079void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2080#endif /* HAVE_JUMP_LABEL */
2081
Paul Turnerab84d312011-07-21 09:43:28 -07002082/*
2083 * default period for cfs group bandwidth.
2084 * default: 0.1s, units: nanoseconds
2085 */
2086static inline u64 default_cfs_period(void)
2087{
2088 return 100000000ULL;
2089}
Paul Turnerec12cb72011-07-21 09:43:30 -07002090
2091static inline u64 sched_cfs_bandwidth_slice(void)
2092{
2093 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2094}
2095
Paul Turnera9cf55b2011-07-21 09:43:32 -07002096/*
2097 * Replenish runtime according to assigned quota and update expiration time.
2098 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2099 * additional synchronization around rq->lock.
2100 *
2101 * requires cfs_b->lock
2102 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02002103void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07002104{
2105 u64 now;
2106
2107 if (cfs_b->quota == RUNTIME_INF)
2108 return;
2109
2110 now = sched_clock_cpu(smp_processor_id());
2111 cfs_b->runtime = cfs_b->quota;
2112 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2113}
2114
Peter Zijlstra029632f2011-10-25 10:00:11 +02002115static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2116{
2117 return &tg->cfs_bandwidth;
2118}
2119
Paul Turnerf1b17282012-10-04 13:18:31 +02002120/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2121static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2122{
2123 if (unlikely(cfs_rq->throttle_count))
2124 return cfs_rq->throttled_clock_task;
2125
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002126 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02002127}
2128
Paul Turner85dac902011-07-21 09:43:33 -07002129/* returns 0 on failure to allocate runtime */
2130static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07002131{
2132 struct task_group *tg = cfs_rq->tg;
2133 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002134 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002135
2136 /* note: this is a positive sum as runtime_remaining <= 0 */
2137 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2138
2139 raw_spin_lock(&cfs_b->lock);
2140 if (cfs_b->quota == RUNTIME_INF)
2141 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07002142 else {
Paul Turnera9cf55b2011-07-21 09:43:32 -07002143 /*
2144 * If the bandwidth pool has become inactive, then at least one
2145 * period must have elapsed since the last consumption.
2146 * Refresh the global state and ensure bandwidth timer becomes
2147 * active.
2148 */
2149 if (!cfs_b->timer_active) {
2150 __refill_cfs_bandwidth_runtime(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07002151 __start_cfs_bandwidth(cfs_b);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002152 }
Paul Turner58088ad2011-07-21 09:43:31 -07002153
2154 if (cfs_b->runtime > 0) {
2155 amount = min(cfs_b->runtime, min_amount);
2156 cfs_b->runtime -= amount;
2157 cfs_b->idle = 0;
2158 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002159 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07002160 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002161 raw_spin_unlock(&cfs_b->lock);
2162
2163 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002164 /*
2165 * we may have advanced our local expiration to account for allowed
2166 * spread between our sched_clock and the one on which runtime was
2167 * issued.
2168 */
2169 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2170 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07002171
2172 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002173}
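
/*
 * Userspace sketch of the slice mechanism above (assumed sizes,
 * locking omitted, not part of this file's build): each cfs_rq tops
 * its local runtime back up to one full slice from the global pool,
 * so the cfs_b lock is taken once per slice rather than on every
 * accounting update. A zero return mirrors the "caller must throttle"
 * case.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_SLICE_NS    5000000LL       /* 5ms, the default slice */

struct toy_bandwidth {
        int64_t runtime;        /* global quota left this period */
};

static int toy_assign(struct toy_bandwidth *b, int64_t *local)
{
        /* positive sum: local runtime_remaining is <= 0 here */
        int64_t min_amount = TOY_SLICE_NS - *local;
        int64_t amount = 0;

        if (b->runtime > 0) {
                amount = b->runtime < min_amount ? b->runtime : min_amount;
                b->runtime -= amount;
        }
        *local += amount;
        return *local > 0;
}

int main(void)
{
        struct toy_bandwidth b = { .runtime = 7000000 };        /* 7ms left */
        int64_t local = -1000000;       /* overran the last slice by 1ms */

        printf("ok=%d local=%lld pool=%lld\n", toy_assign(&b, &local),
               (long long)local, (long long)b.runtime);
        return 0;
}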
2174
2175/*
2176 * Note: This depends on the synchronization provided by sched_clock and the
2177 * fact that rq->clock snapshots this value.
2178 */
2179static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2180{
2181 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002182
2183 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002184 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07002185 return;
2186
2187 if (cfs_rq->runtime_remaining < 0)
2188 return;
2189
2190 /*
2191 * If the local deadline has passed we have to consider the
2192 * possibility that our sched_clock is 'fast' and the global deadline
2193 * has not truly expired.
2194 *
 2195 * Fortunately we can determine whether this is the case by checking
2196 * whether the global deadline has advanced.
2197 */
2198
2199 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2200 /* extend local deadline, drift is bounded above by 2 ticks */
2201 cfs_rq->runtime_expires += TICK_NSEC;
2202 } else {
2203 /* global deadline is ahead, expiration has passed */
2204 cfs_rq->runtime_remaining = 0;
2205 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002206}
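
/*
 * Userspace sketch (assumed values, not part of this file's build) of
 * the deadline check above: if the local expiry passed but the global
 * one did not advance, our sched_clock is merely fast and the local
 * deadline is nudged by one tick; if the global deadline moved on,
 * the quota truly expired.
 */
#include <stdio.h>
#include <stdint.h>

#define TOY_TICK_NSEC   1000000LL       /* assume 1ms ticks */

static void toy_expire(int64_t *local_remaining, int64_t *local_expires,
                       int64_t global_expires, int64_t now)
{
        if (now - *local_expires < 0 || *local_remaining < 0)
                return;                         /* nothing to do */

        if (*local_expires - global_expires >= 0) {
                /* clock drift: extend, bounded above by 2 ticks */
                *local_expires += TOY_TICK_NSEC;
        } else {
                /* global deadline is ahead: truly expired */
                *local_remaining = 0;
        }
}

int main(void)
{
        int64_t remaining = 2000000, expires = 100000000;

        /* the global pool refreshed: its deadline is later than ours */
        toy_expire(&remaining, &expires, 150000000, 101000000);
        printf("remaining after true expiry: %lld\n", (long long)remaining);
        return 0;
}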
2207
2208static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2209 unsigned long delta_exec)
2210{
Paul Turnera9cf55b2011-07-21 09:43:32 -07002211 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07002212 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002213 expire_cfs_rq_runtime(cfs_rq);
2214
2215 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07002216 return;
2217
Paul Turner85dac902011-07-21 09:43:33 -07002218 /*
2219 * if we're unable to extend our runtime we resched so that the active
2220 * hierarchy can be throttled
2221 */
2222 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2223 resched_task(rq_of(cfs_rq)->curr);
Paul Turnerec12cb72011-07-21 09:43:30 -07002224}
2225
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002226static __always_inline
2227void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07002228{
Paul Turner56f570e2011-11-07 20:26:33 -08002229 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07002230 return;
2231
2232 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2233}
2234
Paul Turner85dac902011-07-21 09:43:33 -07002235static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2236{
Paul Turner56f570e2011-11-07 20:26:33 -08002237 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07002238}
2239
Paul Turner64660c82011-07-21 09:43:36 -07002240/* check whether cfs_rq, or any parent, is throttled */
2241static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2242{
Paul Turner56f570e2011-11-07 20:26:33 -08002243 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07002244}
2245
2246/*
2247 * Ensure that neither of the group entities corresponding to src_cpu or
2248 * dest_cpu are members of a throttled hierarchy when performing group
2249 * load-balance operations.
2250 */
2251static inline int throttled_lb_pair(struct task_group *tg,
2252 int src_cpu, int dest_cpu)
2253{
2254 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2255
2256 src_cfs_rq = tg->cfs_rq[src_cpu];
2257 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2258
2259 return throttled_hierarchy(src_cfs_rq) ||
2260 throttled_hierarchy(dest_cfs_rq);
2261}
2262
2263/* updated child weight may affect parent so we have to do this bottom up */
2264static int tg_unthrottle_up(struct task_group *tg, void *data)
2265{
2266 struct rq *rq = data;
2267 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2268
2269 cfs_rq->throttle_count--;
2270#ifdef CONFIG_SMP
2271 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02002272 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002273 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02002274 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07002275 }
2276#endif
2277
2278 return 0;
2279}
2280
2281static int tg_throttle_down(struct task_group *tg, void *data)
2282{
2283 struct rq *rq = data;
2284 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2285
Paul Turner82958362012-10-04 13:18:31 +02002286 /* group is entering throttled state, stop time */
2287 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002288 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07002289 cfs_rq->throttle_count++;
2290
2291 return 0;
2292}
2293
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002294static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07002295{
2296 struct rq *rq = rq_of(cfs_rq);
2297 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2298 struct sched_entity *se;
2299 long task_delta, dequeue = 1;
2300
2301 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2302
Paul Turnerf1b17282012-10-04 13:18:31 +02002303 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07002304 rcu_read_lock();
2305 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2306 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07002307
2308 task_delta = cfs_rq->h_nr_running;
2309 for_each_sched_entity(se) {
2310 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2311 /* throttled entity or throttle-on-deactivate */
2312 if (!se->on_rq)
2313 break;
2314
2315 if (dequeue)
2316 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2317 qcfs_rq->h_nr_running -= task_delta;
2318
2319 if (qcfs_rq->load.weight)
2320 dequeue = 0;
2321 }
2322
2323 if (!se)
2324 rq->nr_running -= task_delta;
2325
2326 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002327 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07002328 raw_spin_lock(&cfs_b->lock);
2329 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2330 raw_spin_unlock(&cfs_b->lock);
2331}
2332
Peter Zijlstra029632f2011-10-25 10:00:11 +02002333void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07002334{
2335 struct rq *rq = rq_of(cfs_rq);
2336 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2337 struct sched_entity *se;
2338 int enqueue = 1;
2339 long task_delta;
2340
Michael Wang22b958d2013-06-04 14:23:39 +08002341 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07002342
2343 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02002344
2345 update_rq_clock(rq);
2346
Paul Turner671fd9d2011-07-21 09:43:34 -07002347 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002348 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07002349 list_del_rcu(&cfs_rq->throttled_list);
2350 raw_spin_unlock(&cfs_b->lock);
2351
Paul Turner64660c82011-07-21 09:43:36 -07002352 /* update hierarchical throttle state */
2353 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2354
Paul Turner671fd9d2011-07-21 09:43:34 -07002355 if (!cfs_rq->load.weight)
2356 return;
2357
2358 task_delta = cfs_rq->h_nr_running;
2359 for_each_sched_entity(se) {
2360 if (se->on_rq)
2361 enqueue = 0;
2362
2363 cfs_rq = cfs_rq_of(se);
2364 if (enqueue)
2365 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2366 cfs_rq->h_nr_running += task_delta;
2367
2368 if (cfs_rq_throttled(cfs_rq))
2369 break;
2370 }
2371
2372 if (!se)
2373 rq->nr_running += task_delta;
2374
2375 /* determine whether we need to wake up potentially idle cpu */
2376 if (rq->curr == rq->idle && rq->cfs.nr_running)
2377 resched_task(rq->curr);
2378}
2379
2380static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2381 u64 remaining, u64 expires)
2382{
2383 struct cfs_rq *cfs_rq;
2384 u64 runtime = remaining;
2385
2386 rcu_read_lock();
2387 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2388 throttled_list) {
2389 struct rq *rq = rq_of(cfs_rq);
2390
2391 raw_spin_lock(&rq->lock);
2392 if (!cfs_rq_throttled(cfs_rq))
2393 goto next;
2394
2395 runtime = -cfs_rq->runtime_remaining + 1;
2396 if (runtime > remaining)
2397 runtime = remaining;
2398 remaining -= runtime;
2399
2400 cfs_rq->runtime_remaining += runtime;
2401 cfs_rq->runtime_expires = expires;
2402
2403 /* we check whether we're throttled above */
2404 if (cfs_rq->runtime_remaining > 0)
2405 unthrottle_cfs_rq(cfs_rq);
2406
2407next:
2408 raw_spin_unlock(&rq->lock);
2409
2410 if (!remaining)
2411 break;
2412 }
2413 rcu_read_unlock();
2414
2415 return remaining;
2416}
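
/*
 * Userspace sketch (locking and lists simplified, not part of this
 * file's build) of the distribution above: each throttled queue
 * receives just enough runtime to bring runtime_remaining to +1 --
 * enough to unthrottle and run until the next accounting point -- so
 * a limited refill is spread across as many throttled queues as
 * possible.
 */
#include <stdio.h>
#include <stdint.h>

struct toy_cfs_rq {
        int64_t runtime_remaining;      /* <= 0 while throttled */
        int throttled;
};

static int64_t toy_distribute(struct toy_cfs_rq *rqs, int n, int64_t remaining)
{
        int i;

        for (i = 0; i < n && remaining; i++) {
                int64_t want = -rqs[i].runtime_remaining + 1;

                if (!rqs[i].throttled)
                        continue;
                if (want > remaining)
                        want = remaining;
                remaining -= want;

                rqs[i].runtime_remaining += want;
                if (rqs[i].runtime_remaining > 0)
                        rqs[i].throttled = 0;   /* unthrottle */
        }
        return remaining;
}

int main(void)
{
        struct toy_cfs_rq rqs[] = {
                { .runtime_remaining = -500000, .throttled = 1 },
                { .runtime_remaining = -2000000, .throttled = 1 },
        };
        int64_t left = toy_distribute(rqs, 2, 1000000); /* 1ms to hand out */

        printf("left=%lld rq0.throttled=%d rq1.throttled=%d\n",
               (long long)left, rqs[0].throttled, rqs[1].throttled);
        return 0;
}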
2417
Paul Turner58088ad2011-07-21 09:43:31 -07002418/*
2419 * Responsible for refilling a task_group's bandwidth and unthrottling its
2420 * cfs_rqs as appropriate. If there has been no activity within the last
2421 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2422 * used to track this state.
2423 */
2424static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2425{
Paul Turner671fd9d2011-07-21 09:43:34 -07002426 u64 runtime, runtime_expires;
2427 int idle = 1, throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07002428
2429 raw_spin_lock(&cfs_b->lock);
2430 /* no need to continue the timer with no bandwidth constraint */
2431 if (cfs_b->quota == RUNTIME_INF)
2432 goto out_unlock;
2433
Paul Turner671fd9d2011-07-21 09:43:34 -07002434 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2435 /* idle depends on !throttled (for the case of a large deficit) */
2436 idle = cfs_b->idle && !throttled;
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002437 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07002438
Paul Turnera9cf55b2011-07-21 09:43:32 -07002439 /* if we're going inactive then everything else can be deferred */
2440 if (idle)
2441 goto out_unlock;
2442
2443 __refill_cfs_bandwidth_runtime(cfs_b);
2444
Paul Turner671fd9d2011-07-21 09:43:34 -07002445 if (!throttled) {
2446 /* mark as potentially idle for the upcoming period */
2447 cfs_b->idle = 1;
2448 goto out_unlock;
2449 }
Paul Turner58088ad2011-07-21 09:43:31 -07002450
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002451 /* account preceding periods in which throttling occurred */
2452 cfs_b->nr_throttled += overrun;
2453
Paul Turner671fd9d2011-07-21 09:43:34 -07002454 /*
2455 * There are throttled entities so we must first use the new bandwidth
2456 * to unthrottle them before making it generally available. This
2457 * ensures that all existing debts will be paid before a new cfs_rq is
2458 * allowed to run.
2459 */
2460 runtime = cfs_b->runtime;
2461 runtime_expires = cfs_b->runtime_expires;
2462 cfs_b->runtime = 0;
2463
2464 /*
2465 * This check is repeated as we are holding onto the new bandwidth
2466 * while we unthrottle. This can potentially race with an unthrottled
2467 * group trying to acquire new bandwidth from the global pool.
2468 */
2469 while (throttled && runtime > 0) {
2470 raw_spin_unlock(&cfs_b->lock);
2471 /* we can't nest cfs_b->lock while distributing bandwidth */
2472 runtime = distribute_cfs_runtime(cfs_b, runtime,
2473 runtime_expires);
2474 raw_spin_lock(&cfs_b->lock);
2475
2476 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2477 }
2478
2479 /* return (any) remaining runtime */
2480 cfs_b->runtime = runtime;
2481 /*
2482 * While we are ensured activity in the period following an
2483 * unthrottle, this also covers the case in which the new bandwidth is
2484 * insufficient to cover the existing bandwidth deficit. (Forcing the
2485 * timer to remain active while there are any throttled entities.)
2486 */
2487 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07002488out_unlock:
2489 if (idle)
2490 cfs_b->timer_active = 0;
2491 raw_spin_unlock(&cfs_b->lock);
2492
2493 return idle;
2494}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002495
Paul Turnerd8b49862011-07-21 09:43:41 -07002496/* a cfs_rq won't donate quota below this amount */
2497static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2498/* minimum remaining period time to redistribute slack quota */
2499static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2500/* how long we wait to gather additional slack before distributing */
2501static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2502
2503/* are we near the end of the current quota period? */
2504static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2505{
2506 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2507 u64 remaining;
2508
 2509 /* if the call-back is running, a quota refresh is already occurring */
2510 if (hrtimer_callback_running(refresh_timer))
2511 return 1;
2512
2513 /* is a quota refresh about to occur? */
2514 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2515 if (remaining < min_expire)
2516 return 1;
2517
2518 return 0;
2519}
2520
2521static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2522{
2523 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2524
2525 /* if there's a quota refresh soon don't bother with slack */
2526 if (runtime_refresh_within(cfs_b, min_left))
2527 return;
2528
2529 start_bandwidth_timer(&cfs_b->slack_timer,
2530 ns_to_ktime(cfs_bandwidth_slack_period));
2531}
2532
2533/* we know any runtime found here is valid as update_curr() precedes return */
2534static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2535{
2536 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2537 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2538
2539 if (slack_runtime <= 0)
2540 return;
2541
2542 raw_spin_lock(&cfs_b->lock);
2543 if (cfs_b->quota != RUNTIME_INF &&
2544 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2545 cfs_b->runtime += slack_runtime;
2546
2547 /* we are under rq->lock, defer unthrottling using a timer */
2548 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2549 !list_empty(&cfs_b->throttled_cfs_rq))
2550 start_cfs_slack_bandwidth(cfs_b);
2551 }
2552 raw_spin_unlock(&cfs_b->lock);
2553
2554 /* even if it's not valid for return we don't want to try again */
2555 cfs_rq->runtime_remaining -= slack_runtime;
2556}
2557
2558static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2559{
Paul Turner56f570e2011-11-07 20:26:33 -08002560 if (!cfs_bandwidth_used())
2561 return;
2562
Paul Turnerfccfdc62011-11-07 20:26:34 -08002563 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07002564 return;
2565
2566 __return_cfs_rq_runtime(cfs_rq);
2567}
2568
2569/*
2570 * This is done with a timer (instead of inline with bandwidth return) since
2571 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2572 */
2573static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2574{
2575 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2576 u64 expires;
2577
2578 /* confirm we're still not at a refresh boundary */
2579 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2580 return;
2581
2582 raw_spin_lock(&cfs_b->lock);
2583 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2584 runtime = cfs_b->runtime;
2585 cfs_b->runtime = 0;
2586 }
2587 expires = cfs_b->runtime_expires;
2588 raw_spin_unlock(&cfs_b->lock);
2589
2590 if (!runtime)
2591 return;
2592
2593 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2594
2595 raw_spin_lock(&cfs_b->lock);
2596 if (expires == cfs_b->runtime_expires)
2597 cfs_b->runtime = runtime;
2598 raw_spin_unlock(&cfs_b->lock);
2599}
2600
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002601/*
2602 * When a group wakes up we want to make sure that its quota is not already
2603 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 2604 * runtime, as update_curr() throttling cannot trigger until it's on-rq.
2605 */
2606static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2607{
Paul Turner56f570e2011-11-07 20:26:33 -08002608 if (!cfs_bandwidth_used())
2609 return;
2610
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002611 /* an active group must be handled by the update_curr()->put() path */
2612 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2613 return;
2614
2615 /* ensure the group is not already throttled */
2616 if (cfs_rq_throttled(cfs_rq))
2617 return;
2618
2619 /* update runtime allocation */
2620 account_cfs_rq_runtime(cfs_rq, 0);
2621 if (cfs_rq->runtime_remaining <= 0)
2622 throttle_cfs_rq(cfs_rq);
2623}
2624
2625/* conditionally throttle active cfs_rq's from put_prev_entity() */
2626static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2627{
Paul Turner56f570e2011-11-07 20:26:33 -08002628 if (!cfs_bandwidth_used())
2629 return;
2630
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002631 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2632 return;
2633
2634 /*
2635 * it's possible for a throttled entity to be forced into a running
2636 * state (e.g. set_curr_task), in this case we're finished.
2637 */
2638 if (cfs_rq_throttled(cfs_rq))
2639 return;
2640
2641 throttle_cfs_rq(cfs_rq);
2642}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002643
Peter Zijlstra029632f2011-10-25 10:00:11 +02002644static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2645{
2646 struct cfs_bandwidth *cfs_b =
2647 container_of(timer, struct cfs_bandwidth, slack_timer);
2648 do_sched_cfs_slack_timer(cfs_b);
2649
2650 return HRTIMER_NORESTART;
2651}
2652
2653static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2654{
2655 struct cfs_bandwidth *cfs_b =
2656 container_of(timer, struct cfs_bandwidth, period_timer);
2657 ktime_t now;
2658 int overrun;
2659 int idle = 0;
2660
2661 for (;;) {
2662 now = hrtimer_cb_get_time(timer);
2663 overrun = hrtimer_forward(timer, now, cfs_b->period);
2664
2665 if (!overrun)
2666 break;
2667
2668 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2669 }
2670
2671 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2672}
2673
2674void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2675{
2676 raw_spin_lock_init(&cfs_b->lock);
2677 cfs_b->runtime = 0;
2678 cfs_b->quota = RUNTIME_INF;
2679 cfs_b->period = ns_to_ktime(default_cfs_period());
2680
2681 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2682 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2683 cfs_b->period_timer.function = sched_cfs_period_timer;
2684 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2685 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2686}
2687
2688static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2689{
2690 cfs_rq->runtime_enabled = 0;
2691 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2692}
2693
2694/* requires cfs_b->lock, may release to reprogram timer */
2695void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2696{
2697 /*
2698 * The timer may be active because we're trying to set a new bandwidth
2699 * period or because we're racing with the tear-down path
2700 * (timer_active==0 becomes visible before the hrtimer call-back
 2701 * terminates). In either case we ensure that it's re-programmed.
2702 */
2703 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2704 raw_spin_unlock(&cfs_b->lock);
2705 /* ensure cfs_b->lock is available while we wait */
2706 hrtimer_cancel(&cfs_b->period_timer);
2707
2708 raw_spin_lock(&cfs_b->lock);
2709 /* if someone else restarted the timer then we're done */
2710 if (cfs_b->timer_active)
2711 return;
2712 }
2713
2714 cfs_b->timer_active = 1;
2715 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2716}
2717
2718static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2719{
2720 hrtimer_cancel(&cfs_b->period_timer);
2721 hrtimer_cancel(&cfs_b->slack_timer);
2722}
2723
Arnd Bergmann38dc3342013-01-25 14:14:22 +00002724static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02002725{
2726 struct cfs_rq *cfs_rq;
2727
2728 for_each_leaf_cfs_rq(rq, cfs_rq) {
2729 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2730
2731 if (!cfs_rq->runtime_enabled)
2732 continue;
2733
2734 /*
2735 * clock_task is not advancing so we just need to make sure
2736 * there's some valid quota amount
2737 */
2738 cfs_rq->runtime_remaining = cfs_b->quota;
2739 if (cfs_rq_throttled(cfs_rq))
2740 unthrottle_cfs_rq(cfs_rq);
2741 }
2742}
2743
2744#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02002745static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2746{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002747 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02002748}
2749
2750static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2751 unsigned long delta_exec) {}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002752static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2753static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002754static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07002755
2756static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2757{
2758 return 0;
2759}
Paul Turner64660c82011-07-21 09:43:36 -07002760
2761static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2762{
2763 return 0;
2764}
2765
2766static inline int throttled_lb_pair(struct task_group *tg,
2767 int src_cpu, int dest_cpu)
2768{
2769 return 0;
2770}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002771
2772void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2773
2774#ifdef CONFIG_FAIR_GROUP_SCHED
2775static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07002776#endif
2777
Peter Zijlstra029632f2011-10-25 10:00:11 +02002778static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2779{
2780 return NULL;
2781}
2782static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07002783static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002784
2785#endif /* CONFIG_CFS_BANDWIDTH */
2786
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002787/**************************************************
2788 * CFS operations on tasks:
2789 */
2790
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002791#ifdef CONFIG_SCHED_HRTICK
2792static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2793{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002794 struct sched_entity *se = &p->se;
2795 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2796
2797 WARN_ON(task_rq(p) != rq);
2798
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002799 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002800 u64 slice = sched_slice(cfs_rq, se);
2801 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2802 s64 delta = slice - ran;
2803
2804 if (delta < 0) {
2805 if (rq->curr == p)
2806 resched_task(p);
2807 return;
2808 }
2809
2810 /*
 2811 * Don't schedule slices shorter than 10000ns; that just
2812 * doesn't make sense. Rely on vruntime for fairness.
2813 */
Peter Zijlstra31656512008-07-18 18:01:23 +02002814 if (rq->curr != p)
Peter Zijlstra157124c2008-07-28 11:53:11 +02002815 delta = max_t(s64, 10000LL, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002816
Peter Zijlstra31656512008-07-18 18:01:23 +02002817 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002818 }
2819}
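/*
 * Illustrative numbers for the slice math above: with sched_slice() = 4ms
 * and 1ms of it already consumed, delta = 3ms and the hrtick is programmed
 * 3ms out.  A task that has already overrun its slice (delta < 0) is
 * resched'd right away if it is running and no tick is programmed, while a
 * task that is not currently running never gets a tick shorter than 10us.
 */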
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002820
2821/*
2822 * called from enqueue/dequeue and updates the hrtick when the
2823 * current task is from our class and nr_running is low enough
2824 * to matter.
2825 */
2826static void hrtick_update(struct rq *rq)
2827{
2828 struct task_struct *curr = rq->curr;
2829
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002830 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002831 return;
2832
2833 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2834 hrtick_start_fair(rq, curr);
2835}
Dhaval Giani55e12e52008-06-24 23:39:43 +05302836#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002837static inline void
2838hrtick_start_fair(struct rq *rq, struct task_struct *p)
2839{
2840}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002841
2842static inline void hrtick_update(struct rq *rq)
2843{
2844}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002845#endif
2846
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002847/*
2848 * The enqueue_task method is called before nr_running is
2849 * increased. Here we update the fair scheduling stats and
2850 * then put the task into the rbtree:
2851 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00002852static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002853enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002854{
2855 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002856 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002857
2858 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002859 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002860 break;
2861 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002862 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07002863
2864 /*
2865 * end evaluation on encountering a throttled cfs_rq
2866 *
2867 * note: in the case of encountering a throttled cfs_rq we will
2868 * post the final h_nr_running increment below.
2869 */
2870 if (cfs_rq_throttled(cfs_rq))
2871 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07002872 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07002873
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002874 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002875 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002876
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002877 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08002878 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07002879 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002880
Paul Turner85dac902011-07-21 09:43:33 -07002881 if (cfs_rq_throttled(cfs_rq))
2882 break;
2883
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002884 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02002885 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002886 }
2887
Ben Segall18bf2802012-10-04 12:51:20 +02002888 if (!se) {
2889 update_rq_runnable_avg(rq, rq->nr_running);
Paul Turner85dac902011-07-21 09:43:33 -07002890 inc_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02002891 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002892 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002893}
2894
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002895static void set_next_buddy(struct sched_entity *se);
2896
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002897/*
2898 * The dequeue_task method is called before nr_running is
2899 * decreased. We remove the task from the rbtree and
2900 * update the fair scheduling stats:
2901 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002902static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002903{
2904 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002905 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002906 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002907
2908 for_each_sched_entity(se) {
2909 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002910 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07002911
2912 /*
2913 * end evaluation on encountering a throttled cfs_rq
2914 *
2915 * note: in the case of encountering a throttled cfs_rq we will
2916 * post the final h_nr_running decrement below.
2917 */
2918 if (cfs_rq_throttled(cfs_rq))
2919 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07002920 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002921
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002922 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002923 if (cfs_rq->load.weight) {
2924 /*
2925 * Bias pick_next to pick a task from this cfs_rq, as
2926 * p is sleeping when it is within its sched_slice.
2927 */
2928 if (task_sleep && parent_entity(se))
2929 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07002930
2931 /* avoid re-evaluating load for this entity */
2932 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002933 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002934 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002935 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002936 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002937
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002938 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08002939 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07002940 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002941
Paul Turner85dac902011-07-21 09:43:33 -07002942 if (cfs_rq_throttled(cfs_rq))
2943 break;
2944
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002945 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02002946 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002947 }
2948
Ben Segall18bf2802012-10-04 12:51:20 +02002949 if (!se) {
Paul Turner85dac902011-07-21 09:43:33 -07002950 dec_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02002951 update_rq_runnable_avg(rq, 1);
2952 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002953 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002954}
2955
Gregory Haskinse7693a32008-01-25 21:08:09 +01002956#ifdef CONFIG_SMP
Peter Zijlstra029632f2011-10-25 10:00:11 +02002957/* Used instead of source_load when we know the type == 0 */
2958static unsigned long weighted_cpuload(const int cpu)
2959{
2960 return cpu_rq(cpu)->load.weight;
2961}
2962
2963/*
2964 * Return a low guess at the load of a migration-source cpu weighted
2965 * according to the scheduling class and "nice" value.
2966 *
2967 * We want to under-estimate the load of migration sources, to
2968 * balance conservatively.
2969 */
2970static unsigned long source_load(int cpu, int type)
2971{
2972 struct rq *rq = cpu_rq(cpu);
2973 unsigned long total = weighted_cpuload(cpu);
2974
2975 if (type == 0 || !sched_feat(LB_BIAS))
2976 return total;
2977
2978 return min(rq->cpu_load[type-1], total);
2979}
2980
2981/*
2982 * Return a high guess at the load of a migration-target cpu weighted
2983 * according to the scheduling class and "nice" value.
2984 */
2985static unsigned long target_load(int cpu, int type)
2986{
2987 struct rq *rq = cpu_rq(cpu);
2988 unsigned long total = weighted_cpuload(cpu);
2989
2990 if (type == 0 || !sched_feat(LB_BIAS))
2991 return total;
2992
2993 return max(rq->cpu_load[type-1], total);
2994}
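/*
 * Example of the bias, with made-up numbers: if a cpu's instantaneous
 * load.weight is 2048 but its decayed cpu_load[type-1] is 1024,
 * source_load() reports 1024 (under-estimating the source) while
 * target_load() reports 2048 (over-estimating the target), so migration
 * only happens when it pays off under both pessimistic views.
 */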
2995
2996static unsigned long power_of(int cpu)
2997{
2998 return cpu_rq(cpu)->cpu_power;
2999}
3000
3001static unsigned long cpu_avg_load_per_task(int cpu)
3002{
3003 struct rq *rq = cpu_rq(cpu);
3004 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
3005
3006 if (nr_running)
3007 return rq->load.weight / nr_running;
3008
3009 return 0;
3010}
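/*
 * E.g. two runnable nice-0 tasks (weight 1024 each, per prio_to_weight[])
 * give rq->load.weight == 2048 and an average of 1024 per task.
 */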
3011
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003012
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003013static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003014{
3015 struct sched_entity *se = &p->se;
3016 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003017 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003018
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003019#ifndef CONFIG_64BIT
3020 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003021
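	/*
	 * On 32-bit a u64 load can tear, so spin until min_vruntime and its
	 * copy agree; the update side (not visible here) is assumed to write
	 * min_vruntime first and the copy last with a barrier in between, so
	 * equality implies a consistent snapshot.
	 */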
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003022 do {
3023 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3024 smp_rmb();
3025 min_vruntime = cfs_rq->min_vruntime;
3026 } while (min_vruntime != min_vruntime_copy);
3027#else
3028 min_vruntime = cfs_rq->min_vruntime;
3029#endif
3030
3031 se->vruntime -= min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003032}
3033
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003034#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003035/*
3036 * effective_load() calculates the load change as seen from the root_task_group
3037 *
3038 * Adding load to a group doesn't make a group heavier, but can cause movement
3039 * of group shares between cpus. Assuming the shares were perfectly aligned one
3040 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003041 *
3042 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3043 * on this @cpu and results in a total addition (subtraction) of @wg to the
3044 * total group weight.
3045 *
3046 * Given a runqueue weight distribution (rw_i) we can compute a shares
3047 * distribution (s_i) using:
3048 *
3049 * s_i = rw_i / \Sum rw_j (1)
3050 *
3051 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3052 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3053 * shares distribution (s_i):
3054 *
3055 * rw_i = { 2, 4, 1, 0 }
3056 * s_i = { 2/7, 4/7, 1/7, 0 }
3057 *
3058 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3059 * task used to run on and the CPU the waker is running on), we need to
3060 * compute the effect of waking a task on either CPU and, in case of a sync
3061 * wakeup, compute the effect of the current task going to sleep.
3062 *
3063 * So for a change of @wl to the local @cpu with an overall group weight change
 3064 * of @wg we can compute the new shares distribution (s'_i) using:
3065 *
3066 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3067 *
3068 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3069 * differences in waking a task to CPU 0. The additional task changes the
3070 * weight and shares distributions like:
3071 *
3072 * rw'_i = { 3, 4, 1, 0 }
3073 * s'_i = { 3/8, 4/8, 1/8, 0 }
3074 *
3075 * We can then compute the difference in effective weight by using:
3076 *
3077 * dw_i = S * (s'_i - s_i) (3)
3078 *
3079 * Where 'S' is the group weight as seen by its parent.
3080 *
3081 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3082 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3083 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003084 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003085static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003086{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003087 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003088
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003089 if (!tg->parent) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003090 return wl;
3091
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003092 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003093 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003094
Paul Turner977dda72011-01-14 17:57:50 -08003095 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003096
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003097 /*
3098 * W = @wg + \Sum rw_j
3099 */
3100 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003101
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003102 /*
3103 * w = rw_i + @wl
3104 */
3105 w = se->my_q->load.weight + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003106
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003107 /*
3108 * wl = S * s'_i; see (2)
3109 */
3110 if (W > 0 && w < W)
3111 wl = (w * tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08003112 else
3113 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003114
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003115 /*
3116 * Per the above, wl is the new se->load.weight value; since
 3117 * those are clipped to [MIN_SHARES, ...), do so now. See
3118 * calc_cfs_shares().
3119 */
Paul Turner977dda72011-01-14 17:57:50 -08003120 if (wl < MIN_SHARES)
3121 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003122
3123 /*
3124 * wl = dw_i = S * (s'_i - s_i); see (3)
3125 */
Paul Turner977dda72011-01-14 17:57:50 -08003126 wl -= se->load.weight;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003127
3128 /*
3129 * Recursively apply this logic to all parent groups to compute
3130 * the final effective load change on the root group. Since
3131 * only the @tg group gets extra weight, all parent groups can
3132 * only redistribute existing shares. @wl is the shift in shares
3133 * resulting from this level per the above.
3134 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003135 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003136 }
3137
3138 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003139}
3140#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003141
Peter Zijlstra83378262008-06-27 13:41:37 +02003142static inline unsigned long effective_load(struct task_group *tg, int cpu,
3143 unsigned long wl, unsigned long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003144{
Peter Zijlstra83378262008-06-27 13:41:37 +02003145 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003146}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003147
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003148#endif
3149
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003150static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003151{
Paul Turnere37b6a72011-01-21 20:44:59 -08003152 s64 this_load, load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003153 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003154 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003155 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02003156 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003157 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003158
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003159 idx = sd->wake_idx;
3160 this_cpu = smp_processor_id();
3161 prev_cpu = task_cpu(p);
3162 load = source_load(prev_cpu, idx);
3163 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003164
3165 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003166 * If sync wakeup then subtract the (maximum possible)
3167 * effect of the currently running task from the load
3168 * of the current CPU:
3169 */
Peter Zijlstra83378262008-06-27 13:41:37 +02003170 if (sync) {
3171 tg = task_group(current);
3172 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003173
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003174 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02003175 load += effective_load(tg, prev_cpu, 0, -weight);
3176 }
3177
3178 tg = task_group(p);
3179 weight = p->se.load.weight;
3180
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003181 /*
3182 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003183 * due to the sync cause above having dropped this_load to 0, we'll
3184 * always have an imbalance, but there's really nothing you can do
3185 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003186 *
 3187 * Otherwise check if the two cpus are near enough in load to allow this
3188 * task to be woken on this_cpu.
3189 */
Paul Turnere37b6a72011-01-21 20:44:59 -08003190 if (this_load > 0) {
3191 s64 this_eff_load, prev_eff_load;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02003192
3193 this_eff_load = 100;
3194 this_eff_load *= power_of(prev_cpu);
3195 this_eff_load *= this_load +
3196 effective_load(tg, this_cpu, weight, weight);
3197
3198 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3199 prev_eff_load *= power_of(this_cpu);
3200 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3201
3202 balanced = this_eff_load <= prev_eff_load;
3203 } else
3204 balanced = true;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003205
3206 /*
3207 * If the currently running task will sleep within
3208 * a reasonable amount of time then attract this newly
3209 * woken task:
3210 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02003211 if (sync && balanced)
3212 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003213
Lucas De Marchi41acab82010-03-10 23:37:45 -03003214 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003215 tl_per_task = cpu_avg_load_per_task(this_cpu);
3216
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003217 if (balanced ||
3218 (this_load <= load &&
3219 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003220 /*
3221 * This domain has SD_WAKE_AFFINE and
3222 * p is cache cold in this domain, and
3223 * there is no bad imbalance.
3224 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003225 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003226 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003227
3228 return 1;
3229 }
3230 return 0;
3231}
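/*
 * A rough feel for the balanced test above, assuming equal cpu_power on
 * both cpus and imbalance_pct = 125 (a typical domain default):
 * prev_eff_load is scaled by 100 + 25/2 = 112 against this_eff_load's 100,
 * so the waking cpu may carry up to ~12% more effective load than prev_cpu
 * and still count as balanced.
 */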
3232
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003233/*
3234 * find_idlest_group finds and returns the least busy CPU group within the
3235 * domain.
3236 */
3237static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02003238find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003239 int this_cpu, int load_idx)
Gregory Haskinse7693a32008-01-25 21:08:09 +01003240{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07003241 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003242 unsigned long min_load = ULONG_MAX, this_load = 0;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003243 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003244
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003245 do {
3246 unsigned long load, avg_load;
3247 int local_group;
3248 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003249
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003250 /* Skip over this group if it has no CPUs allowed */
3251 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003252 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003253 continue;
3254
3255 local_group = cpumask_test_cpu(this_cpu,
3256 sched_group_cpus(group));
3257
3258 /* Tally up the load of all CPUs in the group */
3259 avg_load = 0;
3260
3261 for_each_cpu(i, sched_group_cpus(group)) {
3262 /* Bias balancing toward cpus of our domain */
3263 if (local_group)
3264 load = source_load(i, load_idx);
3265 else
3266 load = target_load(i, load_idx);
3267
3268 avg_load += load;
3269 }
3270
3271 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02003272 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003273
3274 if (local_group) {
3275 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003276 } else if (avg_load < min_load) {
3277 min_load = avg_load;
3278 idlest = group;
3279 }
3280 } while (group = group->next, group != sd->groups);
3281
3282 if (!idlest || 100*this_load < imbalance*min_load)
3283 return NULL;
3284 return idlest;
3285}
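/*
 * Numeric sketch of the cut-off above, assuming imbalance_pct = 125:
 * imbalance = 100 + 25/2 = 112, so a remote group is only returned when
 * 100 * this_load >= 112 * min_load, i.e. when it is at least ~11% less
 * loaded than the local group; otherwise we stay local.
 */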
3286
3287/*
3288 * find_idlest_cpu - find the idlest cpu among the cpus in group.
3289 */
3290static int
3291find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3292{
3293 unsigned long load, min_load = ULONG_MAX;
3294 int idlest = -1;
3295 int i;
3296
3297 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003298 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003299 load = weighted_cpuload(i);
3300
3301 if (load < min_load || (load == min_load && i == this_cpu)) {
3302 min_load = load;
3303 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003304 }
3305 }
3306
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003307 return idlest;
3308}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003309
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003310/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003311 * Try and locate an idle CPU in the sched_domain.
3312 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003313static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003314{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003315 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07003316 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003317 int i = task_cpu(p);
3318
3319 if (idle_cpu(target))
3320 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003321
3322 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003323 * If the previous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003324 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003325 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3326 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003327
3328 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07003329 * Otherwise, iterate the domains and find an eligible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003330 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01003331 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08003332 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07003333 sg = sd->groups;
3334 do {
3335 if (!cpumask_intersects(sched_group_cpus(sg),
3336 tsk_cpus_allowed(p)))
3337 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02003338
Linus Torvalds37407ea2012-09-16 12:29:43 -07003339 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003340 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07003341 goto next;
3342 }
3343
3344 target = cpumask_first_and(sched_group_cpus(sg),
3345 tsk_cpus_allowed(p));
3346 goto done;
3347next:
3348 sg = sg->next;
3349 } while (sg != sd->groups);
3350 }
3351done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003352 return target;
3353}
3354
3355/*
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003356 * select_task_rq_fair: balance the current task (running on cpu) in domains
3357 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
3358 * SD_BALANCE_EXEC.
3359 *
 3360 * Balance, i.e. select the least loaded group.
3361 *
3362 * Returns the target CPU number, or the same CPU if no balancing is needed.
3363 *
3364 * preempt must be disabled.
3365 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01003366static int
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003367select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003368{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003369 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003370 int cpu = smp_processor_id();
3371 int prev_cpu = task_cpu(p);
3372 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003373 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003374 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003375
Peter Zijlstra29baa742012-04-23 12:11:21 +02003376 if (p->nr_cpus_allowed == 1)
Mike Galbraith76854c72011-11-22 15:18:24 +01003377 return prev_cpu;
3378
Peter Zijlstra0763a662009-09-14 19:37:39 +02003379 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003380 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003381 want_affine = 1;
3382 new_cpu = prev_cpu;
3383 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01003384
Peter Zijlstradce840a2011-04-07 14:09:50 +02003385 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003386 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f428882009-12-16 18:04:34 +01003387 if (!(tmp->flags & SD_LOAD_BALANCE))
3388 continue;
3389
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003390 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003391 * If both cpu and prev_cpu are part of this domain,
3392 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01003393 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003394 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3395 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3396 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08003397 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003398 }
3399
Alex Shif03542a2012-07-26 08:55:34 +08003400 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003401 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003402 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003403
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003404 if (affine_sd) {
Alex Shif03542a2012-07-26 08:55:34 +08003405 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
Peter Zijlstradce840a2011-04-07 14:09:50 +02003406 prev_cpu = cpu;
3407
3408 new_cpu = select_idle_sibling(p, prev_cpu);
3409 goto unlock;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003410 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02003411
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003412 while (sd) {
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003413 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003414 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003415 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003416
Peter Zijlstra0763a662009-09-14 19:37:39 +02003417 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003418 sd = sd->child;
3419 continue;
3420 }
3421
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003422 if (sd_flag & SD_BALANCE_WAKE)
3423 load_idx = sd->wake_idx;
3424
3425 group = find_idlest_group(sd, p, cpu, load_idx);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003426 if (!group) {
3427 sd = sd->child;
3428 continue;
3429 }
3430
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02003431 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003432 if (new_cpu == -1 || new_cpu == cpu) {
3433 /* Now try balancing at a lower domain level of cpu */
3434 sd = sd->child;
3435 continue;
3436 }
3437
3438 /* Now try balancing at a lower domain level of new_cpu */
3439 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003440 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003441 sd = NULL;
3442 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003443 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003444 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02003445 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003446 sd = tmp;
3447 }
3448 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01003449 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02003450unlock:
3451 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01003452
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003453 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003454}
Paul Turner0a74bef2012-10-04 13:18:30 +02003455
3456/*
3457 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3458 * cfs_rq_of(p) references at time of call are still valid and identify the
3459 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3460 * other assumptions, including the state of rq->lock, should be made.
3461 */
3462static void
3463migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3464{
Paul Turneraff3e492012-10-04 13:18:30 +02003465 struct sched_entity *se = &p->se;
3466 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3467
3468 /*
3469 * Load tracking: accumulate removed load so that it can be processed
3470 * when we next update owning cfs_rq under rq->lock. Tasks contribute
3471 * to blocked load iff they have a positive decay-count. It can never
3472 * be negative here since on-rq tasks have decay-count == 0.
3473 */
3474 if (se->avg.decay_count) {
3475 se->avg.decay_count = -__synchronize_entity_decay(se);
3476 atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
3477 }
Paul Turner0a74bef2012-10-04 13:18:30 +02003478}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003479#endif /* CONFIG_SMP */
3480
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003481static unsigned long
3482wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003483{
3484 unsigned long gran = sysctl_sched_wakeup_granularity;
3485
3486 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003487 * Since it's curr that is running now, convert the gran from real-time
 3488 * to virtual-time in its units.
Mike Galbraith13814d42010-03-11 17:17:04 +01003489 *
3490 * By using 'se' instead of 'curr' we penalize light tasks, so
3491 * they get preempted easier. That is, if 'se' < 'curr' then
3492 * the resulting gran will be larger, therefore penalizing the
3493 * lighter, if otoh 'se' > 'curr' then the resulting gran will
3494 * be smaller, again penalizing the lighter task.
3495 *
3496 * This is especially important for buddies when the leftmost
3497 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003498 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08003499 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003500}
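/*
 * Scaling example, assuming sysctl_sched_wakeup_granularity = 1ms (the
 * unscaled default) and the prio_to_weight[] values: for a nice-0 'se'
 * (weight 1024) the virtual gran stays 1ms, while for a nice-5 'se'
 * (weight 335) it grows to roughly 3ms, making it that much harder for the
 * lighter waking task to preempt 'curr'.
 */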
3501
3502/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02003503 * Should 'se' preempt 'curr'.
3504 *
3505 * |s1
3506 * |s2
3507 * |s3
3508 * g
3509 * |<--->|c
3510 *
3511 * w(c, s1) = -1
3512 * w(c, s2) = 0
3513 * w(c, s3) = 1
3514 *
3515 */
3516static int
3517wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3518{
3519 s64 gran, vdiff = curr->vruntime - se->vruntime;
3520
3521 if (vdiff <= 0)
3522 return -1;
3523
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003524 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02003525 if (vdiff > gran)
3526 return 1;
3527
3528 return 0;
3529}
3530
Peter Zijlstra02479092008-11-04 21:25:10 +01003531static void set_last_buddy(struct sched_entity *se)
3532{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003533 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3534 return;
3535
3536 for_each_sched_entity(se)
3537 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003538}
3539
3540static void set_next_buddy(struct sched_entity *se)
3541{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003542 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3543 return;
3544
3545 for_each_sched_entity(se)
3546 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003547}
3548
Rik van Rielac53db52011-02-01 09:51:03 -05003549static void set_skip_buddy(struct sched_entity *se)
3550{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003551 for_each_sched_entity(se)
3552 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05003553}
3554
Peter Zijlstra464b7522008-10-24 11:06:15 +02003555/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003556 * Preempt the current task with a newly woken task if needed:
3557 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02003558static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003559{
3560 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02003561 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003562 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02003563 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003564 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003565
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01003566 if (unlikely(se == pse))
3567 return;
3568
Paul Turner5238cdd2011-07-21 09:43:37 -07003569 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003570 * This is possible from callers such as move_task(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07003571 * unconditionally check_preempt_curr() after an enqueue (which may have
 3572 * led to a throttle). This both saves work and prevents false
3573 * next-buddy nomination below.
3574 */
3575 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3576 return;
3577
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003578 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02003579 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003580 next_buddy_marked = 1;
3581 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02003582
Bharata B Raoaec0a512008-08-28 14:42:49 +05303583 /*
3584 * We can come here with TIF_NEED_RESCHED already set from new task
3585 * wake up path.
Paul Turner5238cdd2011-07-21 09:43:37 -07003586 *
3587 * Note: this also catches the edge-case of curr being in a throttled
3588 * group (e.g. via set_curr_task), since update_curr() (in the
3589 * enqueue of curr) will have resulted in resched being set. This
3590 * prevents us from potentially nominating it as a false LAST_BUDDY
3591 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05303592 */
3593 if (test_tsk_need_resched(curr))
3594 return;
3595
Darren Harta2f5c9a2011-02-22 13:04:33 -08003596 /* Idle tasks are by definition preempted by non-idle tasks. */
3597 if (unlikely(curr->policy == SCHED_IDLE) &&
3598 likely(p->policy != SCHED_IDLE))
3599 goto preempt;
3600
Ingo Molnar91c234b2007-10-15 17:00:18 +02003601 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08003602 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3603 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02003604 */
Ingo Molnar8ed92e512012-10-14 14:28:50 +02003605 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02003606 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003607
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003608 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07003609 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003610 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003611 if (wakeup_preempt_entity(se, pse) == 1) {
3612 /*
3613 * Bias pick_next to pick the sched entity that is
3614 * triggering this preemption.
3615 */
3616 if (!next_buddy_marked)
3617 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003618 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003619 }
Jupyung Leea65ac742009-11-17 18:51:40 +09003620
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003621 return;
3622
3623preempt:
3624 resched_task(curr);
3625 /*
3626 * Only set the backward buddy when the current task is still
3627 * on the rq. This can happen when a wakeup gets interleaved
3628 * with schedule on the ->pre_schedule() or idle_balance()
 3629 * point, either of which can drop the rq lock.
3630 *
3631 * Also, during early boot the idle thread is in the fair class,
3632 * for obvious reasons its a bad idea to schedule back to it.
3633 */
3634 if (unlikely(!se->on_rq || curr == rq->idle))
3635 return;
3636
3637 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3638 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003639}
3640
Ingo Molnarfb8d4722007-08-09 11:16:48 +02003641static struct task_struct *pick_next_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003642{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003643 struct task_struct *p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003644 struct cfs_rq *cfs_rq = &rq->cfs;
3645 struct sched_entity *se;
3646
Tim Blechmann36ace272009-11-24 11:55:45 +01003647 if (!cfs_rq->nr_running)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003648 return NULL;
3649
3650 do {
Ingo Molnar9948f4b2007-08-09 11:16:48 +02003651 se = pick_next_entity(cfs_rq);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01003652 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003653 cfs_rq = group_cfs_rq(se);
3654 } while (cfs_rq);
3655
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003656 p = task_of(se);
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003657 if (hrtick_enabled(rq))
3658 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003659
3660 return p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003661}
3662
3663/*
3664 * Account for a descheduled task:
3665 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02003666static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003667{
3668 struct sched_entity *se = &prev->se;
3669 struct cfs_rq *cfs_rq;
3670
3671 for_each_sched_entity(se) {
3672 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02003673 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003674 }
3675}
3676
Rik van Rielac53db52011-02-01 09:51:03 -05003677/*
3678 * sched_yield() is very simple
3679 *
3680 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3681 */
3682static void yield_task_fair(struct rq *rq)
3683{
3684 struct task_struct *curr = rq->curr;
3685 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3686 struct sched_entity *se = &curr->se;
3687
3688 /*
3689 * Are we the only task in the tree?
3690 */
3691 if (unlikely(rq->nr_running == 1))
3692 return;
3693
3694 clear_buddies(cfs_rq, se);
3695
3696 if (curr->policy != SCHED_BATCH) {
3697 update_rq_clock(rq);
3698 /*
3699 * Update run-time statistics of the 'current'.
3700 */
3701 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01003702 /*
3703 * Tell update_rq_clock() that we've just updated,
3704 * so we don't do microscopic update in schedule()
3705 * and double the fastpath cost.
3706 */
3707 rq->skip_clock_update = 1;
Rik van Rielac53db52011-02-01 09:51:03 -05003708 }
3709
3710 set_skip_buddy(se);
3711}
3712
Mike Galbraithd95f4122011-02-01 09:50:51 -05003713static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3714{
3715 struct sched_entity *se = &p->se;
3716
Paul Turner5238cdd2011-07-21 09:43:37 -07003717 /* throttled hierarchies are not runnable */
3718 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05003719 return false;
3720
3721 /* Tell the scheduler that we'd really like pse to run next. */
3722 set_next_buddy(se);
3723
Mike Galbraithd95f4122011-02-01 09:50:51 -05003724 yield_task_fair(rq);
3725
3726 return true;
3727}
3728
Peter Williams681f3e62007-10-24 18:23:51 +02003729#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003730/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02003731 * Fair scheduling class load-balancing methods.
3732 *
3733 * BASICS
3734 *
3735 * The purpose of load-balancing is to achieve the same basic fairness the
3736 * per-cpu scheduler provides, namely provide a proportional amount of compute
3737 * time to each task. This is expressed in the following equation:
3738 *
3739 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
3740 *
3741 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3742 * W_i,0 is defined as:
3743 *
3744 * W_i,0 = \Sum_j w_i,j (2)
3745 *
3746 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3747 * is derived from the nice value as per prio_to_weight[].
3748 *
3749 * The weight average is an exponential decay average of the instantaneous
3750 * weight:
3751 *
3752 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
3753 *
3754 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
3755 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3756 * can also include other factors [XXX].
3757 *
3758 * To achieve this balance we define a measure of imbalance which follows
3759 * directly from (1):
3760 *
3761 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
3762 *
 3763 * We then move tasks around to minimize the imbalance. In the continuous
 3764 * function space it is obvious this converges; in the discrete case we get
3765 * a few fun cases generally called infeasible weight scenarios.
3766 *
3767 * [XXX expand on:
3768 * - infeasible weights;
3769 * - local vs global optima in the discrete case. ]
3770 *
3771 *
3772 * SCHED DOMAINS
3773 *
3774 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3775 * for all i,j solution, we create a tree of cpus that follows the hardware
3776 * topology where each level pairs two lower groups (or better). This results
3777 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3778 * tree to only the first of the previous level and we decrease the frequency
3779 * of load-balance at each level inv. proportional to the number of cpus in
3780 * the groups.
3781 *
3782 * This yields:
3783 *
3784 * log_2 n 1 n
3785 * \Sum { --- * --- * 2^i } = O(n) (5)
3786 * i = 0 2^i 2^i
3787 * `- size of each group
3788 * | | `- number of cpus doing load-balance
3789 * | `- freq
3790 * `- sum over all levels
3791 *
3792 * Coupled with a limit on how many tasks we can migrate every balance pass,
3793 * this makes (5) the runtime complexity of the balancer.
3794 *
3795 * An important property here is that each CPU is still (indirectly) connected
3796 * to every other cpu in at most O(log n) steps:
3797 *
3798 * The adjacency matrix of the resulting graph is given by:
3799 *
3800 * log_2 n
3801 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
3802 * k = 0
3803 *
3804 * And you'll find that:
3805 *
3806 * A^(log_2 n)_i,j != 0 for all i,j (7)
3807 *
3808 * Showing there's indeed a path between every cpu in at most O(log n) steps.
3809 * The task movement gives a factor of O(m), giving a convergence complexity
3810 * of:
3811 *
3812 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
3813 *
3814 *
3815 * WORK CONSERVING
3816 *
3817 * In order to avoid CPUs going idle while there's still work to do, new idle
3818 * balancing is more aggressive and has the newly idle cpu iterate up the domain
3819 * tree itself instead of relying on other CPUs to bring it work.
3820 *
3821 * This adds some complexity to both (5) and (8) but it reduces the total idle
3822 * time.
3823 *
3824 * [XXX more?]
3825 *
3826 *
3827 * CGROUPS
3828 *
3829 * Cgroups make a horror show out of (2), instead of a simple sum we get:
3830 *
3831 * s_k,i
3832 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
3833 * S_k
3834 *
3835 * Where
3836 *
3837 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
3838 *
3839 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
3840 *
 3841 * The big problem is S_k; it's a global sum needed to compute a local (W_i)
3842 * property.
3843 *
3844 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
3845 * rewrite all of this once again.]
3846 */
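/*
 * Worked instance of (5) for n = 4 cpus: each level contributes
 * (1/2^i) * (n/2^i) * 2^i = n/2^i, so the levels sum to 4 + 2 + 1 = 7 < 2n,
 * illustrating why a full balance sweep of the hierarchy still costs O(n).
 */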
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003847
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09003848static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3849
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003850#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01003851#define LBF_NEED_BREAK 0x02
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303852#define LBF_SOME_PINNED 0x04
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003853
3854struct lb_env {
3855 struct sched_domain *sd;
3856
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003857 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05303858 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003859
3860 int dst_cpu;
3861 struct rq *dst_rq;
3862
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303863 struct cpumask *dst_grpmask;
3864 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003865 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02003866 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08003867 /* The set of CPUs under consideration for load-balancing */
3868 struct cpumask *cpus;
3869
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003870 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01003871
3872 unsigned int loop;
3873 unsigned int loop_break;
3874 unsigned int loop_max;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003875};
3876
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003877/*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003878 * move_task - move a task from one runqueue to another runqueue.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003879 * Both runqueues must be locked.
3880 */
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003881static void move_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003882{
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003883 deactivate_task(env->src_rq, p, 0);
3884 set_task_cpu(p, env->dst_cpu);
3885 activate_task(env->dst_rq, p, 0);
3886 check_preempt_curr(env->dst_rq, p, 0);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003887}
3888
3889/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02003890 * Is this task likely cache-hot:
3891 */
3892static int
3893task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3894{
3895 s64 delta;
3896
3897 if (p->sched_class != &fair_sched_class)
3898 return 0;
3899
3900 if (unlikely(p->policy == SCHED_IDLE))
3901 return 0;
3902
3903 /*
3904 * Buddy candidates are cache hot:
3905 */
3906 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3907 (&p->se == cfs_rq_of(&p->se)->next ||
3908 &p->se == cfs_rq_of(&p->se)->last))
3909 return 1;
3910
3911 if (sysctl_sched_migration_cost == -1)
3912 return 1;
3913 if (sysctl_sched_migration_cost == 0)
3914 return 0;
3915
3916 delta = now - p->se.exec_start;
3917
3918 return delta < (s64)sysctl_sched_migration_cost;
3919}
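/*
 * With the default sysctl_sched_migration_cost of 500000ns (an assumption;
 * it is tunable), a task that executed within the last 0.5ms is treated as
 * cache hot; -1 makes every task hot and 0 makes none.
 */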
3920
3921/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003922 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3923 */
3924static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003925int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003926{
3927 int tsk_cache_hot = 0;
3928 /*
3929 * We do not migrate tasks that are:
Joonsoo Kimd3198082013-04-23 17:27:40 +09003930 * 1) throttled (as per throttled_lb_pair), or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003931 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09003932 * 3) running (obviously), or
3933 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003934 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09003935 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3936 return 0;
3937
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003938 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003939 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303940
Lucas De Marchi41acab82010-03-10 23:37:45 -03003941 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303942
3943 /*
3944 * Remember if this task can be migrated to any other cpu in
3945 * our sched_group. We may want to revisit it if we couldn't
3946 * meet load balance goals by pulling other tasks on src_cpu.
3947 *
3948 * Also avoid computing new_dst_cpu if we have already computed
 3949 * one in the current iteration.
3950 */
3951 if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
3952 return 0;
3953
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003954 /* Prevent dst_cpu from being re-selected via env's cpus */
3955 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
3956 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
3957 env->flags |= LBF_SOME_PINNED;
3958 env->new_dst_cpu = cpu;
3959 break;
3960 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303961 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003962
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003963 return 0;
3964 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303965
 3966 /* Record that we found at least one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003967 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003968
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003969 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03003970 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003971 return 0;
3972 }
3973
3974 /*
3975 * Aggressive migration if:
3976 * 1) task is cache cold, or
3977 * 2) too many balance attempts have failed.
3978 */
3979
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003980 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003981 if (!tsk_cache_hot ||
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003982 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003983
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003984 if (tsk_cache_hot) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003985 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003986 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003987 }
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003988
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003989 return 1;
3990 }
3991
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003992 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
3993 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003994}
3995
Peter Zijlstra897c3952009-12-17 17:45:42 +01003996/*
3997 * move_one_task tries to move exactly one task from busiest to this_rq, as
3998 * part of active balancing operations within "domain".
3999 * Returns 1 if successful and 0 otherwise.
4000 *
4001 * Called with both runqueues locked.
4002 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004003static int move_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01004004{
4005 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004006
Peter Zijlstra367456c2012-02-20 21:49:09 +01004007 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01004008 if (!can_migrate_task(p, env))
4009 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004010
Peter Zijlstra367456c2012-02-20 21:49:09 +01004011 move_task(p, env);
4012 /*
4013 * Right now, this is only the second place move_task()
4014 * is called, so we can safely collect move_task()
4015 * stats here rather than inside move_task().
4016 */
4017 schedstat_inc(env->sd, lb_gained[env->idle]);
4018 return 1;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004019 }
Peter Zijlstra897c3952009-12-17 17:45:42 +01004020 return 0;
4021}
4022
Peter Zijlstra367456c2012-02-20 21:49:09 +01004023static unsigned long task_h_load(struct task_struct *p);
4024
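/*
 * Lock-break batch size for move_tasks(): after scanning this many tasks
 * in one go, LBF_NEED_BREAK is set so the caller can drop and retake the
 * runqueue locks before continuing (sysctl_sched_nr_migrate, which bounds
 * loop_max, defaults to the same value, 32).
 */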
Peter Zijlstraeb953082012-04-17 13:38:40 +02004025static const unsigned int sched_nr_migrate_break = 32;
4026
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004027/*
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004028 * move_tasks tries to move up to imbalance weighted load from busiest to
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004029 * this_rq, as part of a balancing operation within domain "sd".
4030 * Returns 1 if successful and 0 otherwise.
4031 *
4032 * Called with both runqueues locked.
4033 */
4034static int move_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004035{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004036 struct list_head *tasks = &env->src_rq->cfs_tasks;
4037 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004038 unsigned long load;
4039 int pulled = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004040
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004041 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004042 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004043
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004044 while (!list_empty(tasks)) {
4045 p = list_first_entry(tasks, struct task_struct, se.group_node);
4046
Peter Zijlstra367456c2012-02-20 21:49:09 +01004047 env->loop++;
4048 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004049 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004050 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004051
4052 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01004053 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02004054 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004055 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01004056 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02004057 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004058
Joonsoo Kimd3198082013-04-23 17:27:40 +09004059 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01004060 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004061
Peter Zijlstra367456c2012-02-20 21:49:09 +01004062 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004063
Peter Zijlstraeb953082012-04-17 13:38:40 +02004064 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004065 goto next;
4066
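		/*
		 * Skip tasks whose weighted load is more than twice the
		 * remaining imbalance; moving such a task would overshoot
		 * the target by more than it corrects.
		 */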
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004067 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004068 goto next;
4069
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004070 move_task(p, env);
Peter Zijlstraee00e662009-12-17 17:25:20 +01004071 pulled++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004072 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004073
4074#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01004075 /*
4076 * NEWIDLE balancing is a source of latency, so preemptible
4077 * kernels will stop after the first task is pulled to minimize
4078 * the critical section.
4079 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004080 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004081 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004082#endif
4083
Peter Zijlstraee00e662009-12-17 17:25:20 +01004084 /*
4085 * We only want to steal up to the prescribed amount of
4086 * weighted load.
4087 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004088 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004089 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004090
Peter Zijlstra367456c2012-02-20 21:49:09 +01004091 continue;
4092next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004093 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004094 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004095
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004096 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004097 * Right now, this is one of only two places move_task() is called,
4098 * so we can safely collect move_task() stats here rather than
4099 * inside move_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004100 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004101 schedstat_add(env->sd, lb_gained[env->idle], pulled);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004102
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004103 return pulled;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004104}
4105
Peter Zijlstra230059de2009-12-17 17:47:12 +01004106#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004107/*
4108 * update tg->load_weight by folding this cpu's load_avg
4109 */
Paul Turner48a16752012-10-04 13:18:31 +02004110static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004111{
Paul Turner48a16752012-10-04 13:18:31 +02004112 struct sched_entity *se = tg->se[cpu];
4113 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004114
Paul Turner48a16752012-10-04 13:18:31 +02004115 /* throttled entities do not contribute to load */
4116 if (throttled_hierarchy(cfs_rq))
4117 return;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004118
Paul Turneraff3e492012-10-04 13:18:30 +02004119 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004120
Paul Turner82958362012-10-04 13:18:31 +02004121 if (se) {
4122 update_entity_load_avg(se, 1);
4123 /*
4124 * We pivot on our runnable average having decayed to zero for
4125 * list removal. This generally implies that all our children
4126 * have also been removed (modulo rounding error or bandwidth
4127 * control); however, such cases are rare and we can fix these
4128 * at enqueue.
4129 *
4130 * TODO: fix up out-of-order children on enqueue.
4131 */
4132 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4133 list_del_leaf_cfs_rq(cfs_rq);
4134 } else {
Paul Turner48a16752012-10-04 13:18:31 +02004135 struct rq *rq = rq_of(cfs_rq);
Paul Turner82958362012-10-04 13:18:31 +02004136 update_rq_runnable_avg(rq, rq->nr_running);
4137 }
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004138}
4139
Paul Turner48a16752012-10-04 13:18:31 +02004140static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004141{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004142 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02004143 struct cfs_rq *cfs_rq;
4144 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004145
Paul Turner48a16752012-10-04 13:18:31 +02004146 raw_spin_lock_irqsave(&rq->lock, flags);
4147 update_rq_clock(rq);
Peter Zijlstra9763b672011-07-13 13:09:25 +02004148 /*
4149	 * Iterates the task_group tree in a bottom-up fashion, see
4150 * list_add_leaf_cfs_rq() for details.
4151 */
Paul Turner64660c82011-07-21 09:43:36 -07004152 for_each_leaf_cfs_rq(rq, cfs_rq) {
Paul Turner48a16752012-10-04 13:18:31 +02004153 /*
4154 * Note: We may want to consider periodically releasing
4155		 * rq->lock around these updates so that creating many task
4156 * groups does not result in continually extending hold time.
4157 */
4158 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
Paul Turner64660c82011-07-21 09:43:36 -07004159 }
Paul Turner48a16752012-10-04 13:18:31 +02004160
4161 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004162}
4163
Peter Zijlstra9763b672011-07-13 13:09:25 +02004164/*
4165 * Compute the cpu's hierarchical load factor for each task group.
4166 * This needs to be done in a top-down fashion because the load of a child
4167 * group is a fraction of its parent's load.
4168 */
4169static int tg_load_down(struct task_group *tg, void *data)
4170{
4171 unsigned long load;
4172 long cpu = (long)data;
4173
4174 if (!tg->parent) {
4175 load = cpu_rq(cpu)->load.weight;
4176 } else {
4177 load = tg->parent->cfs_rq[cpu]->h_load;
4178 load *= tg->se[cpu]->load.weight;
4179 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
4180 }
4181
4182 tg->cfs_rq[cpu]->h_load = load;
4183
4184 return 0;
4185}
4186
4187static void update_h_load(long cpu)
4188{
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004189 struct rq *rq = cpu_rq(cpu);
4190 unsigned long now = jiffies;
4191
4192 if (rq->h_load_throttle == now)
4193 return;
4194
4195 rq->h_load_throttle = now;
4196
Peter Zijlstra367456c2012-02-20 21:49:09 +01004197 rcu_read_lock();
Peter Zijlstra9763b672011-07-13 13:09:25 +02004198 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstra367456c2012-02-20 21:49:09 +01004199 rcu_read_unlock();
Peter Zijlstra9763b672011-07-13 13:09:25 +02004200}
4201
Peter Zijlstra367456c2012-02-20 21:49:09 +01004202static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004203{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004204 struct cfs_rq *cfs_rq = task_cfs_rq(p);
4205 unsigned long load;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004206
Peter Zijlstra367456c2012-02-20 21:49:09 +01004207 load = p->se.load.weight;
4208 load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004209
Peter Zijlstra367456c2012-02-20 21:49:09 +01004210 return load;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004211}
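/*
 * Worked example with hypothetical numbers: a group entity of weight 2048
 * on a root cfs_rq of total weight 3072 gets
 * h_load = 3072 * 2048 / (3072 + 1) ~= 2047; a task of weight 1024 on that
 * group's cfs_rq of weight 2048 then contributes
 * task_h_load(p) = 1024 * 2047 / (2048 + 1) ~= 1023 to the load moved by
 * move_tasks().
 */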
4212#else
Paul Turner48a16752012-10-04 13:18:31 +02004213static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004214{
4215}
4216
Peter Zijlstra367456c2012-02-20 21:49:09 +01004217static inline void update_h_load(long cpu)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004218{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004219}
4220
4221static unsigned long task_h_load(struct task_struct *p)
4222{
4223 return p->se.load.weight;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004224}
4225#endif
4226
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004227/********** Helpers for find_busiest_group ************************/
4228/*
4229 * sd_lb_stats - Structure to store the statistics of a sched_domain
4230 * during load balancing.
4231 */
4232struct sd_lb_stats {
4233 struct sched_group *busiest; /* Busiest group in this sd */
4234 struct sched_group *this; /* Local group in this sd */
4235 unsigned long total_load; /* Total load of all groups in sd */
4236 unsigned long total_pwr; /* Total power of all groups in sd */
4237 unsigned long avg_load; /* Average load across all groups in sd */
4238
4239 /** Statistics of this group */
4240 unsigned long this_load;
4241 unsigned long this_load_per_task;
4242 unsigned long this_nr_running;
Nikhil Raofab47622010-10-15 13:12:29 -07004243 unsigned long this_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004244 unsigned int this_idle_cpus;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004245
4246 /* Statistics of the busiest group */
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004247 unsigned int busiest_idle_cpus;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004248 unsigned long max_load;
4249 unsigned long busiest_load_per_task;
4250 unsigned long busiest_nr_running;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004251 unsigned long busiest_group_capacity;
Nikhil Raofab47622010-10-15 13:12:29 -07004252 unsigned long busiest_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004253 unsigned int busiest_group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004254
4255 int group_imb; /* Is there imbalance in this sd */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004256};
4257
4258/*
4259 * sg_lb_stats - stats of a sched_group required for load_balancing
4260 */
4261struct sg_lb_stats {
4262	unsigned long avg_load; /* Avg load across the CPUs of the group */
4263 unsigned long group_load; /* Total load over the CPUs of the group */
4264 unsigned long sum_nr_running; /* Nr tasks running in the group */
4265 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
4266 unsigned long group_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004267 unsigned long idle_cpus;
4268 unsigned long group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004269	int group_imb; /* Is there an imbalance in the group? */
Nikhil Raofab47622010-10-15 13:12:29 -07004270 int group_has_capacity; /* Is there extra capacity in the group? */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004271};
4272
4273/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004274 * get_sd_load_idx - Obtain the load index for a given sched domain.
4275 * @sd: The sched_domain whose load_idx is to be obtained.
4276 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
4277 */
4278static inline int get_sd_load_idx(struct sched_domain *sd,
4279 enum cpu_idle_type idle)
4280{
4281 int load_idx;
4282
4283 switch (idle) {
4284 case CPU_NOT_IDLE:
4285 load_idx = sd->busy_idx;
4286 break;
4287
4288 case CPU_NEWLY_IDLE:
4289 load_idx = sd->newidle_idx;
4290 break;
4291 default:
4292 load_idx = sd->idle_idx;
4293 break;
4294 }
4295
4296 return load_idx;
4297}
4298
Li Zefan15f803c2013-03-05 16:07:11 +08004299static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004300{
Nikhil Rao1399fa72011-05-18 10:09:39 -07004301 return SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004302}
4303
4304unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4305{
4306 return default_scale_freq_power(sd, cpu);
4307}
4308
Li Zefan15f803c2013-03-05 16:07:11 +08004309static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004310{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004311 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004312 unsigned long smt_gain = sd->smt_gain;
4313
4314 smt_gain /= weight;
4315
4316 return smt_gain;
4317}
4318
4319unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4320{
4321 return default_scale_smt_power(sd, cpu);
4322}
4323
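/*
 * Roughly, scale_rt_power() returns SCHED_POWER_SCALE * available / total:
 * the fraction of the recent averaging window not consumed by RT (and,
 * when so accounted, irq) time. E.g., hypothetically, an rq whose rt_avg
 * covers ~25% of the window scores ~768 out of 1024.
 */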
Li Zefan15f803c2013-03-05 16:07:11 +08004324static unsigned long scale_rt_power(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004325{
4326 struct rq *rq = cpu_rq(cpu);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004327 u64 total, available, age_stamp, avg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004328
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004329 /*
4330 * Since we're reading these variables without serialization make sure
4331 * we read them once before doing sanity checks on them.
4332 */
4333 age_stamp = ACCESS_ONCE(rq->age_stamp);
4334 avg = ACCESS_ONCE(rq->rt_avg);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004335
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004336 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004337
4338 if (unlikely(total < avg)) {
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004339 /* Ensures that power won't end up being negative */
4340 available = 0;
4341 } else {
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004342 available = total - avg;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004343 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004344
Nikhil Rao1399fa72011-05-18 10:09:39 -07004345 if (unlikely((s64)total < SCHED_POWER_SCALE))
4346 total = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004347
Nikhil Rao1399fa72011-05-18 10:09:39 -07004348 total >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004349
4350 return div_u64(available, total);
4351}
4352
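/*
 * A sketch with hypothetical numbers: power starts at SCHED_POWER_SCALE
 * (1024); an SMT sibling with the default smt_gain of 1178 shared over 2
 * threads scales to ~589, full frequency (1024/1024) leaves that as-is,
 * and a scale_rt_power() of 768 yields 589 * 768 / 1024 = 441.
 */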
4353static void update_cpu_power(struct sched_domain *sd, int cpu)
4354{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004355 unsigned long weight = sd->span_weight;
Nikhil Rao1399fa72011-05-18 10:09:39 -07004356 unsigned long power = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004357 struct sched_group *sdg = sd->groups;
4358
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004359 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4360 if (sched_feat(ARCH_POWER))
4361 power *= arch_scale_smt_power(sd, cpu);
4362 else
4363 power *= default_scale_smt_power(sd, cpu);
4364
Nikhil Rao1399fa72011-05-18 10:09:39 -07004365 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004366 }
4367
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004368 sdg->sgp->power_orig = power;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004369
4370 if (sched_feat(ARCH_POWER))
4371 power *= arch_scale_freq_power(sd, cpu);
4372 else
4373 power *= default_scale_freq_power(sd, cpu);
4374
Nikhil Rao1399fa72011-05-18 10:09:39 -07004375 power >>= SCHED_POWER_SHIFT;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004376
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004377 power *= scale_rt_power(cpu);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004378 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004379
4380 if (!power)
4381 power = 1;
4382
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004383 cpu_rq(cpu)->cpu_power = power;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004384 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004385}
4386
Peter Zijlstra029632f2011-10-25 10:00:11 +02004387void update_group_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004388{
4389 struct sched_domain *child = sd->child;
4390 struct sched_group *group, *sdg = sd->groups;
4391 unsigned long power;
Vincent Guittot4ec44122011-12-12 20:21:08 +01004392 unsigned long interval;
4393
4394 interval = msecs_to_jiffies(sd->balance_interval);
4395 interval = clamp(interval, 1UL, max_load_balance_interval);
4396 sdg->sgp->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004397
4398 if (!child) {
4399 update_cpu_power(sd, cpu);
4400 return;
4401 }
4402
4403 power = 0;
4404
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004405 if (child->flags & SD_OVERLAP) {
4406 /*
4407 * SD_OVERLAP domains cannot assume that child groups
4408 * span the current group.
4409 */
4410
4411 for_each_cpu(cpu, sched_group_cpus(sdg))
4412 power += power_of(cpu);
4413 } else {
4414 /*
4415 * !SD_OVERLAP domains can assume that child groups
4416 * span the current group.
4417 */
4418
4419 group = child->groups;
4420 do {
4421 power += group->sgp->power;
4422 group = group->next;
4423 } while (group != child->groups);
4424 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004425
Peter Zijlstrac3decf02012-05-31 12:05:32 +02004426 sdg->sgp->power_orig = sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004427}
4428
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004429/*
4430 * Try and fix up capacity for tiny siblings; this is needed when
4431 * things like SD_ASYM_PACKING need f_b_g to select another sibling
4432 * which on its own isn't powerful enough.
4433 *
4434 * See update_sd_pick_busiest() and check_asym_packing().
4435 */
4436static inline int
4437fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4438{
4439 /*
Nikhil Rao1399fa72011-05-18 10:09:39 -07004440 * Only siblings can have significantly less than SCHED_POWER_SCALE
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004441 */
Peter Zijlstraa6c75f22011-04-07 14:09:52 +02004442 if (!(sd->flags & SD_SHARE_CPUPOWER))
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004443 return 0;
4444
4445 /*
4446 * If ~90% of the cpu_power is still there, we're good.
4447 */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004448 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004449 return 1;
4450
4451 return 0;
4452}
4453
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004454/**
4455 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004456 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004457 * @group: sched_group whose statistics are to be updated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004458 * @load_idx: Load index of sched_domain of this_cpu for load calc.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004459 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004460 * @balance: Should we balance.
4461 * @sgs: variable to hold the statistics for this group.
4462 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004463static inline void update_sg_lb_stats(struct lb_env *env,
4464 struct sched_group *group, int load_idx,
Michael Wangb94031302012-07-12 16:10:13 +08004465 int local_group, int *balance, struct sg_lb_stats *sgs)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004466{
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004467 unsigned long nr_running, max_nr_running, min_nr_running;
4468 unsigned long load, max_cpu_load, min_cpu_load;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004469 unsigned int balance_cpu = -1, first_idle_cpu = 0;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004470 unsigned long avg_load_per_task = 0;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004471 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004472
Gautham R Shenoy871e35b2010-01-20 14:02:44 -06004473 if (local_group)
Peter Zijlstrac1174872012-05-31 14:47:33 +02004474 balance_cpu = group_balance_cpu(group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004475
4476 /* Tally up the load of all CPUs in the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004477 max_cpu_load = 0;
4478 min_cpu_load = ~0UL;
Nikhil Rao2582f0e2010-10-13 12:09:36 -07004479 max_nr_running = 0;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004480 min_nr_running = ~0UL;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004481
Michael Wangb94031302012-07-12 16:10:13 +08004482 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004483 struct rq *rq = cpu_rq(i);
4484
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004485 nr_running = rq->nr_running;
4486
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004487 /* Bias balancing toward cpus of our domain */
4488 if (local_group) {
Peter Zijlstrac1174872012-05-31 14:47:33 +02004489 if (idle_cpu(i) && !first_idle_cpu &&
4490 cpumask_test_cpu(i, sched_group_mask(group))) {
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004491 first_idle_cpu = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004492 balance_cpu = i;
4493 }
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004494
4495 load = target_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004496 } else {
4497 load = source_load(i, load_idx);
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004498 if (load > max_cpu_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004499 max_cpu_load = load;
4500 if (min_cpu_load > load)
4501 min_cpu_load = load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004502
4503 if (nr_running > max_nr_running)
4504 max_nr_running = nr_running;
4505 if (min_nr_running > nr_running)
4506 min_nr_running = nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004507 }
4508
4509 sgs->group_load += load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004510 sgs->sum_nr_running += nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004511 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004512 if (idle_cpu(i))
4513 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004514 }
4515
4516 /*
4517	 * The first idle cpu or the first cpu (busiest) in this sched group
4518	 * is eligible for doing load balancing at this and above
4519	 * domains. In the newly idle case, we will allow all the cpus
4520	 * to do the newly idle load balance.
4521 */
Vincent Guittot4ec44122011-12-12 20:21:08 +01004522 if (local_group) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004523 if (env->idle != CPU_NEWLY_IDLE) {
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004524 if (balance_cpu != env->dst_cpu) {
Vincent Guittot4ec44122011-12-12 20:21:08 +01004525 *balance = 0;
4526 return;
4527 }
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004528 update_group_power(env->sd, env->dst_cpu);
Vincent Guittot4ec44122011-12-12 20:21:08 +01004529 } else if (time_after_eq(jiffies, group->sgp->next_update))
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004530 update_group_power(env->sd, env->dst_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004531 }
4532
4533 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004534 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004535
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004536 /*
4537 * Consider the group unbalanced when the imbalance is larger
Peter Zijlstra866ab432011-02-21 18:56:47 +01004538 * than the average weight of a task.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004539 *
4540 * APZ: with cgroup the avg task weight can vary wildly and
4541 * might not be a suitable number - should we keep a
4542 * normalized nr_running number somewhere that negates
4543 * the hierarchy?
4544 */
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004545 if (sgs->sum_nr_running)
4546 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004547
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004548 if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
4549 (max_nr_running - min_nr_running) > 1)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004550 sgs->group_imb = 1;
4551
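	/*
	 * group_capacity is the group's power rounded to the nearest multiple
	 * of SCHED_POWER_SCALE, i.e. roughly how many full-power cpus' worth
	 * of tasks it can carry (e.g. power 1530 -> 1, power 1537 -> 2).
	 */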
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004552 sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
Nikhil Rao1399fa72011-05-18 10:09:39 -07004553 SCHED_POWER_SCALE);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004554 if (!sgs->group_capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004555 sgs->group_capacity = fix_small_capacity(env->sd, group);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004556 sgs->group_weight = group->group_weight;
Nikhil Raofab47622010-10-15 13:12:29 -07004557
4558 if (sgs->group_capacity > sgs->sum_nr_running)
4559 sgs->group_has_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004560}
4561
4562/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10004563 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07004564 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004565 * @sds: sched_domain statistics
4566 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10004567 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10004568 *
4569 * Determine if @sg is a busier group than the previously selected
4570 * busiest group.
4571 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004572static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10004573 struct sd_lb_stats *sds,
4574 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004575 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004576{
4577 if (sgs->avg_load <= sds->max_load)
4578 return false;
4579
4580 if (sgs->sum_nr_running > sgs->group_capacity)
4581 return true;
4582
4583 if (sgs->group_imb)
4584 return true;
4585
4586 /*
4587 * ASYM_PACKING needs to move all the work to the lowest
4588 * numbered CPUs in the group, therefore mark all groups
4589 * higher than ourself as busy.
4590 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004591 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4592 env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10004593 if (!sds->busiest)
4594 return true;
4595
4596 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4597 return true;
4598 }
4599
4600 return false;
4601}
4602
4603/**
Hui Kang461819a2011-10-11 23:00:59 -04004604 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004605 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004606 * @balance: Should we balance.
4607 * @sds: variable to hold the statistics for this sched_domain.
4608 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004609static inline void update_sd_lb_stats(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08004610 int *balance, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004611{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004612 struct sched_domain *child = env->sd->child;
4613 struct sched_group *sg = env->sd->groups;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004614 struct sg_lb_stats sgs;
4615 int load_idx, prefer_sibling = 0;
4616
4617 if (child && child->flags & SD_PREFER_SIBLING)
4618 prefer_sibling = 1;
4619
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004620 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004621
4622 do {
4623 int local_group;
4624
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004625 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004626 memset(&sgs, 0, sizeof(sgs));
Michael Wangb94031302012-07-12 16:10:13 +08004627 update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004628
Peter Zijlstra8f190fb2009-12-24 14:18:21 +01004629 if (local_group && !(*balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004630 return;
4631
4632 sds->total_load += sgs.group_load;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004633 sds->total_pwr += sg->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004634
4635 /*
4636 * In case the child domain prefers tasks go to siblings
Michael Neuling532cb4c2010-06-08 14:57:02 +10004637 * first, lower the sg capacity to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07004638 * and move all the excess tasks away. We lower the capacity
4639 * of a group only if the local group has the capacity to fit
4640 * these excess tasks, i.e. nr_running < group_capacity. The
4641 * extra check prevents the case where you always pull from the
4642 * heaviest group when it is already under-utilized (possible
4643	 * when a large-weight task outweighs the tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004644 */
Nikhil Rao75dd3212010-10-15 13:12:30 -07004645 if (prefer_sibling && !local_group && sds->this_has_capacity)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004646 sgs.group_capacity = min(sgs.group_capacity, 1UL);
4647
4648 if (local_group) {
4649 sds->this_load = sgs.avg_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10004650 sds->this = sg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004651 sds->this_nr_running = sgs.sum_nr_running;
4652 sds->this_load_per_task = sgs.sum_weighted_load;
Nikhil Raofab47622010-10-15 13:12:29 -07004653 sds->this_has_capacity = sgs.group_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004654 sds->this_idle_cpus = sgs.idle_cpus;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004655 } else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004656 sds->max_load = sgs.avg_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10004657 sds->busiest = sg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004658 sds->busiest_nr_running = sgs.sum_nr_running;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004659 sds->busiest_idle_cpus = sgs.idle_cpus;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004660 sds->busiest_group_capacity = sgs.group_capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004661 sds->busiest_load_per_task = sgs.sum_weighted_load;
Nikhil Raofab47622010-10-15 13:12:29 -07004662 sds->busiest_has_capacity = sgs.group_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004663 sds->busiest_group_weight = sgs.group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004664 sds->group_imb = sgs.group_imb;
4665 }
4666
Michael Neuling532cb4c2010-06-08 14:57:02 +10004667 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004668 } while (sg != env->sd->groups);
Michael Neuling532cb4c2010-06-08 14:57:02 +10004669}
4670
Michael Neuling532cb4c2010-06-08 14:57:02 +10004671/**
4672 * check_asym_packing - Check to see if the group is packed into the
4673 * sched domain.
4674 *
4675 * This is primarily intended to be used at the sibling level. Some
4676 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4677 * case of POWER7, it can move to lower SMT modes only when higher
4678 * threads are idle. When in lower SMT modes, the threads will
4679 * perform better since they share fewer core resources. Hence when we
4680 * have idle threads, we want them to be the higher ones.
4681 *
4682 * This packing function is run on idle threads. It checks to see if
4683 * the busiest CPU in this domain (core in the P7 case) has a higher
4684 * CPU number than the packing function is being run on. Here we are
4685 * assuming a lower CPU number will be equivalent to a lower SMT
4686 * thread number.
4687 *
Michael Neulingb6b12292010-06-10 12:06:21 +10004688 * Returns 1 when packing is required and a task should be moved to
4689 * this CPU. The amount of the imbalance is returned in env->imbalance.
4690 *
Randy Dunlapcd968912012-06-08 13:18:33 -07004691 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004692 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10004693 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004694static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004695{
4696 int busiest_cpu;
4697
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004698 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10004699 return 0;
4700
4701 if (!sds->busiest)
4702 return 0;
4703
4704 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004705 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004706 return 0;
4707
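	/*
	 * Convert the busiest group's power-scaled avg_load back into plain
	 * weighted-load units, which is what move_tasks() consumes.
	 */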
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004708 env->imbalance = DIV_ROUND_CLOSEST(
4709 sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
4710
Michael Neuling532cb4c2010-06-08 14:57:02 +10004711 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004712}
4713
4714/**
4715 * fix_small_imbalance - Calculate the minor imbalance that exists
4716 * amongst the groups of a sched_domain, during
4717 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004718 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004719 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004720 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004721static inline
4722void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004723{
4724 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4725 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004726 unsigned long scaled_busy_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004727
4728 if (sds->this_nr_running) {
4729 sds->this_load_per_task /= sds->this_nr_running;
4730 if (sds->busiest_load_per_task >
4731 sds->this_load_per_task)
4732 imbn = 1;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004733 } else {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004734 sds->this_load_per_task =
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004735 cpu_avg_load_per_task(env->dst_cpu);
4736 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004737
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004738 scaled_busy_load_per_task = sds->busiest_load_per_task
Nikhil Rao1399fa72011-05-18 10:09:39 -07004739 * SCHED_POWER_SCALE;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004740 scaled_busy_load_per_task /= sds->busiest->sgp->power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004741
4742 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4743 (scaled_busy_load_per_task * imbn)) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004744 env->imbalance = sds->busiest_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004745 return;
4746 }
4747
4748 /*
4749 * OK, we don't have enough imbalance to justify moving tasks,
4750 * however we may be able to increase total CPU power used by
4751 * moving them.
4752 */
4753
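	/*
	 * Sketch of the estimate: pwr_now and pwr_move are the power-weighted
	 * load served before and after hypothetically moving one
	 * busiest_load_per_task worth of load; an imbalance is only declared
	 * if the move would raise total throughput.
	 */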
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004754 pwr_now += sds->busiest->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004755 min(sds->busiest_load_per_task, sds->max_load);
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004756 pwr_now += sds->this->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004757 min(sds->this_load_per_task, sds->this_load);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004758 pwr_now /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004759
4760 /* Amount of load we'd subtract */
Nikhil Rao1399fa72011-05-18 10:09:39 -07004761 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004762 sds->busiest->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004763 if (sds->max_load > tmp)
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004764 pwr_move += sds->busiest->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004765 min(sds->busiest_load_per_task, sds->max_load - tmp);
4766
4767 /* Amount of load we'd add */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004768 if (sds->max_load * sds->busiest->sgp->power <
Nikhil Rao1399fa72011-05-18 10:09:39 -07004769 sds->busiest_load_per_task * SCHED_POWER_SCALE)
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004770 tmp = (sds->max_load * sds->busiest->sgp->power) /
4771 sds->this->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004772 else
Nikhil Rao1399fa72011-05-18 10:09:39 -07004773 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004774 sds->this->sgp->power;
4775 pwr_move += sds->this->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004776 min(sds->this_load_per_task, sds->this_load + tmp);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004777 pwr_move /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004778
4779 /* Move if we gain throughput */
4780 if (pwr_move > pwr_now)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004781 env->imbalance = sds->busiest_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004782}
4783
4784/**
4785 * calculate_imbalance - Calculate the amount of imbalance present within the
4786 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004787 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004788 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004789 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004790static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004791{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004792 unsigned long max_pull, load_above_capacity = ~0UL;
4793
4794 sds->busiest_load_per_task /= sds->busiest_nr_running;
4795 if (sds->group_imb) {
4796 sds->busiest_load_per_task =
4797 min(sds->busiest_load_per_task, sds->avg_load);
4798 }
4799
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004800 /*
4801 * In the presence of smp nice balancing, certain scenarios can have
4802	 * max load less than avg load (as we skip the groups at or below
4803	 * their cpu_power while calculating max_load).
4804 */
4805 if (sds->max_load < sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004806 env->imbalance = 0;
4807 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004808 }
4809
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004810 if (!sds->group_imb) {
4811 /*
4812 * Don't want to pull so many tasks that a group would go idle.
4813 */
4814 load_above_capacity = (sds->busiest_nr_running -
4815 sds->busiest_group_capacity);
4816
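		/*
		 * Express the surplus nr_running in load units
		 * (SCHED_LOAD_SCALE per nice-0 task) and power-scale it so it
		 * is comparable with the avg_load numbers above.
		 */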
Nikhil Rao1399fa72011-05-18 10:09:39 -07004817 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004818
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004819 load_above_capacity /= sds->busiest->sgp->power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004820 }
4821
4822 /*
4823 * We're trying to get all the cpus to the average_load, so we don't
4824 * want to push ourselves above the average load, nor do we wish to
4825 * reduce the max loaded cpu below the average load. At the same time,
4826 * we also don't want to reduce the group load below the group capacity
4827 * (so that we can implement power-savings policies etc). Thus we look
4828 * for the minimum possible imbalance.
4829 * Be careful of negative numbers as they'll appear as very large values
4830 * with unsigned longs.
4831 */
4832 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004833
4834 /* How much load to actually move to equalise the imbalance */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004835 env->imbalance = min(max_pull * sds->busiest->sgp->power,
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004836 (sds->avg_load - sds->this_load) * sds->this->sgp->power)
Nikhil Rao1399fa72011-05-18 10:09:39 -07004837 / SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004838
4839 /*
4840	 * If env->imbalance is less than the average load per runnable task,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004841	 * there is no guarantee that any tasks will be moved, so we consider
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004842	 * bumping its value to force at least one task to be
4843	 * moved.
4844 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004845 if (env->imbalance < sds->busiest_load_per_task)
4846 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004847
4848}
Nikhil Raofab47622010-10-15 13:12:29 -07004849
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004850/******* find_busiest_group() helpers end here *********************/
4851
4852/**
4853 * find_busiest_group - Returns the busiest group within the sched_domain
4854 * if there is an imbalance. If there isn't an imbalance, and
4855 * the user has opted for power-savings, it returns a group whose
4856 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4857 * such a group exists.
4858 *
4859 * Also calculates the amount of weighted load which should be moved
4860 * to restore balance.
4861 *
Randy Dunlapcd968912012-06-08 13:18:33 -07004862 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004863 * @balance: Pointer to a variable indicating if this_cpu
4864 *			is the appropriate cpu to perform load balancing at this level.
4865 *
4866 * Returns: - the busiest group if imbalance exists.
4867 * - If no imbalance and user has opted for power-savings balance,
4868 * return the least loaded group whose CPUs can be
4869 * put to idle by rebalancing its tasks onto our group.
4870 */
4871static struct sched_group *
Michael Wangb94031302012-07-12 16:10:13 +08004872find_busiest_group(struct lb_env *env, int *balance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004873{
4874 struct sd_lb_stats sds;
4875
4876 memset(&sds, 0, sizeof(sds));
4877
4878 /*
4879	 * Compute the various statistics relevant for load balancing at
4880 * this level.
4881 */
Michael Wangb94031302012-07-12 16:10:13 +08004882 update_sd_lb_stats(env, balance, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004883
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004884 /*
4885 * this_cpu is not the appropriate cpu to perform load balancing at
4886 * this level.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004887 */
Peter Zijlstra8f190fb2009-12-24 14:18:21 +01004888 if (!(*balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004889 goto ret;
4890
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004891 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4892 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10004893 return sds.busiest;
4894
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004895 /* There is no busy sibling group to pull tasks from */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004896 if (!sds.busiest || sds.busiest_nr_running == 0)
4897 goto out_balanced;
4898
Nikhil Rao1399fa72011-05-18 10:09:39 -07004899 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
Ken Chenb0432d82011-04-07 17:23:22 -07004900
Peter Zijlstra866ab432011-02-21 18:56:47 +01004901 /*
4902 * If the busiest group is imbalanced the below checks don't
4903	 * work because they assume all things are equal, which typically
4904 * isn't true due to cpus_allowed constraints and the like.
4905 */
4906 if (sds.group_imb)
4907 goto force_balance;
4908
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004909 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004910 if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
Nikhil Raofab47622010-10-15 13:12:29 -07004911 !sds.busiest_has_capacity)
4912 goto force_balance;
4913
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004914 /*
4915 * If the local group is more busy than the selected busiest group
4916 * don't try and pull any tasks.
4917 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004918 if (sds.this_load >= sds.max_load)
4919 goto out_balanced;
4920
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004921 /*
4922 * Don't pull any tasks if this group is already above the domain
4923 * average load.
4924 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004925 if (sds.this_load >= sds.avg_load)
4926 goto out_balanced;
4927
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004928 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004929 /*
4930		 * This cpu is idle. If the busiest group doesn't
4931		 * have more tasks than the number of available cpus, and
4932		 * there is no imbalance between this and the busiest group
4933		 * wrt idle cpus, it is balanced.
4934 */
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004935 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004936 sds.busiest_nr_running <= sds.busiest_group_weight)
4937 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004938 } else {
4939 /*
4940 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4941 * imbalance_pct to be conservative.
4942 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004943 if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004944 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004945 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004946
Nikhil Raofab47622010-10-15 13:12:29 -07004947force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004948 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004949 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004950 return sds.busiest;
4951
4952out_balanced:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004953ret:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004954 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004955 return NULL;
4956}
4957
4958/*
4959 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4960 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004961static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08004962 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004963{
4964 struct rq *busiest = NULL, *rq;
4965 unsigned long max_load = 0;
4966 int i;
4967
4968 for_each_cpu(i, sched_group_cpus(group)) {
4969 unsigned long power = power_of(i);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004970 unsigned long capacity = DIV_ROUND_CLOSEST(power,
4971 SCHED_POWER_SCALE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004972 unsigned long wl;
4973
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004974 if (!capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004975 capacity = fix_small_capacity(env->sd, group);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004976
Michael Wangb94031302012-07-12 16:10:13 +08004977 if (!cpumask_test_cpu(i, env->cpus))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004978 continue;
4979
4980 rq = cpu_rq(i);
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004981 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004982
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004983 /*
4984 * When comparing with imbalance, use weighted_cpuload()
4985 * which is not scaled with the cpu power.
4986 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004987 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004988 continue;
4989
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004990 /*
4991 * For the load comparisons with the other cpu's, consider
4992 * the weighted_cpuload() scaled with the cpu power, so that
4993 * the load can be moved away from the cpu that is potentially
4994 * running at a lower capacity.
4995 */
Nikhil Rao1399fa72011-05-18 10:09:39 -07004996 wl = (wl * SCHED_POWER_SCALE) / power;
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004997
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004998 if (wl > max_load) {
4999 max_load = wl;
5000 busiest = rq;
5001 }
5002 }
5003
5004 return busiest;
5005}
5006
5007/*
5008 * Max backoff if we encounter pinned tasks. The exact value is pretty
5009 * arbitrary, as long as it is large enough.
5010 */
5011#define MAX_PINNED_INTERVAL 512
5012
5013/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09005014DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005015
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005016static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005017{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005018 struct sched_domain *sd = env->sd;
5019
5020 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005021
5022 /*
5023 * ASYM_PACKING needs to force migrate tasks from busy but
5024 * higher numbered CPUs in order to pack all tasks in the
5025 * lowest numbered CPUs.
5026 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005027 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005028 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005029 }
5030
5031 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5032}
5033
Tejun Heo969c7922010-05-06 18:49:21 +02005034static int active_load_balance_cpu_stop(void *data);
5035
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005036/*
5037 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5038 * tasks if there is an imbalance.
5039 */
5040static int load_balance(int this_cpu, struct rq *this_rq,
5041 struct sched_domain *sd, enum cpu_idle_type idle,
5042 int *balance)
5043{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305044 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005045 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005046 struct rq *busiest;
5047 unsigned long flags;
Joonsoo Kime6252c32013-04-23 17:27:41 +09005048 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005049
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005050 struct lb_env env = {
5051 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005052 .dst_cpu = this_cpu,
5053 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305054 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005055 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02005056 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08005057 .cpus = cpus,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005058 };
5059
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005060 /*
5061 * For NEWLY_IDLE load_balancing, we don't need to consider
5062 * other cpus in our group
5063 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005064 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005065 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005066
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005067 cpumask_copy(cpus, cpu_active_mask);
5068
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005069 schedstat_inc(sd, lb_count[idle]);
5070
5071redo:
Michael Wangb94031302012-07-12 16:10:13 +08005072 group = find_busiest_group(&env, balance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005073
5074 if (*balance == 0)
5075 goto out_balanced;
5076
5077 if (!group) {
5078 schedstat_inc(sd, lb_nobusyg[idle]);
5079 goto out_balanced;
5080 }
5081
Michael Wangb94031302012-07-12 16:10:13 +08005082 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005083 if (!busiest) {
5084 schedstat_inc(sd, lb_nobusyq[idle]);
5085 goto out_balanced;
5086 }
5087
Michael Wang78feefc2012-08-06 16:41:59 +08005088 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005089
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005090 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005091
5092 ld_moved = 0;
5093 if (busiest->nr_running > 1) {
5094 /*
5095 * Attempt to move tasks. If find_busiest_group has found
5096 * an imbalance but busiest->nr_running <= 1, the group is
5097 * still unbalanced. ld_moved simply stays zero, so it is
5098 * correctly treated as an imbalance.
5099 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005100 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02005101 env.src_cpu = busiest->cpu;
5102 env.src_rq = busiest;
5103 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005104
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005105 update_h_load(env.src_cpu);
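		/*
		 * more_balance re-enters move_tasks() either after a lock
		 * break (LBF_NEED_BREAK), with env.loop preserved so at most
		 * loop_max tasks are examined, or aimed at new_dst_cpu when
		 * some tasks were pinned (LBF_SOME_PINNED).
		 */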
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005106more_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005107 local_irq_save(flags);
Michael Wang78feefc2012-08-06 16:41:59 +08005108 double_rq_lock(env.dst_rq, busiest);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305109
5110 /*
5111 * cur_ld_moved - load moved in current iteration
5112 * ld_moved - cumulative load moved across iterations
5113 */
5114 cur_ld_moved = move_tasks(&env);
5115 ld_moved += cur_ld_moved;
Michael Wang78feefc2012-08-06 16:41:59 +08005116 double_rq_unlock(env.dst_rq, busiest);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005117 local_irq_restore(flags);
5118
5119 /*
5120 * some other cpu did the load balance for us.
5121 */
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305122 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5123 resched_cpu(env.dst_cpu);
5124
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09005125 if (env.flags & LBF_NEED_BREAK) {
5126 env.flags &= ~LBF_NEED_BREAK;
5127 goto more_balance;
5128 }
5129
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305130 /*
5131 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5132 * us and move them to an alternate dst_cpu in our sched_group
5133 * where they can run. The upper limit on how many times we
5134 * iterate on same src_cpu is dependent on number of cpus in our
5135 * sched_group.
5136 *
5137 * This changes load balance semantics a bit on who can move
5138 * load to a given_cpu. In addition to the given_cpu itself
5139		 * (or an ilb_cpu acting on its behalf where given_cpu is
5140		 * nohz-idle), we now have balance_cpu in a position to move
5141		 * load to given_cpu. In rare situations, this may cause
5142		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
5143		 * _independently_ and at the _same_ time to move some load to
5144		 * given_cpu), causing excess load to be moved to given_cpu.
5145		 * This, however, should not happen often in practice, and
5146 * moreover subsequent load balance cycles should correct the
5147 * excess load moved.
5148 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005149 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305150
Michael Wang78feefc2012-08-06 16:41:59 +08005151 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305152 env.dst_cpu = env.new_dst_cpu;
5153 env.flags &= ~LBF_SOME_PINNED;
5154 env.loop = 0;
5155 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005156
5157			/* Prevent re-selecting dst_cpu via env's cpus */
5158 cpumask_clear_cpu(env.dst_cpu, env.cpus);
5159
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305160 /*
5161 * Go back to "more_balance" rather than "redo" since we
5162 * need to continue with same src_cpu.
5163 */
5164 goto more_balance;
5165 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005166
5167 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005168 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005169 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305170 if (!cpumask_empty(cpus)) {
5171 env.loop = 0;
5172 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005173 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305174 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005175 goto out_balanced;
5176 }
5177 }
5178
5179 if (!ld_moved) {
5180 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07005181 /*
5182 * Increment the failure counter only on periodic balance.
5183		 * We do not want newidle balance, which can be very
5184		 * frequent, to pollute the failure counter and cause
5185		 * excessive cache_hot migrations and active balances.
5186 */
5187 if (idle != CPU_NEWLY_IDLE)
5188 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005189
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005190 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005191 raw_spin_lock_irqsave(&busiest->lock, flags);
5192
Tejun Heo969c7922010-05-06 18:49:21 +02005193			/*
5194			 * Don't kick active_load_balance_cpu_stop if the curr
5195			 * task on the busiest cpu can't be moved to this_cpu.
5196			 */
5197 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02005198 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005199 raw_spin_unlock_irqrestore(&busiest->lock,
5200 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005201 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005202 goto out_one_pinned;
5203 }
5204
Tejun Heo969c7922010-05-06 18:49:21 +02005205 /*
5206 * ->active_balance synchronizes accesses to
5207 * ->active_balance_work. Once set, it's cleared
5208 * only after active load balance is finished.
5209 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005210 if (!busiest->active_balance) {
5211 busiest->active_balance = 1;
5212 busiest->push_cpu = this_cpu;
5213 active_balance = 1;
5214 }
5215 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005216
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005217 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02005218 stop_one_cpu_nowait(cpu_of(busiest),
5219 active_load_balance_cpu_stop, busiest,
5220 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005221 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005222
5223 /*
5224 * We've kicked active balancing, reset the failure
5225 * counter.
5226 */
5227 sd->nr_balance_failed = sd->cache_nice_tries+1;
5228 }
5229 } else
5230 sd->nr_balance_failed = 0;
5231
5232 if (likely(!active_balance)) {
5233 /* We were unbalanced, so reset the balancing interval */
5234 sd->balance_interval = sd->min_interval;
5235 } else {
5236 /*
5237 * If we've begun active balancing, start to back off. This
5238 * case may not be covered by the all_pinned logic if there
5239 * is only 1 task on the busy runqueue (because we don't call
5240 * move_tasks).
5241 */
5242 if (sd->balance_interval < sd->max_interval)
5243 sd->balance_interval *= 2;
5244 }
5245
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005246 goto out;
5247
5248out_balanced:
5249 schedstat_inc(sd, lb_balanced[idle]);
5250
5251 sd->nr_balance_failed = 0;
5252
5253out_one_pinned:
5254 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005255 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02005256 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005257 (sd->balance_interval < sd->max_interval))
5258 sd->balance_interval *= 2;
5259
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08005260 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005261out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005262 return ld_moved;
5263}
5264
5265/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005266 * idle_balance is called by schedule() if this_cpu is about to become
5267 * idle. Attempts to pull tasks from other CPUs.
5268 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005269void idle_balance(int this_cpu, struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005270{
5271 struct sched_domain *sd;
5272 int pulled_task = 0;
5273 unsigned long next_balance = jiffies + HZ;
5274
Frederic Weisbecker78becc22013-04-12 01:51:02 +02005275 this_rq->idle_stamp = rq_clock(this_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005276
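	/*
	 * If the average idle period is shorter than the cost of migrating
	 * a task here, a balance pass cannot pay for itself; bail out.
	 */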
5277 if (this_rq->avg_idle < sysctl_sched_migration_cost)
5278 return;
5279
Peter Zijlstraf492e122009-12-23 15:29:42 +01005280 /*
5281 * Drop the rq->lock, but keep IRQ/preempt disabled.
5282 */
5283 raw_spin_unlock(&this_rq->lock);
5284
Paul Turner48a16752012-10-04 13:18:31 +02005285 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02005286 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005287 for_each_domain(this_cpu, sd) {
5288 unsigned long interval;
Peter Zijlstraf492e122009-12-23 15:29:42 +01005289 int balance = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005290
5291 if (!(sd->flags & SD_LOAD_BALANCE))
5292 continue;
5293
Peter Zijlstraf492e122009-12-23 15:29:42 +01005294 if (sd->flags & SD_BALANCE_NEWIDLE) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005295 /* If we've pulled tasks over stop searching: */
Peter Zijlstraf492e122009-12-23 15:29:42 +01005296 pulled_task = load_balance(this_cpu, this_rq,
5297 sd, CPU_NEWLY_IDLE, &balance);
5298 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005299
5300 interval = msecs_to_jiffies(sd->balance_interval);
5301 if (time_after(next_balance, sd->last_balance + interval))
5302 next_balance = sd->last_balance + interval;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005303 if (pulled_task) {
5304 this_rq->idle_stamp = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005305 break;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005306 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005307 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005308 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01005309
5310 raw_spin_lock(&this_rq->lock);
5311
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005312 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5313 /*
5314 * We are going idle. next_balance may be set based on
5315 * a busy processor. So reset next_balance.
5316 */
5317 this_rq->next_balance = next_balance;
5318 }
5319}
5320
5321/*
Tejun Heo969c7922010-05-06 18:49:21 +02005322 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
5323 * running tasks off the busiest CPU onto idle CPUs. It requires at
5324 * least 1 task to be running on each physical CPU where possible, and
5325 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005326 */
Tejun Heo969c7922010-05-06 18:49:21 +02005327static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005328{
Tejun Heo969c7922010-05-06 18:49:21 +02005329 struct rq *busiest_rq = data;
5330 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005331 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02005332 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005333 struct sched_domain *sd;
Tejun Heo969c7922010-05-06 18:49:21 +02005334
5335 raw_spin_lock_irq(&busiest_rq->lock);
5336
5337 /* make sure the requested cpu hasn't gone down in the meantime */
5338 if (unlikely(busiest_cpu != smp_processor_id() ||
5339 !busiest_rq->active_balance))
5340 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005341
5342 /* Is there any task to move? */
5343 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02005344 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005345
5346 /*
5347	 * This condition is "impossible"; if it occurs,
5348 * we need to fix it. Originally reported by
5349 * Bjorn Helgaas on a 128-cpu setup.
5350 */
5351 BUG_ON(busiest_rq == target_rq);
5352
5353 /* move a task from busiest_rq to target_rq */
5354 double_lock_balance(busiest_rq, target_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005355
5356 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02005357 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005358 for_each_domain(target_cpu, sd) {
5359 if ((sd->flags & SD_LOAD_BALANCE) &&
5360 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5361 break;
5362 }
5363
5364 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005365 struct lb_env env = {
5366 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005367 .dst_cpu = target_cpu,
5368 .dst_rq = target_rq,
5369 .src_cpu = busiest_rq->cpu,
5370 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005371 .idle = CPU_IDLE,
5372 };
5373
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005374 schedstat_inc(sd, alb_count);
5375
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005376 if (move_one_task(&env))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005377 schedstat_inc(sd, alb_pushed);
5378 else
5379 schedstat_inc(sd, alb_failed);
5380 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005381 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005382 double_unlock_balance(busiest_rq, target_rq);
Tejun Heo969c7922010-05-06 18:49:21 +02005383out_unlock:
5384 busiest_rq->active_balance = 0;
5385 raw_spin_unlock_irq(&busiest_rq->lock);
5386 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005387}
5388
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005389#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005390/*
5391 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005392 * - When one of the busy CPUs notices that idle rebalancing may be
5393 *   needed, it kicks the idle load balancer, which then does idle
5394 * load balancing for all the idle CPUs.
5395 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005396static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005397 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005398 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005399 unsigned long next_balance; /* in jiffy units */
5400} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005401
Peter Zijlstra8e7fbcb2012-01-09 11:28:35 +01005402static inline int find_new_ilb(int call_cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005403{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005404 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005405
Suresh Siddha786d6dc72011-12-01 17:07:35 -08005406 if (ilb < nr_cpu_ids && idle_cpu(ilb))
5407 return ilb;
5408
5409 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005410}
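/*
 * Note: cpumask_first() above means the lowest-numbered nohz-idle cpu is
 * always preferred as the idle load balancer.
 */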
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005411
5412/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005413 * Kick a CPU to do the nohz balancing if it is time for it. We pick the
5414 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
5415 * CPU (if there is one).
5416 */
5417static void nohz_balancer_kick(int cpu)
5418{
5419 int ilb_cpu;
5420
5421 nohz.next_balance++;
5422
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005423 ilb_cpu = find_new_ilb(cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005424
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005425 if (ilb_cpu >= nr_cpu_ids)
5426 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005427
Suresh Siddhacd490c52011-12-06 11:26:34 -08005428 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08005429 return;
5430 /*
5431 * Use smp_send_reschedule() instead of resched_cpu().
5432	 * This way we generate a sched IPI on the target cpu, which
5433	 * is idle, and the softirq performing the nohz idle load
5434	 * balance will run before returning from the IPI.
5435 */
5436 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005437 return;
5438}
5439
Alex Shic1cc0172012-09-10 15:10:58 +08005440static inline void nohz_balance_exit_idle(int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08005441{
5442 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5443 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5444 atomic_dec(&nohz.nr_cpus);
5445 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5446 }
5447}
5448
Suresh Siddha69e1e812011-12-01 17:07:33 -08005449static inline void set_cpu_sd_state_busy(void)
5450{
5451 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08005452
Suresh Siddha69e1e812011-12-01 17:07:33 -08005453 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05005454 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005455
5456 if (!sd || !sd->nohz_idle)
5457 goto unlock;
5458 sd->nohz_idle = 0;
5459
5460 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08005461 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005462unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08005463 rcu_read_unlock();
5464}
5465
5466void set_cpu_sd_state_idle(void)
5467{
5468 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08005469
Suresh Siddha69e1e812011-12-01 17:07:33 -08005470 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05005471 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005472
5473 if (!sd || sd->nohz_idle)
5474 goto unlock;
5475 sd->nohz_idle = 1;
5476
5477 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08005478 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005479unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08005480 rcu_read_unlock();
5481}
5482
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005483/*
Alex Shic1cc0172012-09-10 15:10:58 +08005484 * This routine records that the cpu is going idle with its tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005485 * This info will be used when performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005486 */
Alex Shic1cc0172012-09-10 15:10:58 +08005487void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005488{
Suresh Siddha71325962012-01-19 18:28:57 -08005489 /*
5490 * If this cpu is going down, then nothing needs to be done.
5491 */
5492 if (!cpu_active(cpu))
5493 return;
5494
Alex Shic1cc0172012-09-10 15:10:58 +08005495 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5496 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005497
Alex Shic1cc0172012-09-10 15:10:58 +08005498 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5499 atomic_inc(&nohz.nr_cpus);
5500 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005501}
Suresh Siddha71325962012-01-19 18:28:57 -08005502
5503static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
5504 unsigned long action, void *hcpu)
5505{
5506 switch (action & ~CPU_TASKS_FROZEN) {
5507 case CPU_DYING:
Alex Shic1cc0172012-09-10 15:10:58 +08005508 nohz_balance_exit_idle(smp_processor_id());
Suresh Siddha71325962012-01-19 18:28:57 -08005509 return NOTIFY_OK;
5510 default:
5511 return NOTIFY_DONE;
5512 }
5513}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005514#endif
5515
5516static DEFINE_SPINLOCK(balancing);
5517
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005518/*
5519 * Scale the max load_balance interval with the number of CPUs in the system.
5520 * This trades load-balance latency on larger machines for less cross talk.
5521 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005522void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005523{
5524 max_load_balance_interval = HZ*num_online_cpus()/10;
5525}
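/*
 * Worked example (illustrative numbers): with HZ=250 and 16 online cpus,
 * max_load_balance_interval = 250 * 16 / 10 = 400 jiffies, i.e. 1.6
 * seconds at 250 ticks per second.
 */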
5526
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005527/*
5528 * It checks each scheduling domain to see if it is due to be balanced,
5529 * and initiates a balancing operation if so.
5530 *
Libinb9b08532013-04-01 19:14:01 +08005531 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005532 */
5533static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5534{
5535 int balance = 1;
5536 struct rq *rq = cpu_rq(cpu);
5537 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02005538 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005539 /* Earliest time when we have to do rebalance again */
5540 unsigned long next_balance = jiffies + 60*HZ;
5541 int update_next_balance = 0;
5542 int need_serialize;
5543
Paul Turner48a16752012-10-04 13:18:31 +02005544 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08005545
Peter Zijlstradce840a2011-04-07 14:09:50 +02005546 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005547 for_each_domain(cpu, sd) {
5548 if (!(sd->flags & SD_LOAD_BALANCE))
5549 continue;
5550
5551 interval = sd->balance_interval;
5552 if (idle != CPU_IDLE)
5553 interval *= sd->busy_factor;
5554
5555 /* scale ms to jiffies */
5556 interval = msecs_to_jiffies(interval);
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005557 interval = clamp(interval, 1UL, max_load_balance_interval);
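		/*
		 * Illustrative numbers: a domain with balance_interval = 64ms
		 * and busy_factor = 32 is rebalanced roughly every 2048ms
		 * while busy, subject to the clamp above.
		 */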
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005558
5559 need_serialize = sd->flags & SD_SERIALIZE;
5560
5561 if (need_serialize) {
5562 if (!spin_trylock(&balancing))
5563 goto out;
5564 }
5565
5566 if (time_after_eq(jiffies, sd->last_balance + interval)) {
5567 if (load_balance(cpu, rq, sd, idle, &balance)) {
5568 /*
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09005569 * The LBF_SOME_PINNED logic could have changed
5570 * env->dst_cpu, so we can't know our idle
5571 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005572 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09005573 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005574 }
5575 sd->last_balance = jiffies;
5576 }
5577 if (need_serialize)
5578 spin_unlock(&balancing);
5579out:
5580 if (time_after(next_balance, sd->last_balance + interval)) {
5581 next_balance = sd->last_balance + interval;
5582 update_next_balance = 1;
5583 }
5584
5585 /*
5586 * Stop the load balance at this level. There is another
5587 * CPU in our sched group which is doing load balancing more
5588 * actively.
5589 */
5590 if (!balance)
5591 break;
5592 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005593 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005594
5595 /*
5596 * next_balance will be updated only when there is a need.
5597	 * When the cpu is attached to a null domain, for example, it will not be
5598 * updated.
5599 */
5600 if (likely(update_next_balance))
5601 rq->next_balance = next_balance;
5602}
5603
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005604#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005605/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005606 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005607 * rebalancing for all the cpus for whom scheduler ticks are stopped.
5608 */
5609static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5610{
5611 struct rq *this_rq = cpu_rq(this_cpu);
5612 struct rq *rq;
5613 int balance_cpu;
5614
Suresh Siddha1c792db2011-12-01 17:07:32 -08005615 if (idle != CPU_IDLE ||
5616 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5617 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005618
5619 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08005620 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005621 continue;
5622
5623 /*
5624 * If this cpu gets work to do, stop the load balancing
5625			 * work being done for other cpus. The next
5626			 * load-balancing owner will pick it up.
5627 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08005628 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005629 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005630
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02005631 rq = cpu_rq(balance_cpu);
5632
5633 raw_spin_lock_irq(&rq->lock);
5634 update_rq_clock(rq);
5635 update_idle_cpu_load(rq);
5636 raw_spin_unlock_irq(&rq->lock);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005637
5638 rebalance_domains(balance_cpu, CPU_IDLE);
5639
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005640 if (time_after(this_rq->next_balance, rq->next_balance))
5641 this_rq->next_balance = rq->next_balance;
5642 }
5643 nohz.next_balance = this_rq->next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08005644end:
5645 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005646}
5647
5648/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005649 * Current heuristic for kicking the idle load balancer in the presence
5650 * of an idle cpu in the system:
5651 *   - This rq has more than one task.
5652 *   - At any scheduler domain level, this cpu's scheduler group has
5653 *     multiple busy cpus exceeding the group's power.
5654 *   - For SD_ASYM_PACKING, if the lower-numbered cpus in the scheduler
5655 *     domain span are idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005656 */
5657static inline int nohz_kick_needed(struct rq *rq, int cpu)
5658{
5659 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005660 struct sched_domain *sd;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005661
Suresh Siddha1c792db2011-12-01 17:07:32 -08005662 if (unlikely(idle_cpu(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005663 return 0;
5664
Suresh Siddha1c792db2011-12-01 17:07:32 -08005665 /*
5666	 * We may have recently been in ticked or tickless idle mode. At the first
5667 * busy tick after returning from idle, we will update the busy stats.
5668 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08005669 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08005670 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005671
5672 /*
5673 * None are in tickless mode and hence no need for NOHZ idle load
5674 * balancing.
5675 */
5676 if (likely(!atomic_read(&nohz.nr_cpus)))
5677 return 0;
Suresh Siddha1c792db2011-12-01 17:07:32 -08005678
5679 if (time_before(now, nohz.next_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005680 return 0;
5681
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005682 if (rq->nr_running >= 2)
5683 goto need_kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005684
Peter Zijlstra067491b2011-12-07 14:32:08 +01005685 rcu_read_lock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005686 for_each_domain(cpu, sd) {
5687 struct sched_group *sg = sd->groups;
5688 struct sched_group_power *sgp = sg->sgp;
5689 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005690
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005691 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
Peter Zijlstra067491b2011-12-07 14:32:08 +01005692 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005693
5694 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5695 && (cpumask_first_and(nohz.idle_cpus_mask,
5696 sched_domain_span(sd)) < cpu))
Peter Zijlstra067491b2011-12-07 14:32:08 +01005697 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005698
5699 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5700 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005701 }
Peter Zijlstra067491b2011-12-07 14:32:08 +01005702 rcu_read_unlock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005703 return 0;
Peter Zijlstra067491b2011-12-07 14:32:08 +01005704
5705need_kick_unlock:
5706 rcu_read_unlock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005707need_kick:
5708 return 1;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005709}
5710#else
5711static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5712#endif
5713
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005714/*
5715 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005716 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005717 */
5718static void run_rebalance_domains(struct softirq_action *h)
5719{
5720 int this_cpu = smp_processor_id();
5721 struct rq *this_rq = cpu_rq(this_cpu);
Suresh Siddha6eb57e02011-10-03 15:09:01 -07005722 enum cpu_idle_type idle = this_rq->idle_balance ?
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005723 CPU_IDLE : CPU_NOT_IDLE;
5724
5725 rebalance_domains(this_cpu, idle);
5726
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005727 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005728 * If this cpu has a pending nohz_balance_kick, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005729 * balancing on behalf of the other idle cpus whose ticks are
5730 * stopped.
5731 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005732 nohz_idle_balance(this_cpu, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005733}
5734
5735static inline int on_null_domain(int cpu)
5736{
Paul E. McKenney90a65012010-02-28 08:32:18 -08005737 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005738}
5739
5740/*
5741 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005742 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005743void trigger_load_balance(struct rq *rq, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005744{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005745 /* Don't need to rebalance while attached to NULL domain */
5746 if (time_after_eq(jiffies, rq->next_balance) &&
5747 likely(!on_null_domain(cpu)))
5748 raise_softirq(SCHED_SOFTIRQ);
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005749#ifdef CONFIG_NO_HZ_COMMON
Suresh Siddha1c792db2011-12-01 17:07:32 -08005750 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005751 nohz_balancer_kick(cpu);
5752#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005753}
5754
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005755static void rq_online_fair(struct rq *rq)
5756{
5757 update_sysctl();
5758}
5759
5760static void rq_offline_fair(struct rq *rq)
5761{
5762 update_sysctl();
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07005763
5764 /* Ensure any throttled groups are reachable by pick_next_task */
5765 unthrottle_offline_cfs_rqs(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005766}
5767
Dhaval Giani55e12e52008-06-24 23:39:43 +05305768#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02005769
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005770/*
5771 * scheduler tick hitting a task of our scheduling class:
5772 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005773static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005774{
5775 struct cfs_rq *cfs_rq;
5776 struct sched_entity *se = &curr->se;
5777
5778 for_each_sched_entity(se) {
5779 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005780 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005781 }
Ben Segall18bf2802012-10-04 12:51:20 +02005782
Peter Zijlstracbee9f82012-10-25 14:16:43 +02005783 if (sched_feat_numa(NUMA))
5784 task_tick_numa(rq, curr);
Linus Torvalds3d59eeb2012-12-16 14:33:25 -08005785
Ben Segall18bf2802012-10-04 12:51:20 +02005786 update_rq_runnable_avg(rq, 1);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005787}
5788
5789/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005790 * called on fork with the child task as argument from the parent's context
5791 * - child not yet on the tasklist
5792 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005793 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005794static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005795{
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09005796 struct cfs_rq *cfs_rq;
5797 struct sched_entity *se = &p->se, *curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02005798 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005799 struct rq *rq = this_rq();
5800 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005801
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005802 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005803
Peter Zijlstra861d0342010-08-19 13:31:43 +02005804 update_rq_clock(rq);
5805
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09005806 cfs_rq = task_cfs_rq(current);
5807 curr = cfs_rq->curr;
5808
Paul E. McKenneyb0a0f662010-10-06 17:32:51 -07005809 if (unlikely(task_cpu(p) != this_cpu)) {
5810 rcu_read_lock();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005811 __set_task_cpu(p, this_cpu);
Paul E. McKenneyb0a0f662010-10-06 17:32:51 -07005812 rcu_read_unlock();
5813 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005814
Ting Yang7109c442007-08-28 12:53:24 +02005815 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005816
Mike Galbraithb5d9d732009-09-08 11:12:28 +02005817 if (curr)
5818 se->vruntime = curr->vruntime;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02005819 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02005820
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005821 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02005822 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02005823 * Upon rescheduling, sched_class::put_prev_task() will place
5824 * 'current' within the tree based on its new key value.
5825 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02005826 swap(curr->vruntime, se->vruntime);
Bharata B Raoaec0a512008-08-28 14:42:49 +05305827 resched_task(rq->curr);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02005828 }
5829
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01005830 se->vruntime -= cfs_rq->min_vruntime;
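	/*
	 * Note: vruntime is made relative to this cfs_rq's min_vruntime
	 * here; enqueue re-bases it, which keeps the value sane if the
	 * child ends up placed on a different cpu's cfs_rq.
	 */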
5831
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005832 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005833}
5834
Steven Rostedtcb469842008-01-25 21:08:22 +01005835/*
5836 * Priority of the task has changed. Check to see if we preempt
5837 * the current task.
5838 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005839static void
5840prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01005841{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005842 if (!p->se.on_rq)
5843 return;
5844
Steven Rostedtcb469842008-01-25 21:08:22 +01005845 /*
5846 * Reschedule if we are currently running on this runqueue and
5847 * our priority decreased, or if we are not currently running on
5848	 * this runqueue and our priority is higher than the current task's.
5849 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005850 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01005851 if (p->prio > oldprio)
5852 resched_task(rq->curr);
5853 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02005854 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005855}
5856
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005857static void switched_from_fair(struct rq *rq, struct task_struct *p)
5858{
5859 struct sched_entity *se = &p->se;
5860 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5861
5862 /*
5863 * Ensure the task's vruntime is normalized, so that when its
5864 * switched back to the fair class the enqueue_entity(.flags=0) will
5865 * do the right thing.
5866 *
5867 * If it was on_rq, then the dequeue_entity(.flags=0) will already
5868 * have normalized the vruntime, if it was !on_rq, then only when
5869 * the task is sleeping will it still have non-normalized vruntime.
5870 */
5871 if (!se->on_rq && p->state != TASK_RUNNING) {
5872 /*
5873 * Fix up our vruntime so that the current sleep doesn't
5874 * cause 'unlimited' sleep bonus.
5875 */
5876 place_entity(cfs_rq, se, 0);
5877 se->vruntime -= cfs_rq->min_vruntime;
5878 }
Paul Turner9ee474f2012-10-04 13:18:30 +02005879
Alex Shi141965c2013-06-26 13:05:39 +08005880#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02005881 /*
5882 * Remove our load from contribution when we leave sched_fair
5883 * and ensure we don't carry in an old decay_count if we
5884 * switch back.
5885 */
5886 if (p->se.avg.decay_count) {
5887 struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
5888 __synchronize_entity_decay(&p->se);
5889 subtract_blocked_load_contrib(cfs_rq,
5890 p->se.avg.load_avg_contrib);
5891 }
5892#endif
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005893}
5894
Steven Rostedtcb469842008-01-25 21:08:22 +01005895/*
5896 * We switched to the sched_fair class.
5897 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005898static void switched_to_fair(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01005899{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005900 if (!p->se.on_rq)
5901 return;
5902
Steven Rostedtcb469842008-01-25 21:08:22 +01005903 /*
5904 * We were most likely switched from sched_rt, so
5905	 * kick off a reschedule if running, otherwise just see
5906 * if we can still preempt the current task.
5907 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005908 if (rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01005909 resched_task(rq->curr);
5910 else
Peter Zijlstra15afe092008-09-20 23:38:02 +02005911 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005912}
5913
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02005914/* Account for a task changing its policy or group.
5915 *
5916 * This routine is mostly called to set cfs_rq->curr field when a task
5917 * migrates between groups/classes.
5918 */
5919static void set_curr_task_fair(struct rq *rq)
5920{
5921 struct sched_entity *se = &rq->curr->se;
5922
Paul Turnerec12cb72011-07-21 09:43:30 -07005923 for_each_sched_entity(se) {
5924 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5925
5926 set_next_entity(cfs_rq, se);
5927 /* ensure bandwidth has been allocated on our new cfs_rq */
5928 account_cfs_rq_runtime(cfs_rq, 0);
5929 }
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02005930}
5931
Peter Zijlstra029632f2011-10-25 10:00:11 +02005932void init_cfs_rq(struct cfs_rq *cfs_rq)
5933{
5934 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra029632f2011-10-25 10:00:11 +02005935 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
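	/*
	 * Presumably-deliberate quirk: (u64)(-(1LL << 20)) sits 2^20 below
	 * the u64 wrap point, so vruntime wrap-around handling is exercised
	 * early rather than only after a very long uptime.
	 */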
5936#ifndef CONFIG_64BIT
5937 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5938#endif
Alex Shi141965c2013-06-26 13:05:39 +08005939#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02005940 atomic64_set(&cfs_rq->decay_counter, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02005941 atomic64_set(&cfs_rq->removed_load, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02005942#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02005943}
5944
Peter Zijlstra810b3812008-02-29 15:21:01 -05005945#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02005946static void task_move_group_fair(struct task_struct *p, int on_rq)
Peter Zijlstra810b3812008-02-29 15:21:01 -05005947{
Paul Turneraff3e492012-10-04 13:18:30 +02005948 struct cfs_rq *cfs_rq;
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02005949 /*
5950 * If the task was not on the rq at the time of this cgroup movement
5951 * it must have been asleep, sleeping tasks keep their ->vruntime
5952 * absolute on their old rq until wakeup (needed for the fair sleeper
5953 * bonus in place_entity()).
5954 *
5955 * If it was on the rq, we've just 'preempted' it, which does convert
5956 * ->vruntime to a relative base.
5957 *
5958 * Make sure both cases convert their relative position when migrating
5959 * to another cgroup's rq. This does somewhat interfere with the
5960 * fair sleeper stuff for the first placement, but who cares.
5961 */
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09005962 /*
5963 * When !on_rq, vruntime of the task has usually NOT been normalized.
5964 * But there are some cases where it has already been normalized:
5965 *
5966 * - Moving a forked child which is waiting for being woken up by
5967 * wake_up_new_task().
Daisuke Nishimura62af3782011-12-15 14:37:41 +09005968 * - Moving a task which has been woken up by try_to_wake_up() and
5969	 *   is waiting to actually be woken up by sched_ttwu_pending().
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09005970 *
5971 * To prevent boost or penalty in the new cfs_rq caused by delta
5972 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
5973 */
Daisuke Nishimura62af3782011-12-15 14:37:41 +09005974 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09005975 on_rq = 1;
5976
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01005977 if (!on_rq)
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02005978 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
5979 set_task_rq(p, task_cpu(p));
Paul Turneraff3e492012-10-04 13:18:30 +02005980 if (!on_rq) {
5981 cfs_rq = cfs_rq_of(&p->se);
5982 p->se.vruntime += cfs_rq->min_vruntime;
5983#ifdef CONFIG_SMP
5984 /*
5985 * migrate_task_rq_fair() will have removed our previous
5986 * contribution, but we must synchronize for ongoing future
5987 * decay.
5988 */
5989 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
5990 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
5991#endif
5992 }
Peter Zijlstra810b3812008-02-29 15:21:01 -05005993}
Peter Zijlstra029632f2011-10-25 10:00:11 +02005994
5995void free_fair_sched_group(struct task_group *tg)
5996{
5997 int i;
5998
5999 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
6000
6001 for_each_possible_cpu(i) {
6002 if (tg->cfs_rq)
6003 kfree(tg->cfs_rq[i]);
6004 if (tg->se)
6005 kfree(tg->se[i]);
6006 }
6007
6008 kfree(tg->cfs_rq);
6009 kfree(tg->se);
6010}
6011
6012int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6013{
6014 struct cfs_rq *cfs_rq;
6015 struct sched_entity *se;
6016 int i;
6017
6018 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
6019 if (!tg->cfs_rq)
6020 goto err;
6021 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
6022 if (!tg->se)
6023 goto err;
6024
6025 tg->shares = NICE_0_LOAD;
6026
6027 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6028
6029 for_each_possible_cpu(i) {
6030 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6031 GFP_KERNEL, cpu_to_node(i));
6032 if (!cfs_rq)
6033 goto err;
6034
6035 se = kzalloc_node(sizeof(struct sched_entity),
6036 GFP_KERNEL, cpu_to_node(i));
6037 if (!se)
6038 goto err_free_rq;
6039
6040 init_cfs_rq(cfs_rq);
6041 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6042 }
6043
6044 return 1;
6045
6046err_free_rq:
6047 kfree(cfs_rq);
6048err:
6049 return 0;
6050}
6051
6052void unregister_fair_sched_group(struct task_group *tg, int cpu)
6053{
6054 struct rq *rq = cpu_rq(cpu);
6055 unsigned long flags;
6056
6057 /*
6058	 * Only empty task groups can be destroyed, so we can speculatively
6059 * check on_list without danger of it being re-added.
6060 */
6061 if (!tg->cfs_rq[cpu]->on_list)
6062 return;
6063
6064 raw_spin_lock_irqsave(&rq->lock, flags);
6065 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6066 raw_spin_unlock_irqrestore(&rq->lock, flags);
6067}
6068
6069void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6070 struct sched_entity *se, int cpu,
6071 struct sched_entity *parent)
6072{
6073 struct rq *rq = cpu_rq(cpu);
6074
6075 cfs_rq->tg = tg;
6076 cfs_rq->rq = rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006077 init_cfs_rq_runtime(cfs_rq);
6078
6079 tg->cfs_rq[cpu] = cfs_rq;
6080 tg->se[cpu] = se;
6081
6082 /* se could be NULL for root_task_group */
6083 if (!se)
6084 return;
6085
6086 if (!parent)
6087 se->cfs_rq = &rq->cfs;
6088 else
6089 se->cfs_rq = parent->my_q;
6090
6091 se->my_q = cfs_rq;
6092 update_load_set(&se->load, 0);
6093 se->parent = parent;
6094}
6095
6096static DEFINE_MUTEX(shares_mutex);
6097
6098int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6099{
6100 int i;
6101 unsigned long flags;
6102
6103 /*
6104 * We can't change the weight of the root cgroup.
6105 */
6106 if (!tg->se[0])
6107 return -EINVAL;
6108
6109 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
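	/*
	 * Reached via the cgroup cpu.shares knob; values are clamped to
	 * [MIN_SHARES, MAX_SHARES] (2 .. 2^18), with NICE_0_LOAD (1024)
	 * being the default weight.
	 */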
6110
6111 mutex_lock(&shares_mutex);
6112 if (tg->shares == shares)
6113 goto done;
6114
6115 tg->shares = shares;
6116 for_each_possible_cpu(i) {
6117 struct rq *rq = cpu_rq(i);
6118 struct sched_entity *se;
6119
6120 se = tg->se[i];
6121 /* Propagate contribution to hierarchy */
6122 raw_spin_lock_irqsave(&rq->lock, flags);
Frederic Weisbecker71b1da42013-04-12 01:50:59 +02006123
6124 /* Possible calls to update_curr() need rq clock */
6125 update_rq_clock(rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08006126 for_each_sched_entity(se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02006127 update_cfs_shares(group_cfs_rq(se));
6128 raw_spin_unlock_irqrestore(&rq->lock, flags);
6129 }
6130
6131done:
6132 mutex_unlock(&shares_mutex);
6133 return 0;
6134}
6135#else /* CONFIG_FAIR_GROUP_SCHED */
6136
6137void free_fair_sched_group(struct task_group *tg) { }
6138
6139int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6140{
6141 return 1;
6142}
6143
6144void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6145
6146#endif /* CONFIG_FAIR_GROUP_SCHED */
6147
Peter Zijlstra810b3812008-02-29 15:21:01 -05006148
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07006149static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00006150{
6151 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00006152 unsigned int rr_interval = 0;
6153
6154 /*
6155 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6156 * idle runqueue:
6157 */
Peter Williams0d721ce2009-09-21 01:31:53 +00006158 if (rq->cfs.load.weight)
Zhu Yanhaia59f4e02013-01-08 12:56:52 +08006159 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
Peter Williams0d721ce2009-09-21 01:31:53 +00006160
6161 return rr_interval;
6162}
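/*
 * Note: this feeds sched_rr_get_interval(2); for SCHED_OTHER tasks the
 * reported value is the task's current CFS slice rather than a fixed
 * quantum, so it varies with runqueue load and task weight.
 */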
6163
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006164/*
6165 * All the scheduling class methods:
6166 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006167const struct sched_class fair_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02006168 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006169 .enqueue_task = enqueue_task_fair,
6170 .dequeue_task = dequeue_task_fair,
6171 .yield_task = yield_task_fair,
Mike Galbraithd95f4122011-02-01 09:50:51 -05006172 .yield_to_task = yield_to_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006173
Ingo Molnar2e09bf52007-10-15 17:00:05 +02006174 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006175
6176 .pick_next_task = pick_next_task_fair,
6177 .put_prev_task = put_prev_task_fair,
6178
Peter Williams681f3e62007-10-24 18:23:51 +02006179#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08006180 .select_task_rq = select_task_rq_fair,
Paul Turner0a74bef2012-10-04 13:18:30 +02006181 .migrate_task_rq = migrate_task_rq_fair,
Alex Shi141965c2013-06-26 13:05:39 +08006182
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006183 .rq_online = rq_online_fair,
6184 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006185
6186 .task_waking = task_waking_fair,
Peter Williams681f3e62007-10-24 18:23:51 +02006187#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006188
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02006189 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006190 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006191 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01006192
6193 .prio_changed = prio_changed_fair,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006194 .switched_from = switched_from_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01006195 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05006196
Peter Williams0d721ce2009-09-21 01:31:53 +00006197 .get_rr_interval = get_rr_interval_fair,
6198
Peter Zijlstra810b3812008-02-29 15:21:01 -05006199#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006200 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05006201#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006202};
6203
6204#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +02006205void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006206{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006207 struct cfs_rq *cfs_rq;
6208
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01006209 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02006210 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02006211 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01006212 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006213}
6214#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02006215
6216__init void init_sched_fair_class(void)
6217{
6218#ifdef CONFIG_SMP
6219 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6220
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006221#ifdef CONFIG_NO_HZ_COMMON
Diwakar Tundlam554ceca2012-03-07 14:44:26 -08006222 nohz.next_balance = jiffies;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006223 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
Suresh Siddha71325962012-01-19 18:28:57 -08006224 cpu_notifier(sched_ilb_notifier, 0);
Peter Zijlstra029632f2011-10-25 10:00:11 +02006225#endif
6226#endif /* SMP */
6227
6228}