blob: 36eadaaa4e5b0f1b13c3ce605b35992c12d26d66 [file] [log] [blame]
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001/*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
Peter Zijlstra21805082007-08-25 18:41:53 +020018 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020021 */
22
Arjan van de Ven97455122008-01-25 21:08:34 +010023#include <linux/latencytop.h>
Christian Ehrhardt1983a922009-11-30 12:16:47 +010024#include <linux/sched.h>
Sisir Koppaka3436ae12011-03-26 18:22:55 +053025#include <linux/cpumask.h>
Peter Zijlstra029632f2011-10-25 10:00:11 +020026#include <linux/slab.h>
27#include <linux/profile.h>
28#include <linux/interrupt.h>
Peter Zijlstracbee9f82012-10-25 14:16:43 +020029#include <linux/mempolicy.h>
Mel Gormane14808b2012-11-19 10:59:15 +000030#include <linux/migrate.h>
Peter Zijlstracbee9f82012-10-25 14:16:43 +020031#include <linux/task_work.h>
Peter Zijlstra029632f2011-10-25 10:00:11 +020032
33#include <trace/events/sched.h>
34
35#include "sched.h"
Arjan van de Ven97455122008-01-25 21:08:34 +010036
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020037/*
Peter Zijlstra21805082007-08-25 18:41:53 +020038 * Targeted preemption latency for CPU-bound tasks:
Takuya Yoshikawa864616e2010-10-14 16:09:13 +090039 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020040 *
Peter Zijlstra21805082007-08-25 18:41:53 +020041 * NOTE: this latency value is not the same as the concept of
Ingo Molnard274a4c2007-10-15 17:00:14 +020042 * 'timeslice length' - timeslices in CFS are of variable length
43 * and have no persistent notion like in traditional, time-slice
44 * based scheduling concepts.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020045 *
Ingo Molnard274a4c2007-10-15 17:00:14 +020046 * (to see the precise effective timeslice length of your workload,
47 * run vmstat and monitor the context-switches (cs) field)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020048 */
Mike Galbraith21406922010-03-11 17:17:15 +010049unsigned int sysctl_sched_latency = 6000000ULL;
50unsigned int normalized_sysctl_sched_latency = 6000000ULL;
Ingo Molnar2bd8e6d2007-10-15 17:00:02 +020051
52/*
Christian Ehrhardt1983a922009-11-30 12:16:47 +010053 * The initial- and re-scaling of tunables is configurable
54 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
55 *
56 * Options are:
57 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
58 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
59 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
60 */
61enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 = SCHED_TUNABLESCALING_LOG;
63
64/*
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010065 * Minimal preemption granularity for CPU-bound tasks:
Takuya Yoshikawa864616e2010-10-14 16:09:13 +090066 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010067 */
Ingo Molnar0bf377b2010-09-12 08:14:52 +020068unsigned int sysctl_sched_min_granularity = 750000ULL;
69unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010070
71/*
72 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73 */
Ingo Molnar0bf377b2010-09-12 08:14:52 +020074static unsigned int sched_nr_latency = 8;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010075
76/*
Mike Galbraith2bba22c2009-09-09 15:41:37 +020077 * After fork, child runs first. If set to 0 (default) then
Ingo Molnar2bd8e6d2007-10-15 17:00:02 +020078 * parent will (try to) run first.
79 */
Mike Galbraith2bba22c2009-09-09 15:41:37 +020080unsigned int sysctl_sched_child_runs_first __read_mostly;
Peter Zijlstra21805082007-08-25 18:41:53 +020081
82/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020083 * SCHED_OTHER wake-up granularity.
Mike Galbraith172e0822009-09-09 15:41:37 +020084 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020085 *
86 * This option delays the preemption effects of decoupled workloads
87 * and reduces their over-scheduling. Synchronous workloads will still
88 * have immediate wakeup/sleep latencies.
89 */
Mike Galbraith172e0822009-09-09 15:41:37 +020090unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +010091unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020092
Ingo Molnarda84d962007-10-15 17:00:18 +020093const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
94
Paul Turnera7a4f8a2010-11-15 15:47:06 -080095/*
96 * The exponential sliding window over which load is averaged for shares
97 * distribution.
98 * (default: 10msec)
99 */
100unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101
Paul Turnerec12cb72011-07-21 09:43:30 -0700102#ifdef CONFIG_CFS_BANDWIDTH
103/*
104 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105 * each time a cfs_rq requests quota.
106 *
107 * Note: in the case that the slice exceeds the runtime remaining (either due
108 * to consumption or the quota being specified to be smaller than the slice)
109 * we will always only issue the remaining available time.
110 *
111 * default: 5 msec, units: microseconds
112 */
113unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
114#endif
115
Paul Gortmaker85276322013-04-19 15:10:50 -0400116static inline void update_load_add(struct load_weight *lw, unsigned long inc)
117{
118 lw->weight += inc;
119 lw->inv_weight = 0;
120}
121
122static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
123{
124 lw->weight -= dec;
125 lw->inv_weight = 0;
126}
127
128static inline void update_load_set(struct load_weight *lw, unsigned long w)
129{
130 lw->weight = w;
131 lw->inv_weight = 0;
132}
133
Peter Zijlstra029632f2011-10-25 10:00:11 +0200134/*
135 * Increase the granularity value when there are more CPUs,
136 * because with more CPUs the 'effective latency' as visible
137 * to users decreases. But the relationship is not linear,
138 * so pick a second-best guess by going with the log2 of the
139 * number of CPUs.
140 *
141 * This idea comes from the SD scheduler of Con Kolivas:
142 */
143static int get_update_sysctl_factor(void)
144{
145 unsigned int cpus = min_t(int, num_online_cpus(), 8);
146 unsigned int factor;
147
148 switch (sysctl_sched_tunable_scaling) {
149 case SCHED_TUNABLESCALING_NONE:
150 factor = 1;
151 break;
152 case SCHED_TUNABLESCALING_LINEAR:
153 factor = cpus;
154 break;
155 case SCHED_TUNABLESCALING_LOG:
156 default:
157 factor = 1 + ilog2(cpus);
158 break;
159 }
160
161 return factor;
162}
163
164static void update_sysctl(void)
165{
166 unsigned int factor = get_update_sysctl_factor();
167
168#define SET_SYSCTL(name) \
169 (sysctl_##name = (factor) * normalized_sysctl_##name)
170 SET_SYSCTL(sched_min_granularity);
171 SET_SYSCTL(sched_latency);
172 SET_SYSCTL(sched_wakeup_granularity);
173#undef SET_SYSCTL
174}
175
176void sched_init_granularity(void)
177{
178 update_sysctl();
179}
180
181#if BITS_PER_LONG == 32
182# define WMULT_CONST (~0UL)
183#else
184# define WMULT_CONST (1UL << 32)
185#endif
186
187#define WMULT_SHIFT 32
188
189/*
190 * Shift right and round:
191 */
192#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
193
194/*
195 * delta *= weight / lw
196 */
197static unsigned long
198calc_delta_mine(unsigned long delta_exec, unsigned long weight,
199 struct load_weight *lw)
200{
201 u64 tmp;
202
203 /*
204 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
205 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
206 * 2^SCHED_LOAD_RESOLUTION.
207 */
208 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
209 tmp = (u64)delta_exec * scale_load_down(weight);
210 else
211 tmp = (u64)delta_exec;
212
213 if (!lw->inv_weight) {
214 unsigned long w = scale_load_down(lw->weight);
215
216 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
217 lw->inv_weight = 1;
218 else if (unlikely(!w))
219 lw->inv_weight = WMULT_CONST;
220 else
221 lw->inv_weight = WMULT_CONST / w;
222 }
223
224 /*
225 * Check whether we'd overflow the 64-bit multiplication:
226 */
227 if (unlikely(tmp > WMULT_CONST))
228 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
229 WMULT_SHIFT/2);
230 else
231 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
232
233 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
234}
235
236
237const struct sched_class fair_sched_class;
Peter Zijlstraa4c2f002008-10-17 19:27:03 +0200238
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200239/**************************************************************
240 * CFS operations on generic schedulable entities:
241 */
242
243#ifdef CONFIG_FAIR_GROUP_SCHED
244
245/* cpu runqueue to which this cfs_rq is attached */
246static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
247{
248 return cfs_rq->rq;
249}
250
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200251/* An entity is a task if it doesn't "own" a runqueue */
252#define entity_is_task(se) (!se->my_q)
253
Peter Zijlstra8f488942009-07-24 12:25:30 +0200254static inline struct task_struct *task_of(struct sched_entity *se)
255{
256#ifdef CONFIG_SCHED_DEBUG
257 WARN_ON_ONCE(!entity_is_task(se));
258#endif
259 return container_of(se, struct task_struct, se);
260}
261
Peter Zijlstrab7581492008-04-19 19:45:00 +0200262/* Walk up scheduling entities hierarchy */
263#define for_each_sched_entity(se) \
264 for (; se; se = se->parent)
265
266static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
267{
268 return p->se.cfs_rq;
269}
270
271/* runqueue on which this entity is (to be) queued */
272static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
273{
274 return se->cfs_rq;
275}
276
277/* runqueue "owned" by this group */
278static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
279{
280 return grp->my_q;
281}
282
Paul Turneraff3e492012-10-04 13:18:30 +0200283static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
284 int force_update);
Paul Turner9ee474f2012-10-04 13:18:30 +0200285
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800286static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
287{
288 if (!cfs_rq->on_list) {
Paul Turner67e86252010-11-15 15:47:05 -0800289 /*
290 * Ensure we either appear before our parent (if already
291 * enqueued) or force our parent to appear after us when it is
292 * enqueued. The fact that we always enqueue bottom-up
293 * reduces this to two cases.
294 */
295 if (cfs_rq->tg->parent &&
296 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
297 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800298 &rq_of(cfs_rq)->leaf_cfs_rq_list);
Paul Turner67e86252010-11-15 15:47:05 -0800299 } else {
300 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
301 &rq_of(cfs_rq)->leaf_cfs_rq_list);
302 }
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800303
304 cfs_rq->on_list = 1;
Paul Turner9ee474f2012-10-04 13:18:30 +0200305 /* We should have no load, but we need to update last_decay. */
Paul Turneraff3e492012-10-04 13:18:30 +0200306 update_cfs_rq_blocked_load(cfs_rq, 0);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800307 }
308}
309
310static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
311{
312 if (cfs_rq->on_list) {
313 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
314 cfs_rq->on_list = 0;
315 }
316}
317
Peter Zijlstrab7581492008-04-19 19:45:00 +0200318/* Iterate thr' all leaf cfs_rq's on a runqueue */
319#define for_each_leaf_cfs_rq(rq, cfs_rq) \
320 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
321
322/* Do the two (enqueued) entities belong to the same group ? */
323static inline int
324is_same_group(struct sched_entity *se, struct sched_entity *pse)
325{
326 if (se->cfs_rq == pse->cfs_rq)
327 return 1;
328
329 return 0;
330}
331
332static inline struct sched_entity *parent_entity(struct sched_entity *se)
333{
334 return se->parent;
335}
336
Peter Zijlstra464b7522008-10-24 11:06:15 +0200337/* return depth at which a sched entity is present in the hierarchy */
338static inline int depth_se(struct sched_entity *se)
339{
340 int depth = 0;
341
342 for_each_sched_entity(se)
343 depth++;
344
345 return depth;
346}
347
348static void
349find_matching_se(struct sched_entity **se, struct sched_entity **pse)
350{
351 int se_depth, pse_depth;
352
353 /*
354 * preemption test can be made between sibling entities who are in the
355 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of
356 * both tasks until we find their ancestors who are siblings of common
357 * parent.
358 */
359
360 /* First walk up until both entities are at same depth */
361 se_depth = depth_se(*se);
362 pse_depth = depth_se(*pse);
363
364 while (se_depth > pse_depth) {
365 se_depth--;
366 *se = parent_entity(*se);
367 }
368
369 while (pse_depth > se_depth) {
370 pse_depth--;
371 *pse = parent_entity(*pse);
372 }
373
374 while (!is_same_group(*se, *pse)) {
375 *se = parent_entity(*se);
376 *pse = parent_entity(*pse);
377 }
378}
379
Peter Zijlstra8f488942009-07-24 12:25:30 +0200380#else /* !CONFIG_FAIR_GROUP_SCHED */
381
382static inline struct task_struct *task_of(struct sched_entity *se)
383{
384 return container_of(se, struct task_struct, se);
385}
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200386
387static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
388{
389 return container_of(cfs_rq, struct rq, cfs);
390}
391
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200392#define entity_is_task(se) 1
393
Peter Zijlstrab7581492008-04-19 19:45:00 +0200394#define for_each_sched_entity(se) \
395 for (; se; se = NULL)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200396
Peter Zijlstrab7581492008-04-19 19:45:00 +0200397static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200398{
Peter Zijlstrab7581492008-04-19 19:45:00 +0200399 return &task_rq(p)->cfs;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200400}
401
Peter Zijlstrab7581492008-04-19 19:45:00 +0200402static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
403{
404 struct task_struct *p = task_of(se);
405 struct rq *rq = task_rq(p);
406
407 return &rq->cfs;
408}
409
410/* runqueue "owned" by this group */
411static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
412{
413 return NULL;
414}
415
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800416static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
417{
418}
419
420static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
421{
422}
423
Peter Zijlstrab7581492008-04-19 19:45:00 +0200424#define for_each_leaf_cfs_rq(rq, cfs_rq) \
425 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
426
427static inline int
428is_same_group(struct sched_entity *se, struct sched_entity *pse)
429{
430 return 1;
431}
432
433static inline struct sched_entity *parent_entity(struct sched_entity *se)
434{
435 return NULL;
436}
437
Peter Zijlstra464b7522008-10-24 11:06:15 +0200438static inline void
439find_matching_se(struct sched_entity **se, struct sched_entity **pse)
440{
441}
442
Peter Zijlstrab7581492008-04-19 19:45:00 +0200443#endif /* CONFIG_FAIR_GROUP_SCHED */
444
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -0700445static __always_inline
446void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200447
448/**************************************************************
449 * Scheduling class tree data structure manipulation methods:
450 */
451
Andrei Epure1bf08232013-03-12 21:12:24 +0200452static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
Peter Zijlstra02e04312007-10-15 17:00:07 +0200453{
Andrei Epure1bf08232013-03-12 21:12:24 +0200454 s64 delta = (s64)(vruntime - max_vruntime);
Peter Zijlstra368059a2007-10-15 17:00:11 +0200455 if (delta > 0)
Andrei Epure1bf08232013-03-12 21:12:24 +0200456 max_vruntime = vruntime;
Peter Zijlstra02e04312007-10-15 17:00:07 +0200457
Andrei Epure1bf08232013-03-12 21:12:24 +0200458 return max_vruntime;
Peter Zijlstra02e04312007-10-15 17:00:07 +0200459}
460
Ingo Molnar0702e3e2007-10-15 17:00:14 +0200461static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
Peter Zijlstrab0ffd242007-10-15 17:00:12 +0200462{
463 s64 delta = (s64)(vruntime - min_vruntime);
464 if (delta < 0)
465 min_vruntime = vruntime;
466
467 return min_vruntime;
468}
469
Fabio Checconi54fdc582009-07-16 12:32:27 +0200470static inline int entity_before(struct sched_entity *a,
471 struct sched_entity *b)
472{
473 return (s64)(a->vruntime - b->vruntime) < 0;
474}
475
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200476static void update_min_vruntime(struct cfs_rq *cfs_rq)
477{
478 u64 vruntime = cfs_rq->min_vruntime;
479
480 if (cfs_rq->curr)
481 vruntime = cfs_rq->curr->vruntime;
482
483 if (cfs_rq->rb_leftmost) {
484 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
485 struct sched_entity,
486 run_node);
487
Peter Zijlstrae17036d2009-01-15 14:53:39 +0100488 if (!cfs_rq->curr)
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200489 vruntime = se->vruntime;
490 else
491 vruntime = min_vruntime(vruntime, se->vruntime);
492 }
493
Andrei Epure1bf08232013-03-12 21:12:24 +0200494 /* ensure we never gain time by being placed backwards. */
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200495 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
Peter Zijlstra3fe16982011-04-05 17:23:48 +0200496#ifndef CONFIG_64BIT
497 smp_wmb();
498 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
499#endif
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200500}
501
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200502/*
503 * Enqueue an entity into the rb-tree:
504 */
Ingo Molnar0702e3e2007-10-15 17:00:14 +0200505static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200506{
507 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
508 struct rb_node *parent = NULL;
509 struct sched_entity *entry;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200510 int leftmost = 1;
511
512 /*
513 * Find the right place in the rbtree:
514 */
515 while (*link) {
516 parent = *link;
517 entry = rb_entry(parent, struct sched_entity, run_node);
518 /*
519 * We dont care about collisions. Nodes with
520 * the same key stay together.
521 */
Stephan Baerwolf2bd2d6f2011-07-20 14:46:59 +0200522 if (entity_before(se, entry)) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200523 link = &parent->rb_left;
524 } else {
525 link = &parent->rb_right;
526 leftmost = 0;
527 }
528 }
529
530 /*
531 * Maintain a cache of leftmost tree entries (it is frequently
532 * used):
533 */
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200534 if (leftmost)
Ingo Molnar57cb4992007-10-15 17:00:11 +0200535 cfs_rq->rb_leftmost = &se->run_node;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200536
537 rb_link_node(&se->run_node, parent, link);
538 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200539}
540
Ingo Molnar0702e3e2007-10-15 17:00:14 +0200541static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200542{
Peter Zijlstra3fe69742008-03-14 20:55:51 +0100543 if (cfs_rq->rb_leftmost == &se->run_node) {
544 struct rb_node *next_node;
Peter Zijlstra3fe69742008-03-14 20:55:51 +0100545
546 next_node = rb_next(&se->run_node);
547 cfs_rq->rb_leftmost = next_node;
Peter Zijlstra3fe69742008-03-14 20:55:51 +0100548 }
Ingo Molnare9acbff2007-10-15 17:00:04 +0200549
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200550 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200551}
552
Peter Zijlstra029632f2011-10-25 10:00:11 +0200553struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200554{
Peter Zijlstraf4b67552008-11-04 21:25:07 +0100555 struct rb_node *left = cfs_rq->rb_leftmost;
556
557 if (!left)
558 return NULL;
559
560 return rb_entry(left, struct sched_entity, run_node);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200561}
562
Rik van Rielac53db52011-02-01 09:51:03 -0500563static struct sched_entity *__pick_next_entity(struct sched_entity *se)
564{
565 struct rb_node *next = rb_next(&se->run_node);
566
567 if (!next)
568 return NULL;
569
570 return rb_entry(next, struct sched_entity, run_node);
571}
572
573#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +0200574struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaeb73b02007-10-15 17:00:05 +0200575{
Ingo Molnar7eee3e62008-02-22 10:32:21 +0100576 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +0200577
Balbir Singh70eee742008-02-22 13:25:53 +0530578 if (!last)
579 return NULL;
Ingo Molnar7eee3e62008-02-22 10:32:21 +0100580
581 return rb_entry(last, struct sched_entity, run_node);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +0200582}
583
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200584/**************************************************************
585 * Scheduling class statistics methods:
586 */
587
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100588int sched_proc_update_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -0700589 void __user *buffer, size_t *lenp,
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100590 loff_t *ppos)
591{
Alexey Dobriyan8d65af72009-09-23 15:57:19 -0700592 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100593 int factor = get_update_sysctl_factor();
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100594
595 if (ret || !write)
596 return ret;
597
598 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
599 sysctl_sched_min_granularity);
600
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100601#define WRT_SYSCTL(name) \
602 (normalized_sysctl_##name = sysctl_##name / (factor))
603 WRT_SYSCTL(sched_min_granularity);
604 WRT_SYSCTL(sched_latency);
605 WRT_SYSCTL(sched_wakeup_granularity);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100606#undef WRT_SYSCTL
607
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100608 return 0;
609}
610#endif
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200611
612/*
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200613 * delta /= w
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200614 */
615static inline unsigned long
616calc_delta_fair(unsigned long delta, struct sched_entity *se)
617{
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200618 if (unlikely(se->load.weight != NICE_0_LOAD))
619 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200620
621 return delta;
622}
623
624/*
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200625 * The idea is to set a period in which each task runs once.
626 *
Borislav Petkov532b1852012-08-08 16:16:04 +0200627 * When there are too many tasks (sched_nr_latency) we have to stretch
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200628 * this period because otherwise the slices get too small.
629 *
630 * p = (nr <= nl) ? l : l*nr/nl
631 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200632static u64 __sched_period(unsigned long nr_running)
633{
634 u64 period = sysctl_sched_latency;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100635 unsigned long nr_latency = sched_nr_latency;
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200636
637 if (unlikely(nr_running > nr_latency)) {
Peter Zijlstra4bf0b772008-01-25 21:08:21 +0100638 period = sysctl_sched_min_granularity;
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200639 period *= nr_running;
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200640 }
641
642 return period;
643}
644
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200645/*
646 * We calculate the wall-time slice from the period by taking a part
647 * proportional to the weight.
648 *
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200649 * s = p*P[w/rw]
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200650 */
Peter Zijlstra6d0f0ebd2007-10-15 17:00:05 +0200651static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
Peter Zijlstra21805082007-08-25 18:41:53 +0200652{
Mike Galbraith0a582442009-01-02 12:16:42 +0100653 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200654
Mike Galbraith0a582442009-01-02 12:16:42 +0100655 for_each_sched_entity(se) {
Lin Ming6272d682009-01-15 17:17:15 +0100656 struct load_weight *load;
Christian Engelmayer3104bf02009-06-16 10:35:12 +0200657 struct load_weight lw;
Lin Ming6272d682009-01-15 17:17:15 +0100658
659 cfs_rq = cfs_rq_of(se);
660 load = &cfs_rq->load;
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200661
Mike Galbraith0a582442009-01-02 12:16:42 +0100662 if (unlikely(!se->on_rq)) {
Christian Engelmayer3104bf02009-06-16 10:35:12 +0200663 lw = cfs_rq->load;
Mike Galbraith0a582442009-01-02 12:16:42 +0100664
665 update_load_add(&lw, se->load.weight);
666 load = &lw;
667 }
668 slice = calc_delta_mine(slice, se->load.weight, load);
669 }
670 return slice;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200671}
672
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200673/*
Andrei Epure660cc002013-03-11 12:03:20 +0200674 * We calculate the vruntime slice of a to-be-inserted task.
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200675 *
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200676 * vs = s/w
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200677 */
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200678static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200679{
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200680 return calc_delta_fair(sched_slice(cfs_rq, se), se);
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200681}
682
683/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200684 * Update the current task's runtime statistics. Skip current tasks that
685 * are not in our scheduling class.
686 */
687static inline void
Ingo Molnar8ebc91d2007-10-15 17:00:03 +0200688__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
689 unsigned long delta_exec)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200690{
Ingo Molnarbbdba7c2007-10-15 17:00:06 +0200691 unsigned long delta_exec_weighted;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200692
Lucas De Marchi41acab82010-03-10 23:37:45 -0300693 schedstat_set(curr->statistics.exec_max,
694 max((u64)delta_exec, curr->statistics.exec_max));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200695
696 curr->sum_exec_runtime += delta_exec;
Ingo Molnar7a62eab2007-10-15 17:00:06 +0200697 schedstat_add(cfs_rq, exec_clock, delta_exec);
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200698 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +0100699
Ingo Molnare9acbff2007-10-15 17:00:04 +0200700 curr->vruntime += delta_exec_weighted;
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200701 update_min_vruntime(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200702}
703
Ingo Molnarb7cc0892007-08-09 11:16:47 +0200704static void update_curr(struct cfs_rq *cfs_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200705{
Ingo Molnar429d43b2007-10-15 17:00:03 +0200706 struct sched_entity *curr = cfs_rq->curr;
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200707 u64 now = rq_clock_task(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200708 unsigned long delta_exec;
709
710 if (unlikely(!curr))
711 return;
712
713 /*
714 * Get the amount of time the current task was running
715 * since the last time we changed load (this cannot
716 * overflow on 32 bits):
717 */
Ingo Molnar8ebc91d2007-10-15 17:00:03 +0200718 delta_exec = (unsigned long)(now - curr->exec_start);
Peter Zijlstra34f28ec2008-12-16 08:45:31 +0100719 if (!delta_exec)
720 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200721
Ingo Molnar8ebc91d2007-10-15 17:00:03 +0200722 __update_curr(cfs_rq, curr, delta_exec);
723 curr->exec_start = now;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100724
725 if (entity_is_task(curr)) {
726 struct task_struct *curtask = task_of(curr);
727
Ingo Molnarf977bb42009-09-13 18:15:54 +0200728 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100729 cpuacct_charge(curtask, delta_exec);
Frank Mayharf06febc2008-09-12 09:54:39 -0700730 account_group_exec_runtime(curtask, delta_exec);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100731 }
Paul Turnerec12cb72011-07-21 09:43:30 -0700732
733 account_cfs_rq_runtime(cfs_rq, delta_exec);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200734}
735
736static inline void
Ingo Molnar5870db52007-08-09 11:16:47 +0200737update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200738{
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200739 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200740}
741
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200742/*
743 * Task is being enqueued - update stats:
744 */
Ingo Molnard2417e52007-08-09 11:16:47 +0200745static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200746{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200747 /*
748 * Are we enqueueing a waiting task? (for current tasks
749 * a dequeue/enqueue event is a NOP)
750 */
Ingo Molnar429d43b2007-10-15 17:00:03 +0200751 if (se != cfs_rq->curr)
Ingo Molnar5870db52007-08-09 11:16:47 +0200752 update_stats_wait_start(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200753}
754
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200755static void
Ingo Molnar9ef0a962007-08-09 11:16:47 +0200756update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200757{
Lucas De Marchi41acab82010-03-10 23:37:45 -0300758 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200759 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
Lucas De Marchi41acab82010-03-10 23:37:45 -0300760 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
761 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200762 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
Peter Zijlstra768d0c22009-07-23 20:13:26 +0200763#ifdef CONFIG_SCHEDSTATS
764 if (entity_is_task(se)) {
765 trace_sched_stat_wait(task_of(se),
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200766 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
Peter Zijlstra768d0c22009-07-23 20:13:26 +0200767 }
768#endif
Lucas De Marchi41acab82010-03-10 23:37:45 -0300769 schedstat_set(se->statistics.wait_start, 0);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200770}
771
772static inline void
Ingo Molnar19b6a2e2007-08-09 11:16:48 +0200773update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200774{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200775 /*
776 * Mark the end of the wait period if dequeueing a
777 * waiting task:
778 */
Ingo Molnar429d43b2007-10-15 17:00:03 +0200779 if (se != cfs_rq->curr)
Ingo Molnar9ef0a962007-08-09 11:16:47 +0200780 update_stats_wait_end(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200781}
782
783/*
784 * We are picking a new current task - update its stats:
785 */
786static inline void
Ingo Molnar79303e92007-08-09 11:16:47 +0200787update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200788{
789 /*
790 * We are starting a new run period:
791 */
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200792 se->exec_start = rq_clock_task(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200793}
794
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200795/**************************************************
796 * Scheduling class queueing methods:
797 */
798
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200799#ifdef CONFIG_NUMA_BALANCING
800/*
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200801 * numa task sample period in ms
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200802 */
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200803unsigned int sysctl_numa_balancing_scan_period_min = 100;
Mel Gormanb8593bf2012-11-21 01:18:23 +0000804unsigned int sysctl_numa_balancing_scan_period_max = 100*50;
805unsigned int sysctl_numa_balancing_scan_period_reset = 100*600;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200806
807/* Portion of address space to scan in MB */
808unsigned int sysctl_numa_balancing_scan_size = 256;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200809
Peter Zijlstra4b96a292012-10-25 14:16:47 +0200810/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
811unsigned int sysctl_numa_balancing_scan_delay = 1000;
812
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200813static void task_numa_placement(struct task_struct *p)
814{
Hugh Dickins2832bc12012-12-19 17:42:16 -0800815 int seq;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200816
Hugh Dickins2832bc12012-12-19 17:42:16 -0800817 if (!p->mm) /* for example, ksmd faulting in a user's mm */
818 return;
819 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200820 if (p->numa_scan_seq == seq)
821 return;
822 p->numa_scan_seq = seq;
823
824 /* FIXME: Scheduling placement policy hints go here */
825}
826
827/*
828 * Got a PROT_NONE fault for a page on @node.
829 */
Mel Gormanb8593bf2012-11-21 01:18:23 +0000830void task_numa_fault(int node, int pages, bool migrated)
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200831{
832 struct task_struct *p = current;
833
Mel Gorman1a687c22012-11-22 11:16:36 +0000834 if (!sched_feat_numa(NUMA))
835 return;
836
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200837 /* FIXME: Allocate task-specific structure for placement policy here */
838
Mel Gormanfb003b82012-11-15 09:01:14 +0000839 /*
Mel Gormanb8593bf2012-11-21 01:18:23 +0000840 * If pages are properly placed (did not migrate) then scan slower.
841 * This is reset periodically in case of phase changes
Mel Gormanfb003b82012-11-15 09:01:14 +0000842 */
Mel Gormanb8593bf2012-11-21 01:18:23 +0000843 if (!migrated)
844 p->numa_scan_period = min(sysctl_numa_balancing_scan_period_max,
845 p->numa_scan_period + jiffies_to_msecs(10));
Mel Gormanfb003b82012-11-15 09:01:14 +0000846
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200847 task_numa_placement(p);
848}
849
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200850static void reset_ptenuma_scan(struct task_struct *p)
851{
852 ACCESS_ONCE(p->mm->numa_scan_seq)++;
853 p->mm->numa_scan_offset = 0;
854}
855
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200856/*
857 * The expensive part of numa migration is done from task_work context.
858 * Triggered from task_tick_numa().
859 */
860void task_numa_work(struct callback_head *work)
861{
862 unsigned long migrate, next_scan, now = jiffies;
863 struct task_struct *p = current;
864 struct mm_struct *mm = p->mm;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200865 struct vm_area_struct *vma;
Mel Gorman9f406042012-11-14 18:34:32 +0000866 unsigned long start, end;
867 long pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200868
869 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
870
871 work->next = work; /* protect against double add */
872 /*
873 * Who cares about NUMA placement when they're dying.
874 *
875 * NOTE: make sure not to dereference p->mm before this check,
876 * exit_task_work() happens _after_ exit_mm() so we could be called
877 * without p->mm even though we still had it when we enqueued this
878 * work.
879 */
880 if (p->flags & PF_EXITING)
881 return;
882
883 /*
Mel Gorman5bca2302012-11-22 14:40:03 +0000884 * We do not care about task placement until a task runs on a node
885 * other than the first one used by the address space. This is
886 * largely because migrations are driven by what CPU the task
887 * is running on. If it's never scheduled on another node, it'll
888 * not migrate so why bother trapping the fault.
889 */
890 if (mm->first_nid == NUMA_PTE_SCAN_INIT)
891 mm->first_nid = numa_node_id();
892 if (mm->first_nid != NUMA_PTE_SCAN_ACTIVE) {
893 /* Are we running on a new node yet? */
894 if (numa_node_id() == mm->first_nid &&
895 !sched_feat_numa(NUMA_FORCE))
896 return;
897
898 mm->first_nid = NUMA_PTE_SCAN_ACTIVE;
899 }
900
901 /*
Mel Gormanb8593bf2012-11-21 01:18:23 +0000902 * Reset the scan period if enough time has gone by. Objective is that
903 * scanning will be reduced if pages are properly placed. As tasks
904 * can enter different phases this needs to be re-examined. Lacking
905 * proper tracking of reference behaviour, this blunt hammer is used.
906 */
907 migrate = mm->numa_next_reset;
908 if (time_after(now, migrate)) {
909 p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
910 next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
911 xchg(&mm->numa_next_reset, next_scan);
912 }
913
914 /*
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200915 * Enforce maximal scan/migration frequency..
916 */
917 migrate = mm->numa_next_scan;
918 if (time_before(now, migrate))
919 return;
920
921 if (p->numa_scan_period == 0)
922 p->numa_scan_period = sysctl_numa_balancing_scan_period_min;
923
Mel Gormanfb003b82012-11-15 09:01:14 +0000924 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200925 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
926 return;
927
Mel Gormane14808b2012-11-19 10:59:15 +0000928 /*
929 * Do not set pte_numa if the current running node is rate-limited.
930 * This loses statistics on the fault but if we are unwilling to
931 * migrate to this node, it is less likely we can do useful work
932 */
933 if (migrate_ratelimited(numa_node_id()))
934 return;
935
Mel Gorman9f406042012-11-14 18:34:32 +0000936 start = mm->numa_scan_offset;
937 pages = sysctl_numa_balancing_scan_size;
938 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
939 if (!pages)
940 return;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200941
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200942 down_read(&mm->mmap_sem);
Mel Gorman9f406042012-11-14 18:34:32 +0000943 vma = find_vma(mm, start);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200944 if (!vma) {
945 reset_ptenuma_scan(p);
Mel Gorman9f406042012-11-14 18:34:32 +0000946 start = 0;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200947 vma = mm->mmap;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200948 }
Mel Gorman9f406042012-11-14 18:34:32 +0000949 for (; vma; vma = vma->vm_next) {
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200950 if (!vma_migratable(vma))
951 continue;
952
953 /* Skip small VMAs. They are not likely to be of relevance */
Mel Gorman221392c2012-12-17 14:05:53 +0000954 if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200955 continue;
956
Mel Gorman9f406042012-11-14 18:34:32 +0000957 do {
958 start = max(start, vma->vm_start);
959 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
960 end = min(end, vma->vm_end);
961 pages -= change_prot_numa(vma, start, end);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200962
Mel Gorman9f406042012-11-14 18:34:32 +0000963 start = end;
964 if (pages <= 0)
965 goto out;
966 } while (end != vma->vm_end);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200967 }
968
Mel Gorman9f406042012-11-14 18:34:32 +0000969out:
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200970 /*
971 * It is possible to reach the end of the VMA list but the last few VMAs are
972 * not guaranteed to the vma_migratable. If they are not, we would find the
973 * !migratable VMA on the next scan but not reset the scanner to the start
974 * so check it now.
975 */
976 if (vma)
Mel Gorman9f406042012-11-14 18:34:32 +0000977 mm->numa_scan_offset = start;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200978 else
979 reset_ptenuma_scan(p);
980 up_read(&mm->mmap_sem);
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200981}
982
983/*
984 * Drive the periodic memory faults..
985 */
986void task_tick_numa(struct rq *rq, struct task_struct *curr)
987{
988 struct callback_head *work = &curr->numa_work;
989 u64 period, now;
990
991 /*
992 * We don't care about NUMA placement if we don't have memory.
993 */
994 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
995 return;
996
997 /*
998 * Using runtime rather than walltime has the dual advantage that
999 * we (mostly) drive the selection from busy threads and that the
1000 * task needs to have done some actual work before we bother with
1001 * NUMA placement.
1002 */
1003 now = curr->se.sum_exec_runtime;
1004 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1005
1006 if (now - curr->node_stamp > period) {
Peter Zijlstra4b96a292012-10-25 14:16:47 +02001007 if (!curr->node_stamp)
1008 curr->numa_scan_period = sysctl_numa_balancing_scan_period_min;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001009 curr->node_stamp = now;
1010
1011 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1012 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1013 task_work_add(curr, work, true);
1014 }
1015 }
1016}
1017#else
1018static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1019{
1020}
1021#endif /* CONFIG_NUMA_BALANCING */
1022
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001023static void
1024account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1025{
1026 update_load_add(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001027 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02001028 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001029#ifdef CONFIG_SMP
1030 if (entity_is_task(se))
Peter Zijlstraeb953082012-04-17 13:38:40 +02001031 list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001032#endif
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001033 cfs_rq->nr_running++;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001034}
1035
1036static void
1037account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1038{
1039 update_load_sub(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001040 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02001041 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001042 if (entity_is_task(se))
Bharata B Raob87f1722008-09-25 09:53:54 +05301043 list_del_init(&se->group_node);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001044 cfs_rq->nr_running--;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001045}
1046
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001047#ifdef CONFIG_FAIR_GROUP_SCHED
1048# ifdef CONFIG_SMP
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001049static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1050{
1051 long tg_weight;
1052
1053 /*
1054 * Use this CPU's actual weight instead of the last load_contribution
1055 * to gain a more accurate current total weight. See
1056 * update_cfs_rq_load_contribution().
1057 */
Paul Turner82958362012-10-04 13:18:31 +02001058 tg_weight = atomic64_read(&tg->load_avg);
1059 tg_weight -= cfs_rq->tg_load_contrib;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001060 tg_weight += cfs_rq->load.weight;
1061
1062 return tg_weight;
1063}
1064
Paul Turner6d5ab292011-01-21 20:45:01 -08001065static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001066{
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001067 long tg_weight, load, shares;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001068
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001069 tg_weight = calc_tg_weight(tg, cfs_rq);
Paul Turner6d5ab292011-01-21 20:45:01 -08001070 load = cfs_rq->load.weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001071
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001072 shares = (tg->shares * load);
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001073 if (tg_weight)
1074 shares /= tg_weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001075
1076 if (shares < MIN_SHARES)
1077 shares = MIN_SHARES;
1078 if (shares > tg->shares)
1079 shares = tg->shares;
1080
1081 return shares;
1082}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001083# else /* CONFIG_SMP */
Paul Turner6d5ab292011-01-21 20:45:01 -08001084static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001085{
1086 return tg->shares;
1087}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001088# endif /* CONFIG_SMP */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001089static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1090 unsigned long weight)
1091{
Paul Turner19e5eeb2010-12-15 19:10:18 -08001092 if (se->on_rq) {
1093 /* commit outstanding execution time */
1094 if (cfs_rq->curr == se)
1095 update_curr(cfs_rq);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001096 account_entity_dequeue(cfs_rq, se);
Paul Turner19e5eeb2010-12-15 19:10:18 -08001097 }
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001098
1099 update_load_set(&se->load, weight);
1100
1101 if (se->on_rq)
1102 account_entity_enqueue(cfs_rq, se);
1103}
1104
Paul Turner82958362012-10-04 13:18:31 +02001105static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1106
Paul Turner6d5ab292011-01-21 20:45:01 -08001107static void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001108{
1109 struct task_group *tg;
1110 struct sched_entity *se;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001111 long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001112
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001113 tg = cfs_rq->tg;
1114 se = tg->se[cpu_of(rq_of(cfs_rq))];
Paul Turner64660c82011-07-21 09:43:36 -07001115 if (!se || throttled_hierarchy(cfs_rq))
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001116 return;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001117#ifndef CONFIG_SMP
1118 if (likely(se->load.weight == tg->shares))
1119 return;
1120#endif
Paul Turner6d5ab292011-01-21 20:45:01 -08001121 shares = calc_cfs_shares(cfs_rq, tg);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001122
1123 reweight_entity(cfs_rq_of(se), se, shares);
1124}
1125#else /* CONFIG_FAIR_GROUP_SCHED */
Paul Turner6d5ab292011-01-21 20:45:01 -08001126static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001127{
1128}
1129#endif /* CONFIG_FAIR_GROUP_SCHED */
1130
Alex Shi141965c2013-06-26 13:05:39 +08001131#ifdef CONFIG_SMP
Paul Turner9d85f212012-10-04 13:18:29 +02001132/*
Paul Turner5b51f2f2012-10-04 13:18:32 +02001133 * We choose a half-life close to 1 scheduling period.
1134 * Note: The tables below are dependent on this value.
1135 */
1136#define LOAD_AVG_PERIOD 32
1137#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1138#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */
1139
1140/* Precomputed fixed inverse multiplies for multiplication by y^n */
1141static const u32 runnable_avg_yN_inv[] = {
1142 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1143 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1144 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1145 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1146 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1147 0x85aac367, 0x82cd8698,
1148};
1149
1150/*
1151 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
1152 * over-estimates when re-combining.
1153 */
1154static const u32 runnable_avg_yN_sum[] = {
1155 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1156 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1157 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1158};
1159
1160/*
Paul Turner9d85f212012-10-04 13:18:29 +02001161 * Approximate:
1162 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
1163 */
1164static __always_inline u64 decay_load(u64 val, u64 n)
1165{
Paul Turner5b51f2f2012-10-04 13:18:32 +02001166 unsigned int local_n;
1167
1168 if (!n)
1169 return val;
1170 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1171 return 0;
1172
1173 /* after bounds checking we can collapse to 32-bit */
1174 local_n = n;
1175
1176 /*
1177 * As y^PERIOD = 1/2, we can combine
1178 * y^n = 1/2^(n/PERIOD) * k^(n%PERIOD)
1179 * With a look-up table which covers k^n (n<PERIOD)
1180 *
1181 * To achieve constant time decay_load.
1182 */
1183 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
1184 val >>= local_n / LOAD_AVG_PERIOD;
1185 local_n %= LOAD_AVG_PERIOD;
Paul Turner9d85f212012-10-04 13:18:29 +02001186 }
1187
Paul Turner5b51f2f2012-10-04 13:18:32 +02001188 val *= runnable_avg_yN_inv[local_n];
1189 /* We don't use SRR here since we always want to round down. */
1190 return val >> 32;
1191}
1192
1193/*
1194 * For updates fully spanning n periods, the contribution to runnable
1195 * average will be: \Sum 1024*y^n
1196 *
1197 * We can compute this reasonably efficiently by combining:
1198 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
1199 */
1200static u32 __compute_runnable_contrib(u64 n)
1201{
1202 u32 contrib = 0;
1203
1204 if (likely(n <= LOAD_AVG_PERIOD))
1205 return runnable_avg_yN_sum[n];
1206 else if (unlikely(n >= LOAD_AVG_MAX_N))
1207 return LOAD_AVG_MAX;
1208
1209 /* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
1210 do {
1211 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
1212 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
1213
1214 n -= LOAD_AVG_PERIOD;
1215 } while (n > LOAD_AVG_PERIOD);
1216
1217 contrib = decay_load(contrib, n);
1218 return contrib + runnable_avg_yN_sum[n];
Paul Turner9d85f212012-10-04 13:18:29 +02001219}
1220
1221/*
1222 * We can represent the historical contribution to runnable average as the
1223 * coefficients of a geometric series. To do this we sub-divide our runnable
1224 * history into segments of approximately 1ms (1024us); label the segment that
1225 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1226 *
1227 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1228 * p0 p1 p2
1229 * (now) (~1ms ago) (~2ms ago)
1230 *
1231 * Let u_i denote the fraction of p_i that the entity was runnable.
1232 *
1233 * We then designate the fractions u_i as our co-efficients, yielding the
1234 * following representation of historical load:
1235 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1236 *
1237 * We choose y based on the with of a reasonably scheduling period, fixing:
1238 * y^32 = 0.5
1239 *
1240 * This means that the contribution to load ~32ms ago (u_32) will be weighted
1241 * approximately half as much as the contribution to load within the last ms
1242 * (u_0).
1243 *
1244 * When a period "rolls over" and we have new u_0`, multiplying the previous
1245 * sum again by y is sufficient to update:
1246 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1247 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1248 */
1249static __always_inline int __update_entity_runnable_avg(u64 now,
1250 struct sched_avg *sa,
1251 int runnable)
1252{
Paul Turner5b51f2f2012-10-04 13:18:32 +02001253 u64 delta, periods;
1254 u32 runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02001255 int delta_w, decayed = 0;
1256
1257 delta = now - sa->last_runnable_update;
1258 /*
1259 * This should only happen when time goes backwards, which it
1260 * unfortunately does during sched clock init when we swap over to TSC.
1261 */
1262 if ((s64)delta < 0) {
1263 sa->last_runnable_update = now;
1264 return 0;
1265 }
1266
1267 /*
1268 * Use 1024ns as the unit of measurement since it's a reasonable
1269 * approximation of 1us and fast to compute.
1270 */
1271 delta >>= 10;
1272 if (!delta)
1273 return 0;
1274 sa->last_runnable_update = now;
1275
1276 /* delta_w is the amount already accumulated against our next period */
1277 delta_w = sa->runnable_avg_period % 1024;
1278 if (delta + delta_w >= 1024) {
1279 /* period roll-over */
1280 decayed = 1;
1281
1282 /*
1283 * Now that we know we're crossing a period boundary, figure
1284 * out how much from delta we need to complete the current
1285 * period and accrue it.
1286 */
1287 delta_w = 1024 - delta_w;
Paul Turner5b51f2f2012-10-04 13:18:32 +02001288 if (runnable)
1289 sa->runnable_avg_sum += delta_w;
1290 sa->runnable_avg_period += delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02001291
Paul Turner5b51f2f2012-10-04 13:18:32 +02001292 delta -= delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02001293
Paul Turner5b51f2f2012-10-04 13:18:32 +02001294 /* Figure out how many additional periods this update spans */
1295 periods = delta / 1024;
1296 delta %= 1024;
1297
1298 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1299 periods + 1);
1300 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1301 periods + 1);
1302
1303 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
1304 runnable_contrib = __compute_runnable_contrib(periods);
1305 if (runnable)
1306 sa->runnable_avg_sum += runnable_contrib;
1307 sa->runnable_avg_period += runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02001308 }
1309
1310 /* Remainder of delta accrued against u_0` */
1311 if (runnable)
1312 sa->runnable_avg_sum += delta;
1313 sa->runnable_avg_period += delta;
1314
1315 return decayed;
1316}
1317
Paul Turner9ee474f2012-10-04 13:18:30 +02001318/* Synchronize an entity's decay with its parenting cfs_rq.*/
Paul Turneraff3e492012-10-04 13:18:30 +02001319static inline u64 __synchronize_entity_decay(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02001320{
1321 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1322 u64 decays = atomic64_read(&cfs_rq->decay_counter);
1323
1324 decays -= se->avg.decay_count;
1325 if (!decays)
Paul Turneraff3e492012-10-04 13:18:30 +02001326 return 0;
Paul Turner9ee474f2012-10-04 13:18:30 +02001327
1328 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1329 se->avg.decay_count = 0;
Paul Turneraff3e492012-10-04 13:18:30 +02001330
1331 return decays;
Paul Turner9ee474f2012-10-04 13:18:30 +02001332}
1333
Paul Turnerc566e8e2012-10-04 13:18:30 +02001334#ifdef CONFIG_FAIR_GROUP_SCHED
1335static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1336 int force_update)
1337{
1338 struct task_group *tg = cfs_rq->tg;
1339 s64 tg_contrib;
1340
1341 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1342 tg_contrib -= cfs_rq->tg_load_contrib;
1343
1344 if (force_update || abs64(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1345 atomic64_add(tg_contrib, &tg->load_avg);
1346 cfs_rq->tg_load_contrib += tg_contrib;
1347 }
1348}
Paul Turner8165e142012-10-04 13:18:31 +02001349
Paul Turnerbb17f652012-10-04 13:18:31 +02001350/*
1351 * Aggregate cfs_rq runnable averages into an equivalent task_group
1352 * representation for computing load contributions.
1353 */
1354static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1355 struct cfs_rq *cfs_rq)
1356{
1357 struct task_group *tg = cfs_rq->tg;
1358 long contrib;
1359
1360 /* The fraction of a cpu used by this cfs_rq */
1361 contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1362 sa->runnable_avg_period + 1);
1363 contrib -= cfs_rq->tg_runnable_contrib;
1364
1365 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1366 atomic_add(contrib, &tg->runnable_avg);
1367 cfs_rq->tg_runnable_contrib += contrib;
1368 }
1369}
1370
Paul Turner8165e142012-10-04 13:18:31 +02001371static inline void __update_group_entity_contrib(struct sched_entity *se)
1372{
1373 struct cfs_rq *cfs_rq = group_cfs_rq(se);
1374 struct task_group *tg = cfs_rq->tg;
Paul Turnerbb17f652012-10-04 13:18:31 +02001375 int runnable_avg;
1376
Paul Turner8165e142012-10-04 13:18:31 +02001377 u64 contrib;
1378
1379 contrib = cfs_rq->tg_load_contrib * tg->shares;
1380 se->avg.load_avg_contrib = div64_u64(contrib,
1381 atomic64_read(&tg->load_avg) + 1);
Paul Turnerbb17f652012-10-04 13:18:31 +02001382
1383 /*
1384 * For group entities we need to compute a correction term in the case
1385 * that they are consuming <1 cpu so that we would contribute the same
1386 * load as a task of equal weight.
1387 *
1388 * Explicitly co-ordinating this measurement would be expensive, but
1389 * fortunately the sum of each cpus contribution forms a usable
1390 * lower-bound on the true value.
1391 *
1392 * Consider the aggregate of 2 contributions. Either they are disjoint
1393 * (and the sum represents true value) or they are disjoint and we are
1394 * understating by the aggregate of their overlap.
1395 *
1396 * Extending this to N cpus, for a given overlap, the maximum amount we
1397 * understand is then n_i(n_i+1)/2 * w_i where n_i is the number of
1398 * cpus that overlap for this interval and w_i is the interval width.
1399 *
1400 * On a small machine; the first term is well-bounded which bounds the
1401 * total error since w_i is a subset of the period. Whereas on a
1402 * larger machine, while this first term can be larger, if w_i is the
1403 * of consequential size guaranteed to see n_i*w_i quickly converge to
1404 * our upper bound of 1-cpu.
1405 */
1406 runnable_avg = atomic_read(&tg->runnable_avg);
1407 if (runnable_avg < NICE_0_LOAD) {
1408 se->avg.load_avg_contrib *= runnable_avg;
1409 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1410 }
Paul Turner8165e142012-10-04 13:18:31 +02001411}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001412#else
1413static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1414 int force_update) {}
Paul Turnerbb17f652012-10-04 13:18:31 +02001415static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1416 struct cfs_rq *cfs_rq) {}
Paul Turner8165e142012-10-04 13:18:31 +02001417static inline void __update_group_entity_contrib(struct sched_entity *se) {}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001418#endif
1419
Paul Turner8165e142012-10-04 13:18:31 +02001420static inline void __update_task_entity_contrib(struct sched_entity *se)
1421{
1422 u32 contrib;
1423
1424 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1425 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1426 contrib /= (se->avg.runnable_avg_period + 1);
1427 se->avg.load_avg_contrib = scale_load(contrib);
1428}
1429
Paul Turner2dac7542012-10-04 13:18:30 +02001430/* Compute the current contribution to load_avg by se, return any delta */
1431static long __update_entity_load_avg_contrib(struct sched_entity *se)
1432{
1433 long old_contrib = se->avg.load_avg_contrib;
1434
Paul Turner8165e142012-10-04 13:18:31 +02001435 if (entity_is_task(se)) {
1436 __update_task_entity_contrib(se);
1437 } else {
Paul Turnerbb17f652012-10-04 13:18:31 +02001438 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
Paul Turner8165e142012-10-04 13:18:31 +02001439 __update_group_entity_contrib(se);
1440 }
Paul Turner2dac7542012-10-04 13:18:30 +02001441
1442 return se->avg.load_avg_contrib - old_contrib;
1443}
1444
Paul Turner9ee474f2012-10-04 13:18:30 +02001445static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1446 long load_contrib)
1447{
1448 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1449 cfs_rq->blocked_load_avg -= load_contrib;
1450 else
1451 cfs_rq->blocked_load_avg = 0;
1452}
1453
Paul Turnerf1b17282012-10-04 13:18:31 +02001454static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1455
Paul Turner9d85f212012-10-04 13:18:29 +02001456/* Update a sched_entity's runnable average */
Paul Turner9ee474f2012-10-04 13:18:30 +02001457static inline void update_entity_load_avg(struct sched_entity *se,
1458 int update_cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02001459{
Paul Turner2dac7542012-10-04 13:18:30 +02001460 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1461 long contrib_delta;
Paul Turnerf1b17282012-10-04 13:18:31 +02001462 u64 now;
Paul Turner2dac7542012-10-04 13:18:30 +02001463
Paul Turnerf1b17282012-10-04 13:18:31 +02001464 /*
1465 * For a group entity we need to use their owned cfs_rq_clock_task() in
1466 * case they are the parent of a throttled hierarchy.
1467 */
1468 if (entity_is_task(se))
1469 now = cfs_rq_clock_task(cfs_rq);
1470 else
1471 now = cfs_rq_clock_task(group_cfs_rq(se));
1472
1473 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
Paul Turner2dac7542012-10-04 13:18:30 +02001474 return;
1475
1476 contrib_delta = __update_entity_load_avg_contrib(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02001477
1478 if (!update_cfs_rq)
1479 return;
1480
Paul Turner2dac7542012-10-04 13:18:30 +02001481 if (se->on_rq)
1482 cfs_rq->runnable_load_avg += contrib_delta;
Paul Turner9ee474f2012-10-04 13:18:30 +02001483 else
1484 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1485}
1486
1487/*
1488 * Decay the load contributed by all blocked children and account this so that
1489 * their contribution may appropriately discounted when they wake up.
1490 */
Paul Turneraff3e492012-10-04 13:18:30 +02001491static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001492{
Paul Turnerf1b17282012-10-04 13:18:31 +02001493 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001494 u64 decays;
1495
1496 decays = now - cfs_rq->last_decay;
Paul Turneraff3e492012-10-04 13:18:30 +02001497 if (!decays && !force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001498 return;
1499
Paul Turneraff3e492012-10-04 13:18:30 +02001500 if (atomic64_read(&cfs_rq->removed_load)) {
1501 u64 removed_load = atomic64_xchg(&cfs_rq->removed_load, 0);
1502 subtract_blocked_load_contrib(cfs_rq, removed_load);
1503 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001504
Paul Turneraff3e492012-10-04 13:18:30 +02001505 if (decays) {
1506 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1507 decays);
1508 atomic64_add(decays, &cfs_rq->decay_counter);
1509 cfs_rq->last_decay = now;
1510 }
Paul Turnerc566e8e2012-10-04 13:18:30 +02001511
1512 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
Paul Turner9d85f212012-10-04 13:18:29 +02001513}
Ben Segall18bf2802012-10-04 12:51:20 +02001514
1515static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1516{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001517 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
Paul Turnerbb17f652012-10-04 13:18:31 +02001518 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
Ben Segall18bf2802012-10-04 12:51:20 +02001519}
Paul Turner2dac7542012-10-04 13:18:30 +02001520
1521/* Add the load generated by se into cfs_rq's child load-average */
1522static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001523 struct sched_entity *se,
1524 int wakeup)
Paul Turner2dac7542012-10-04 13:18:30 +02001525{
Paul Turneraff3e492012-10-04 13:18:30 +02001526 /*
1527 * We track migrations using entity decay_count <= 0, on a wake-up
1528 * migration we use a negative decay count to track the remote decays
1529 * accumulated while sleeping.
1530 */
1531 if (unlikely(se->avg.decay_count <= 0)) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001532 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
Paul Turneraff3e492012-10-04 13:18:30 +02001533 if (se->avg.decay_count) {
1534 /*
1535 * In a wake-up migration we have to approximate the
1536 * time sleeping. This is because we can't synchronize
1537 * clock_task between the two cpus, and it is not
1538 * guaranteed to be read-safe. Instead, we can
1539 * approximate this using our carried decays, which are
1540 * explicitly atomically readable.
1541 */
1542 se->avg.last_runnable_update -= (-se->avg.decay_count)
1543 << 20;
1544 update_entity_load_avg(se, 0);
1545 /* Indicate that we're now synchronized and on-rq */
1546 se->avg.decay_count = 0;
1547 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001548 wakeup = 0;
1549 } else {
1550 __synchronize_entity_decay(se);
1551 }
1552
Paul Turneraff3e492012-10-04 13:18:30 +02001553 /* migrated tasks did not contribute to our blocked load */
1554 if (wakeup) {
Paul Turner9ee474f2012-10-04 13:18:30 +02001555 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turneraff3e492012-10-04 13:18:30 +02001556 update_entity_load_avg(se, 0);
1557 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001558
Paul Turner2dac7542012-10-04 13:18:30 +02001559 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02001560 /* we force update consideration on load-balancer moves */
1561 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
Paul Turner2dac7542012-10-04 13:18:30 +02001562}
1563
Paul Turner9ee474f2012-10-04 13:18:30 +02001564/*
1565 * Remove se's load from this cfs_rq child load-average, if the entity is
1566 * transitioning to a blocked state we track its projected decay using
1567 * blocked_load_avg.
1568 */
Paul Turner2dac7542012-10-04 13:18:30 +02001569static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001570 struct sched_entity *se,
1571 int sleep)
Paul Turner2dac7542012-10-04 13:18:30 +02001572{
Paul Turner9ee474f2012-10-04 13:18:30 +02001573 update_entity_load_avg(se, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02001574 /* we force update consideration on load-balancer moves */
1575 update_cfs_rq_blocked_load(cfs_rq, !sleep);
Paul Turner9ee474f2012-10-04 13:18:30 +02001576
Paul Turner2dac7542012-10-04 13:18:30 +02001577 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
Paul Turner9ee474f2012-10-04 13:18:30 +02001578 if (sleep) {
1579 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1580 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1581 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
Paul Turner2dac7542012-10-04 13:18:30 +02001582}
Vincent Guittot642dbc32013-04-18 18:34:26 +02001583
1584/*
1585 * Update the rq's load with the elapsed running time before entering
1586 * idle. if the last scheduled task is not a CFS task, idle_enter will
1587 * be the only way to update the runnable statistic.
1588 */
1589void idle_enter_fair(struct rq *this_rq)
1590{
1591 update_rq_runnable_avg(this_rq, 1);
1592}
1593
1594/*
1595 * Update the rq's load with the elapsed idle time before a task is
1596 * scheduled. if the newly scheduled task is not a CFS task, idle_exit will
1597 * be the only way to update the runnable statistic.
1598 */
1599void idle_exit_fair(struct rq *this_rq)
1600{
1601 update_rq_runnable_avg(this_rq, 0);
1602}
1603
Paul Turner9d85f212012-10-04 13:18:29 +02001604#else
Paul Turner9ee474f2012-10-04 13:18:30 +02001605static inline void update_entity_load_avg(struct sched_entity *se,
1606 int update_cfs_rq) {}
Ben Segall18bf2802012-10-04 12:51:20 +02001607static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001608static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001609 struct sched_entity *se,
1610 int wakeup) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001611static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001612 struct sched_entity *se,
1613 int sleep) {}
Paul Turneraff3e492012-10-04 13:18:30 +02001614static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1615 int force_update) {}
Paul Turner9d85f212012-10-04 13:18:29 +02001616#endif
1617
Ingo Molnar2396af62007-08-09 11:16:48 +02001618static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001619{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001620#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02001621 struct task_struct *tsk = NULL;
1622
1623 if (entity_is_task(se))
1624 tsk = task_of(se);
1625
Lucas De Marchi41acab82010-03-10 23:37:45 -03001626 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001627 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001628
1629 if ((s64)delta < 0)
1630 delta = 0;
1631
Lucas De Marchi41acab82010-03-10 23:37:45 -03001632 if (unlikely(delta > se->statistics.sleep_max))
1633 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001634
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001635 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001636 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01001637
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001638 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02001639 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001640 trace_sched_stat_sleep(tsk, delta);
1641 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001642 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03001643 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001644 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001645
1646 if ((s64)delta < 0)
1647 delta = 0;
1648
Lucas De Marchi41acab82010-03-10 23:37:45 -03001649 if (unlikely(delta > se->statistics.block_max))
1650 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001651
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001652 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001653 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02001654
Peter Zijlstrae4143142009-07-23 20:13:26 +02001655 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001656 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001657 se->statistics.iowait_sum += delta;
1658 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001659 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001660 }
1661
Andrew Vaginb781a602011-11-28 12:03:35 +03001662 trace_sched_stat_blocked(tsk, delta);
1663
Peter Zijlstrae4143142009-07-23 20:13:26 +02001664 /*
1665 * Blocking time is in units of nanosecs, so shift by
1666 * 20 to get a milliseconds-range estimation of the
1667 * amount of time that the task spent sleeping:
1668 */
1669 if (unlikely(prof_on == SLEEP_PROFILING)) {
1670 profile_hits(SLEEP_PROFILING,
1671 (void *)get_wchan(tsk),
1672 delta >> 20);
1673 }
1674 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02001675 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001676 }
1677#endif
1678}
1679
Peter Zijlstraddc97292007-10-15 17:00:10 +02001680static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1681{
1682#ifdef CONFIG_SCHED_DEBUG
1683 s64 d = se->vruntime - cfs_rq->min_vruntime;
1684
1685 if (d < 0)
1686 d = -d;
1687
1688 if (d > 3*sysctl_sched_latency)
1689 schedstat_inc(cfs_rq, nr_spread_over);
1690#endif
1691}
1692
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001693static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001694place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1695{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02001696 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001697
Peter Zijlstra2cb86002007-11-09 22:39:37 +01001698 /*
1699 * The 'current' period is already promised to the current tasks,
1700 * however the extra weight of the new task will slow them down a
1701 * little, place the new task so that it fits in the slot that
1702 * stays open at the end.
1703 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001704 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02001705 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001706
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001707 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01001708 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001709 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001710
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001711 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001712 * Halve their sleep time's effect, to allow
1713 * for a gentler effect of sleepers:
1714 */
1715 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1716 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02001717
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001718 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001719 }
1720
Mike Galbraithb5d9d732009-09-08 11:12:28 +02001721 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05301722 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001723}
1724
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001725static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1726
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001727static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001728enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001729{
1730 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001731 * Update the normalized vruntime before updating min_vruntime
1732 * through callig update_curr().
1733 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001734 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001735 se->vruntime += cfs_rq->min_vruntime;
1736
1737 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001738 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001739 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02001740 update_curr(cfs_rq);
Paul Turnerf269ae02012-10-04 13:18:31 +02001741 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001742 account_entity_enqueue(cfs_rq, se);
1743 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001744
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001745 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001746 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02001747 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02001748 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001749
Ingo Molnard2417e52007-08-09 11:16:47 +02001750 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02001751 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001752 if (se != cfs_rq->curr)
1753 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001754 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001755
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001756 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001757 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001758 check_enqueue_throttle(cfs_rq);
1759 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001760}
1761
Rik van Riel2c13c9192011-02-01 09:48:37 -05001762static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01001763{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001764 for_each_sched_entity(se) {
1765 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1766 if (cfs_rq->last == se)
1767 cfs_rq->last = NULL;
1768 else
1769 break;
1770 }
1771}
Peter Zijlstra2002c692008-11-11 11:52:33 +01001772
Rik van Riel2c13c9192011-02-01 09:48:37 -05001773static void __clear_buddies_next(struct sched_entity *se)
1774{
1775 for_each_sched_entity(se) {
1776 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1777 if (cfs_rq->next == se)
1778 cfs_rq->next = NULL;
1779 else
1780 break;
1781 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01001782}
1783
Rik van Rielac53db52011-02-01 09:51:03 -05001784static void __clear_buddies_skip(struct sched_entity *se)
1785{
1786 for_each_sched_entity(se) {
1787 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1788 if (cfs_rq->skip == se)
1789 cfs_rq->skip = NULL;
1790 else
1791 break;
1792 }
1793}
1794
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001795static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1796{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001797 if (cfs_rq->last == se)
1798 __clear_buddies_last(se);
1799
1800 if (cfs_rq->next == se)
1801 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05001802
1803 if (cfs_rq->skip == se)
1804 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001805}
1806
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07001807static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07001808
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001809static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001810dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001811{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001812 /*
1813 * Update run-time statistics of the 'current'.
1814 */
1815 update_curr(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001816 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001817
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02001818 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001819 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02001820#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001821 if (entity_is_task(se)) {
1822 struct task_struct *tsk = task_of(se);
1823
1824 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001825 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001826 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001827 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001828 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02001829#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02001830 }
1831
Peter Zijlstra2002c692008-11-11 11:52:33 +01001832 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01001833
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001834 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001835 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001836 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001837 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001838
1839 /*
1840 * Normalize the entity after updating the min_vruntime because the
1841 * update can refer to the ->curr item and we need to reflect this
1842 * movement in our normalized position.
1843 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001844 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001845 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07001846
Paul Turnerd8b49862011-07-21 09:43:41 -07001847 /* return excess runtime on last dequeue */
1848 return_cfs_rq_runtime(cfs_rq);
1849
Peter Zijlstra1e876232011-05-17 16:21:10 -07001850 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001851 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001852}
1853
1854/*
1855 * Preempt the current task with a newly woken task if needed:
1856 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02001857static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02001858check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001859{
Peter Zijlstra11697832007-09-05 14:32:49 +02001860 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001861 struct sched_entity *se;
1862 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02001863
Peter Zijlstra6d0f0ebd2007-10-15 17:00:05 +02001864 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02001865 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01001866 if (delta_exec > ideal_runtime) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001867 resched_task(rq_of(cfs_rq)->curr);
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01001868 /*
1869 * The current task ran long enough, ensure it doesn't get
1870 * re-elected due to buddy favours.
1871 */
1872 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02001873 return;
1874 }
1875
1876 /*
1877 * Ensure that a task that missed wakeup preemption by a
1878 * narrow margin doesn't have to wait for a full slice.
1879 * This also mitigates buddy induced latencies under load.
1880 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02001881 if (delta_exec < sysctl_sched_min_granularity)
1882 return;
1883
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001884 se = __pick_first_entity(cfs_rq);
1885 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02001886
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001887 if (delta < 0)
1888 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01001889
Wang Xingchaof4cfb332011-09-16 13:35:52 -04001890 if (delta > ideal_runtime)
1891 resched_task(rq_of(cfs_rq)->curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001892}
1893
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001894static void
Ingo Molnar8494f412007-08-09 11:16:48 +02001895set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001896{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001897 /* 'current' is not kept within the tree. */
1898 if (se->on_rq) {
1899 /*
1900 * Any task has to be enqueued before it get to execute on
1901 * a CPU. So account for the time it spent waiting on the
1902 * runqueue.
1903 */
1904 update_stats_wait_end(cfs_rq, se);
1905 __dequeue_entity(cfs_rq, se);
1906 }
1907
Ingo Molnar79303e92007-08-09 11:16:47 +02001908 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02001909 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02001910#ifdef CONFIG_SCHEDSTATS
1911 /*
1912 * Track our maximum slice length, if the CPU's load is at
1913 * least twice that of our own weight (i.e. dont track it
1914 * when there are only lesser-weight tasks around):
1915 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02001916 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001917 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02001918 se->sum_exec_runtime - se->prev_sum_exec_runtime);
1919 }
1920#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02001921 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001922}
1923
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02001924static int
1925wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1926
Rik van Rielac53db52011-02-01 09:51:03 -05001927/*
1928 * Pick the next process, keeping these things in mind, in this order:
1929 * 1) keep things fair between processes/task groups
1930 * 2) pick the "next" process, since someone really wants that to run
1931 * 3) pick the "last" process, for cache locality
1932 * 4) do not run the "skip" process, if something else is available
1933 */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01001934static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001935{
Rik van Rielac53db52011-02-01 09:51:03 -05001936 struct sched_entity *se = __pick_first_entity(cfs_rq);
Mike Galbraithf685cea2009-10-23 23:09:22 +02001937 struct sched_entity *left = se;
Peter Zijlstraf4b67552008-11-04 21:25:07 +01001938
Rik van Rielac53db52011-02-01 09:51:03 -05001939 /*
1940 * Avoid running the skip buddy, if running something else can
1941 * be done without getting too unfair.
1942 */
1943 if (cfs_rq->skip == se) {
1944 struct sched_entity *second = __pick_next_entity(se);
1945 if (second && wakeup_preempt_entity(second, left) < 1)
1946 se = second;
1947 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001948
Mike Galbraithf685cea2009-10-23 23:09:22 +02001949 /*
1950 * Prefer last buddy, try to return the CPU to a preempted task.
1951 */
1952 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1953 se = cfs_rq->last;
1954
Rik van Rielac53db52011-02-01 09:51:03 -05001955 /*
1956 * Someone really wants this to run. If it's not unfair, run it.
1957 */
1958 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1959 se = cfs_rq->next;
1960
Mike Galbraithf685cea2009-10-23 23:09:22 +02001961 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01001962
1963 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01001964}
1965
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001966static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1967
Ingo Molnarab6cde22007-08-09 11:16:48 +02001968static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001969{
1970 /*
1971 * If still on the runqueue then deactivate_task()
1972 * was not called and update_curr() has to be done:
1973 */
1974 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02001975 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001976
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001977 /* throttle cfs_rqs exceeding runtime */
1978 check_cfs_rq_runtime(cfs_rq);
1979
Peter Zijlstraddc97292007-10-15 17:00:10 +02001980 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001981 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02001982 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001983 /* Put 'current' back into the tree. */
1984 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02001985 /* in !on_rq case, update occurred at dequeue */
Paul Turner9ee474f2012-10-04 13:18:30 +02001986 update_entity_load_avg(prev, 1);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001987 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02001988 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001989}
1990
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001991static void
1992entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001993{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001994 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001995 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001996 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001997 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001998
Paul Turner43365bd2010-12-15 19:10:17 -08001999 /*
Paul Turner9d85f212012-10-04 13:18:29 +02002000 * Ensure that runnable average is periodically updated.
2001 */
Paul Turner9ee474f2012-10-04 13:18:30 +02002002 update_entity_load_avg(curr, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002003 update_cfs_rq_blocked_load(cfs_rq, 1);
Paul Turner9d85f212012-10-04 13:18:29 +02002004
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002005#ifdef CONFIG_SCHED_HRTICK
2006 /*
2007 * queued ticks are scheduled to match the slice, so don't bother
2008 * validating it and just reschedule.
2009 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07002010 if (queued) {
2011 resched_task(rq_of(cfs_rq)->curr);
2012 return;
2013 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002014 /*
2015 * don't let the period tick interfere with the hrtick preemption
2016 */
2017 if (!sched_feat(DOUBLE_TICK) &&
2018 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2019 return;
2020#endif
2021
Yong Zhang2c2efae2011-07-29 16:20:33 +08002022 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002023 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002024}
2025
Paul Turnerab84d312011-07-21 09:43:28 -07002026
2027/**************************************************
2028 * CFS bandwidth control machinery
2029 */
2030
2031#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02002032
2033#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01002034static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02002035
2036static inline bool cfs_bandwidth_used(void)
2037{
Ingo Molnarc5905af2012-02-24 08:31:31 +01002038 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002039}
2040
2041void account_cfs_bandwidth_used(int enabled, int was_enabled)
2042{
2043 /* only need to count groups transitioning between enabled/!enabled */
2044 if (enabled && !was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002045 static_key_slow_inc(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002046 else if (!enabled && was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002047 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002048}
2049#else /* HAVE_JUMP_LABEL */
2050static bool cfs_bandwidth_used(void)
2051{
2052 return true;
2053}
2054
2055void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2056#endif /* HAVE_JUMP_LABEL */
2057
Paul Turnerab84d312011-07-21 09:43:28 -07002058/*
2059 * default period for cfs group bandwidth.
2060 * default: 0.1s, units: nanoseconds
2061 */
2062static inline u64 default_cfs_period(void)
2063{
2064 return 100000000ULL;
2065}
Paul Turnerec12cb72011-07-21 09:43:30 -07002066
2067static inline u64 sched_cfs_bandwidth_slice(void)
2068{
2069 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2070}
2071
Paul Turnera9cf55b2011-07-21 09:43:32 -07002072/*
2073 * Replenish runtime according to assigned quota and update expiration time.
2074 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2075 * additional synchronization around rq->lock.
2076 *
2077 * requires cfs_b->lock
2078 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02002079void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07002080{
2081 u64 now;
2082
2083 if (cfs_b->quota == RUNTIME_INF)
2084 return;
2085
2086 now = sched_clock_cpu(smp_processor_id());
2087 cfs_b->runtime = cfs_b->quota;
2088 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2089}
2090
Peter Zijlstra029632f2011-10-25 10:00:11 +02002091static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2092{
2093 return &tg->cfs_bandwidth;
2094}
2095
Paul Turnerf1b17282012-10-04 13:18:31 +02002096/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
2097static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2098{
2099 if (unlikely(cfs_rq->throttle_count))
2100 return cfs_rq->throttled_clock_task;
2101
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002102 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02002103}
2104
Paul Turner85dac902011-07-21 09:43:33 -07002105/* returns 0 on failure to allocate runtime */
2106static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07002107{
2108 struct task_group *tg = cfs_rq->tg;
2109 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002110 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002111
2112 /* note: this is a positive sum as runtime_remaining <= 0 */
2113 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2114
2115 raw_spin_lock(&cfs_b->lock);
2116 if (cfs_b->quota == RUNTIME_INF)
2117 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07002118 else {
Paul Turnera9cf55b2011-07-21 09:43:32 -07002119 /*
2120 * If the bandwidth pool has become inactive, then at least one
2121 * period must have elapsed since the last consumption.
2122 * Refresh the global state and ensure bandwidth timer becomes
2123 * active.
2124 */
2125 if (!cfs_b->timer_active) {
2126 __refill_cfs_bandwidth_runtime(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07002127 __start_cfs_bandwidth(cfs_b);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002128 }
Paul Turner58088ad2011-07-21 09:43:31 -07002129
2130 if (cfs_b->runtime > 0) {
2131 amount = min(cfs_b->runtime, min_amount);
2132 cfs_b->runtime -= amount;
2133 cfs_b->idle = 0;
2134 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002135 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07002136 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002137 raw_spin_unlock(&cfs_b->lock);
2138
2139 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002140 /*
2141 * we may have advanced our local expiration to account for allowed
2142 * spread between our sched_clock and the one on which runtime was
2143 * issued.
2144 */
2145 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2146 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07002147
2148 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002149}
2150
2151/*
2152 * Note: This depends on the synchronization provided by sched_clock and the
2153 * fact that rq->clock snapshots this value.
2154 */
2155static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2156{
2157 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002158
2159 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002160 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07002161 return;
2162
2163 if (cfs_rq->runtime_remaining < 0)
2164 return;
2165
2166 /*
2167 * If the local deadline has passed we have to consider the
2168 * possibility that our sched_clock is 'fast' and the global deadline
2169 * has not truly expired.
2170 *
2171 * Fortunately we can check determine whether this the case by checking
2172 * whether the global deadline has advanced.
2173 */
2174
2175 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2176 /* extend local deadline, drift is bounded above by 2 ticks */
2177 cfs_rq->runtime_expires += TICK_NSEC;
2178 } else {
2179 /* global deadline is ahead, expiration has passed */
2180 cfs_rq->runtime_remaining = 0;
2181 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002182}
2183
2184static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2185 unsigned long delta_exec)
2186{
Paul Turnera9cf55b2011-07-21 09:43:32 -07002187 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07002188 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002189 expire_cfs_rq_runtime(cfs_rq);
2190
2191 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07002192 return;
2193
Paul Turner85dac902011-07-21 09:43:33 -07002194 /*
2195 * if we're unable to extend our runtime we resched so that the active
2196 * hierarchy can be throttled
2197 */
2198 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2199 resched_task(rq_of(cfs_rq)->curr);
Paul Turnerec12cb72011-07-21 09:43:30 -07002200}
2201
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002202static __always_inline
2203void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07002204{
Paul Turner56f570e2011-11-07 20:26:33 -08002205 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07002206 return;
2207
2208 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2209}
2210
Paul Turner85dac902011-07-21 09:43:33 -07002211static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2212{
Paul Turner56f570e2011-11-07 20:26:33 -08002213 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07002214}
2215
Paul Turner64660c82011-07-21 09:43:36 -07002216/* check whether cfs_rq, or any parent, is throttled */
2217static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2218{
Paul Turner56f570e2011-11-07 20:26:33 -08002219 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07002220}
2221
2222/*
2223 * Ensure that neither of the group entities corresponding to src_cpu or
2224 * dest_cpu are members of a throttled hierarchy when performing group
2225 * load-balance operations.
2226 */
2227static inline int throttled_lb_pair(struct task_group *tg,
2228 int src_cpu, int dest_cpu)
2229{
2230 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2231
2232 src_cfs_rq = tg->cfs_rq[src_cpu];
2233 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2234
2235 return throttled_hierarchy(src_cfs_rq) ||
2236 throttled_hierarchy(dest_cfs_rq);
2237}
2238
2239/* updated child weight may affect parent so we have to do this bottom up */
2240static int tg_unthrottle_up(struct task_group *tg, void *data)
2241{
2242 struct rq *rq = data;
2243 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2244
2245 cfs_rq->throttle_count--;
2246#ifdef CONFIG_SMP
2247 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02002248 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002249 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02002250 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07002251 }
2252#endif
2253
2254 return 0;
2255}
2256
2257static int tg_throttle_down(struct task_group *tg, void *data)
2258{
2259 struct rq *rq = data;
2260 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2261
Paul Turner82958362012-10-04 13:18:31 +02002262 /* group is entering throttled state, stop time */
2263 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002264 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07002265 cfs_rq->throttle_count++;
2266
2267 return 0;
2268}
2269
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002270static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07002271{
2272 struct rq *rq = rq_of(cfs_rq);
2273 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2274 struct sched_entity *se;
2275 long task_delta, dequeue = 1;
2276
2277 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2278
Paul Turnerf1b17282012-10-04 13:18:31 +02002279 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07002280 rcu_read_lock();
2281 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2282 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07002283
2284 task_delta = cfs_rq->h_nr_running;
2285 for_each_sched_entity(se) {
2286 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2287 /* throttled entity or throttle-on-deactivate */
2288 if (!se->on_rq)
2289 break;
2290
2291 if (dequeue)
2292 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2293 qcfs_rq->h_nr_running -= task_delta;
2294
2295 if (qcfs_rq->load.weight)
2296 dequeue = 0;
2297 }
2298
2299 if (!se)
2300 rq->nr_running -= task_delta;
2301
2302 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002303 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07002304 raw_spin_lock(&cfs_b->lock);
2305 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2306 raw_spin_unlock(&cfs_b->lock);
2307}
2308
Peter Zijlstra029632f2011-10-25 10:00:11 +02002309void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07002310{
2311 struct rq *rq = rq_of(cfs_rq);
2312 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2313 struct sched_entity *se;
2314 int enqueue = 1;
2315 long task_delta;
2316
Michael Wang22b958d2013-06-04 14:23:39 +08002317 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07002318
2319 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02002320
2321 update_rq_clock(rq);
2322
Paul Turner671fd9d2011-07-21 09:43:34 -07002323 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002324 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07002325 list_del_rcu(&cfs_rq->throttled_list);
2326 raw_spin_unlock(&cfs_b->lock);
2327
Paul Turner64660c82011-07-21 09:43:36 -07002328 /* update hierarchical throttle state */
2329 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2330
Paul Turner671fd9d2011-07-21 09:43:34 -07002331 if (!cfs_rq->load.weight)
2332 return;
2333
2334 task_delta = cfs_rq->h_nr_running;
2335 for_each_sched_entity(se) {
2336 if (se->on_rq)
2337 enqueue = 0;
2338
2339 cfs_rq = cfs_rq_of(se);
2340 if (enqueue)
2341 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2342 cfs_rq->h_nr_running += task_delta;
2343
2344 if (cfs_rq_throttled(cfs_rq))
2345 break;
2346 }
2347
2348 if (!se)
2349 rq->nr_running += task_delta;
2350
2351 /* determine whether we need to wake up potentially idle cpu */
2352 if (rq->curr == rq->idle && rq->cfs.nr_running)
2353 resched_task(rq->curr);
2354}
2355
2356static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2357 u64 remaining, u64 expires)
2358{
2359 struct cfs_rq *cfs_rq;
2360 u64 runtime = remaining;
2361
2362 rcu_read_lock();
2363 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2364 throttled_list) {
2365 struct rq *rq = rq_of(cfs_rq);
2366
2367 raw_spin_lock(&rq->lock);
2368 if (!cfs_rq_throttled(cfs_rq))
2369 goto next;
2370
2371 runtime = -cfs_rq->runtime_remaining + 1;
2372 if (runtime > remaining)
2373 runtime = remaining;
2374 remaining -= runtime;
2375
2376 cfs_rq->runtime_remaining += runtime;
2377 cfs_rq->runtime_expires = expires;
2378
2379 /* we check whether we're throttled above */
2380 if (cfs_rq->runtime_remaining > 0)
2381 unthrottle_cfs_rq(cfs_rq);
2382
2383next:
2384 raw_spin_unlock(&rq->lock);
2385
2386 if (!remaining)
2387 break;
2388 }
2389 rcu_read_unlock();
2390
2391 return remaining;
2392}
2393
Paul Turner58088ad2011-07-21 09:43:31 -07002394/*
2395 * Responsible for refilling a task_group's bandwidth and unthrottling its
2396 * cfs_rqs as appropriate. If there has been no activity within the last
2397 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2398 * used to track this state.
2399 */
2400static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2401{
Paul Turner671fd9d2011-07-21 09:43:34 -07002402 u64 runtime, runtime_expires;
2403 int idle = 1, throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07002404
2405 raw_spin_lock(&cfs_b->lock);
2406 /* no need to continue the timer with no bandwidth constraint */
2407 if (cfs_b->quota == RUNTIME_INF)
2408 goto out_unlock;
2409
Paul Turner671fd9d2011-07-21 09:43:34 -07002410 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2411 /* idle depends on !throttled (for the case of a large deficit) */
2412 idle = cfs_b->idle && !throttled;
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002413 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07002414
Paul Turnera9cf55b2011-07-21 09:43:32 -07002415 /* if we're going inactive then everything else can be deferred */
2416 if (idle)
2417 goto out_unlock;
2418
2419 __refill_cfs_bandwidth_runtime(cfs_b);
2420
Paul Turner671fd9d2011-07-21 09:43:34 -07002421 if (!throttled) {
2422 /* mark as potentially idle for the upcoming period */
2423 cfs_b->idle = 1;
2424 goto out_unlock;
2425 }
Paul Turner58088ad2011-07-21 09:43:31 -07002426
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002427 /* account preceding periods in which throttling occurred */
2428 cfs_b->nr_throttled += overrun;
2429
Paul Turner671fd9d2011-07-21 09:43:34 -07002430 /*
2431 * There are throttled entities so we must first use the new bandwidth
2432 * to unthrottle them before making it generally available. This
2433 * ensures that all existing debts will be paid before a new cfs_rq is
2434 * allowed to run.
2435 */
2436 runtime = cfs_b->runtime;
2437 runtime_expires = cfs_b->runtime_expires;
2438 cfs_b->runtime = 0;
2439
2440 /*
2441 * This check is repeated as we are holding onto the new bandwidth
2442 * while we unthrottle. This can potentially race with an unthrottled
2443 * group trying to acquire new bandwidth from the global pool.
2444 */
2445 while (throttled && runtime > 0) {
2446 raw_spin_unlock(&cfs_b->lock);
2447 /* we can't nest cfs_b->lock while distributing bandwidth */
2448 runtime = distribute_cfs_runtime(cfs_b, runtime,
2449 runtime_expires);
2450 raw_spin_lock(&cfs_b->lock);
2451
2452 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2453 }
2454
2455 /* return (any) remaining runtime */
2456 cfs_b->runtime = runtime;
2457 /*
2458 * While we are ensured activity in the period following an
2459 * unthrottle, this also covers the case in which the new bandwidth is
2460 * insufficient to cover the existing bandwidth deficit. (Forcing the
2461 * timer to remain active while there are any throttled entities.)
2462 */
2463 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07002464out_unlock:
2465 if (idle)
2466 cfs_b->timer_active = 0;
2467 raw_spin_unlock(&cfs_b->lock);
2468
2469 return idle;
2470}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002471
Paul Turnerd8b49862011-07-21 09:43:41 -07002472/* a cfs_rq won't donate quota below this amount */
2473static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2474/* minimum remaining period time to redistribute slack quota */
2475static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2476/* how long we wait to gather additional slack before distributing */
2477static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2478
2479/* are we near the end of the current quota period? */
2480static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2481{
2482 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2483 u64 remaining;
2484
2485 /* if the call-back is running a quota refresh is already occurring */
2486 if (hrtimer_callback_running(refresh_timer))
2487 return 1;
2488
2489 /* is a quota refresh about to occur? */
2490 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2491 if (remaining < min_expire)
2492 return 1;
2493
2494 return 0;
2495}
2496
2497static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2498{
2499 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2500
2501 /* if there's a quota refresh soon don't bother with slack */
2502 if (runtime_refresh_within(cfs_b, min_left))
2503 return;
2504
2505 start_bandwidth_timer(&cfs_b->slack_timer,
2506 ns_to_ktime(cfs_bandwidth_slack_period));
2507}
2508
2509/* we know any runtime found here is valid as update_curr() precedes return */
2510static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2511{
2512 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2513 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2514
2515 if (slack_runtime <= 0)
2516 return;
2517
2518 raw_spin_lock(&cfs_b->lock);
2519 if (cfs_b->quota != RUNTIME_INF &&
2520 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2521 cfs_b->runtime += slack_runtime;
2522
2523 /* we are under rq->lock, defer unthrottling using a timer */
2524 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2525 !list_empty(&cfs_b->throttled_cfs_rq))
2526 start_cfs_slack_bandwidth(cfs_b);
2527 }
2528 raw_spin_unlock(&cfs_b->lock);
2529
2530 /* even if it's not valid for return we don't want to try again */
2531 cfs_rq->runtime_remaining -= slack_runtime;
2532}
2533
2534static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2535{
Paul Turner56f570e2011-11-07 20:26:33 -08002536 if (!cfs_bandwidth_used())
2537 return;
2538
Paul Turnerfccfdc62011-11-07 20:26:34 -08002539 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07002540 return;
2541
2542 __return_cfs_rq_runtime(cfs_rq);
2543}
2544
2545/*
2546 * This is done with a timer (instead of inline with bandwidth return) since
2547 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2548 */
2549static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2550{
2551 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2552 u64 expires;
2553
2554 /* confirm we're still not at a refresh boundary */
2555 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2556 return;
2557
2558 raw_spin_lock(&cfs_b->lock);
2559 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2560 runtime = cfs_b->runtime;
2561 cfs_b->runtime = 0;
2562 }
2563 expires = cfs_b->runtime_expires;
2564 raw_spin_unlock(&cfs_b->lock);
2565
2566 if (!runtime)
2567 return;
2568
2569 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2570
2571 raw_spin_lock(&cfs_b->lock);
2572 if (expires == cfs_b->runtime_expires)
2573 cfs_b->runtime = runtime;
2574 raw_spin_unlock(&cfs_b->lock);
2575}
2576
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002577/*
2578 * When a group wakes up we want to make sure that its quota is not already
2579 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
2580 * runtime as update_curr() throttling can not not trigger until it's on-rq.
2581 */
2582static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2583{
Paul Turner56f570e2011-11-07 20:26:33 -08002584 if (!cfs_bandwidth_used())
2585 return;
2586
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002587 /* an active group must be handled by the update_curr()->put() path */
2588 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2589 return;
2590
2591 /* ensure the group is not already throttled */
2592 if (cfs_rq_throttled(cfs_rq))
2593 return;
2594
2595 /* update runtime allocation */
2596 account_cfs_rq_runtime(cfs_rq, 0);
2597 if (cfs_rq->runtime_remaining <= 0)
2598 throttle_cfs_rq(cfs_rq);
2599}
2600
2601/* conditionally throttle active cfs_rq's from put_prev_entity() */
2602static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2603{
Paul Turner56f570e2011-11-07 20:26:33 -08002604 if (!cfs_bandwidth_used())
2605 return;
2606
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002607 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2608 return;
2609
2610 /*
2611 * it's possible for a throttled entity to be forced into a running
2612 * state (e.g. set_curr_task), in this case we're finished.
2613 */
2614 if (cfs_rq_throttled(cfs_rq))
2615 return;
2616
2617 throttle_cfs_rq(cfs_rq);
2618}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002619
Peter Zijlstra029632f2011-10-25 10:00:11 +02002620static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2621{
2622 struct cfs_bandwidth *cfs_b =
2623 container_of(timer, struct cfs_bandwidth, slack_timer);
2624 do_sched_cfs_slack_timer(cfs_b);
2625
2626 return HRTIMER_NORESTART;
2627}
2628
2629static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2630{
2631 struct cfs_bandwidth *cfs_b =
2632 container_of(timer, struct cfs_bandwidth, period_timer);
2633 ktime_t now;
2634 int overrun;
2635 int idle = 0;
2636
2637 for (;;) {
2638 now = hrtimer_cb_get_time(timer);
2639 overrun = hrtimer_forward(timer, now, cfs_b->period);
2640
2641 if (!overrun)
2642 break;
2643
2644 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2645 }
2646
2647 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2648}
2649
2650void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2651{
2652 raw_spin_lock_init(&cfs_b->lock);
2653 cfs_b->runtime = 0;
2654 cfs_b->quota = RUNTIME_INF;
2655 cfs_b->period = ns_to_ktime(default_cfs_period());
2656
2657 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2658 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2659 cfs_b->period_timer.function = sched_cfs_period_timer;
2660 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2661 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2662}
2663
2664static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2665{
2666 cfs_rq->runtime_enabled = 0;
2667 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2668}
2669
2670/* requires cfs_b->lock, may release to reprogram timer */
2671void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2672{
2673 /*
2674 * The timer may be active because we're trying to set a new bandwidth
2675 * period or because we're racing with the tear-down path
2676 * (timer_active==0 becomes visible before the hrtimer call-back
2677 * terminates). In either case we ensure that it's re-programmed
2678 */
2679 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2680 raw_spin_unlock(&cfs_b->lock);
2681 /* ensure cfs_b->lock is available while we wait */
2682 hrtimer_cancel(&cfs_b->period_timer);
2683
2684 raw_spin_lock(&cfs_b->lock);
2685 /* if someone else restarted the timer then we're done */
2686 if (cfs_b->timer_active)
2687 return;
2688 }
2689
2690 cfs_b->timer_active = 1;
2691 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2692}
2693
2694static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2695{
2696 hrtimer_cancel(&cfs_b->period_timer);
2697 hrtimer_cancel(&cfs_b->slack_timer);
2698}
2699
Arnd Bergmann38dc3342013-01-25 14:14:22 +00002700static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02002701{
2702 struct cfs_rq *cfs_rq;
2703
2704 for_each_leaf_cfs_rq(rq, cfs_rq) {
2705 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2706
2707 if (!cfs_rq->runtime_enabled)
2708 continue;
2709
2710 /*
2711 * clock_task is not advancing so we just need to make sure
2712 * there's some valid quota amount
2713 */
2714 cfs_rq->runtime_remaining = cfs_b->quota;
2715 if (cfs_rq_throttled(cfs_rq))
2716 unthrottle_cfs_rq(cfs_rq);
2717 }
2718}
2719
2720#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02002721static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2722{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002723 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02002724}
2725
2726static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2727 unsigned long delta_exec) {}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002728static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2729static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002730static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07002731
2732static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2733{
2734 return 0;
2735}
Paul Turner64660c82011-07-21 09:43:36 -07002736
2737static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2738{
2739 return 0;
2740}
2741
2742static inline int throttled_lb_pair(struct task_group *tg,
2743 int src_cpu, int dest_cpu)
2744{
2745 return 0;
2746}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002747
2748void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2749
2750#ifdef CONFIG_FAIR_GROUP_SCHED
2751static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07002752#endif
2753
Peter Zijlstra029632f2011-10-25 10:00:11 +02002754static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2755{
2756 return NULL;
2757}
2758static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07002759static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002760
2761#endif /* CONFIG_CFS_BANDWIDTH */
2762
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002763/**************************************************
2764 * CFS operations on tasks:
2765 */
2766
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002767#ifdef CONFIG_SCHED_HRTICK
2768static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2769{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002770 struct sched_entity *se = &p->se;
2771 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2772
2773 WARN_ON(task_rq(p) != rq);
2774
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002775 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002776 u64 slice = sched_slice(cfs_rq, se);
2777 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2778 s64 delta = slice - ran;
2779
2780 if (delta < 0) {
2781 if (rq->curr == p)
2782 resched_task(p);
2783 return;
2784 }
2785
2786 /*
2787 * Don't schedule slices shorter than 10000ns, that just
2788 * doesn't make sense. Rely on vruntime for fairness.
2789 */
Peter Zijlstra31656512008-07-18 18:01:23 +02002790 if (rq->curr != p)
Peter Zijlstra157124c2008-07-28 11:53:11 +02002791 delta = max_t(s64, 10000LL, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002792
Peter Zijlstra31656512008-07-18 18:01:23 +02002793 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002794 }
2795}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002796
2797/*
2798 * called from enqueue/dequeue and updates the hrtick when the
2799 * current task is from our class and nr_running is low enough
2800 * to matter.
2801 */
2802static void hrtick_update(struct rq *rq)
2803{
2804 struct task_struct *curr = rq->curr;
2805
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002806 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002807 return;
2808
2809 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2810 hrtick_start_fair(rq, curr);
2811}
Dhaval Giani55e12e52008-06-24 23:39:43 +05302812#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002813static inline void
2814hrtick_start_fair(struct rq *rq, struct task_struct *p)
2815{
2816}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002817
2818static inline void hrtick_update(struct rq *rq)
2819{
2820}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002821#endif
2822
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002823/*
2824 * The enqueue_task method is called before nr_running is
2825 * increased. Here we update the fair scheduling stats and
2826 * then put the task into the rbtree:
2827 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00002828static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002829enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002830{
2831 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002832 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002833
2834 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002835 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002836 break;
2837 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002838 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07002839
2840 /*
2841 * end evaluation on encountering a throttled cfs_rq
2842 *
2843 * note: in the case of encountering a throttled cfs_rq we will
2844 * post the final h_nr_running increment below.
2845 */
2846 if (cfs_rq_throttled(cfs_rq))
2847 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07002848 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07002849
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002850 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002851 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002852
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002853 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08002854 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07002855 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002856
Paul Turner85dac902011-07-21 09:43:33 -07002857 if (cfs_rq_throttled(cfs_rq))
2858 break;
2859
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002860 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02002861 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002862 }
2863
Ben Segall18bf2802012-10-04 12:51:20 +02002864 if (!se) {
2865 update_rq_runnable_avg(rq, rq->nr_running);
Paul Turner85dac902011-07-21 09:43:33 -07002866 inc_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02002867 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002868 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002869}
2870
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002871static void set_next_buddy(struct sched_entity *se);
2872
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002873/*
2874 * The dequeue_task method is called before nr_running is
2875 * decreased. We remove the task from the rbtree and
2876 * update the fair scheduling stats:
2877 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002878static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002879{
2880 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01002881 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002882 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002883
2884 for_each_sched_entity(se) {
2885 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002886 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07002887
2888 /*
2889 * end evaluation on encountering a throttled cfs_rq
2890 *
2891 * note: in the case of encountering a throttled cfs_rq we will
2892 * post the final h_nr_running decrement below.
2893 */
2894 if (cfs_rq_throttled(cfs_rq))
2895 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07002896 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002897
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002898 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002899 if (cfs_rq->load.weight) {
2900 /*
2901 * Bias pick_next to pick a task from this cfs_rq, as
2902 * p is sleeping when it is within its sched_slice.
2903 */
2904 if (task_sleep && parent_entity(se))
2905 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07002906
2907 /* avoid re-evaluating load for this entity */
2908 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002909 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07002910 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002911 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002912 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002913
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002914 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08002915 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07002916 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002917
Paul Turner85dac902011-07-21 09:43:33 -07002918 if (cfs_rq_throttled(cfs_rq))
2919 break;
2920
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002921 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02002922 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002923 }
2924
Ben Segall18bf2802012-10-04 12:51:20 +02002925 if (!se) {
Paul Turner85dac902011-07-21 09:43:33 -07002926 dec_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02002927 update_rq_runnable_avg(rq, 1);
2928 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002929 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002930}
2931
Gregory Haskinse7693a32008-01-25 21:08:09 +01002932#ifdef CONFIG_SMP
Peter Zijlstra029632f2011-10-25 10:00:11 +02002933/* Used instead of source_load when we know the type == 0 */
2934static unsigned long weighted_cpuload(const int cpu)
2935{
2936 return cpu_rq(cpu)->load.weight;
2937}
2938
2939/*
2940 * Return a low guess at the load of a migration-source cpu weighted
2941 * according to the scheduling class and "nice" value.
2942 *
2943 * We want to under-estimate the load of migration sources, to
2944 * balance conservatively.
2945 */
2946static unsigned long source_load(int cpu, int type)
2947{
2948 struct rq *rq = cpu_rq(cpu);
2949 unsigned long total = weighted_cpuload(cpu);
2950
2951 if (type == 0 || !sched_feat(LB_BIAS))
2952 return total;
2953
2954 return min(rq->cpu_load[type-1], total);
2955}
2956
2957/*
2958 * Return a high guess at the load of a migration-target cpu weighted
2959 * according to the scheduling class and "nice" value.
2960 */
2961static unsigned long target_load(int cpu, int type)
2962{
2963 struct rq *rq = cpu_rq(cpu);
2964 unsigned long total = weighted_cpuload(cpu);
2965
2966 if (type == 0 || !sched_feat(LB_BIAS))
2967 return total;
2968
2969 return max(rq->cpu_load[type-1], total);
2970}
2971
2972static unsigned long power_of(int cpu)
2973{
2974 return cpu_rq(cpu)->cpu_power;
2975}
2976
2977static unsigned long cpu_avg_load_per_task(int cpu)
2978{
2979 struct rq *rq = cpu_rq(cpu);
2980 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
2981
2982 if (nr_running)
2983 return rq->load.weight / nr_running;
2984
2985 return 0;
2986}
2987
Ingo Molnar098fb9d2008-03-16 20:36:10 +01002988
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02002989static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002990{
2991 struct sched_entity *se = &p->se;
2992 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02002993 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002994
Peter Zijlstra3fe16982011-04-05 17:23:48 +02002995#ifndef CONFIG_64BIT
2996 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02002997
Peter Zijlstra3fe16982011-04-05 17:23:48 +02002998 do {
2999 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3000 smp_rmb();
3001 min_vruntime = cfs_rq->min_vruntime;
3002 } while (min_vruntime != min_vruntime_copy);
3003#else
3004 min_vruntime = cfs_rq->min_vruntime;
3005#endif
3006
3007 se->vruntime -= min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003008}
3009
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003010#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003011/*
3012 * effective_load() calculates the load change as seen from the root_task_group
3013 *
3014 * Adding load to a group doesn't make a group heavier, but can cause movement
3015 * of group shares between cpus. Assuming the shares were perfectly aligned one
3016 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003017 *
3018 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3019 * on this @cpu and results in a total addition (subtraction) of @wg to the
3020 * total group weight.
3021 *
3022 * Given a runqueue weight distribution (rw_i) we can compute a shares
3023 * distribution (s_i) using:
3024 *
3025 * s_i = rw_i / \Sum rw_j (1)
3026 *
3027 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3028 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3029 * shares distribution (s_i):
3030 *
3031 * rw_i = { 2, 4, 1, 0 }
3032 * s_i = { 2/7, 4/7, 1/7, 0 }
3033 *
3034 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3035 * task used to run on and the CPU the waker is running on), we need to
3036 * compute the effect of waking a task on either CPU and, in case of a sync
3037 * wakeup, compute the effect of the current task going to sleep.
3038 *
3039 * So for a change of @wl to the local @cpu with an overall group weight change
3040 * of @wl we can compute the new shares distribution (s'_i) using:
3041 *
3042 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3043 *
3044 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3045 * differences in waking a task to CPU 0. The additional task changes the
3046 * weight and shares distributions like:
3047 *
3048 * rw'_i = { 3, 4, 1, 0 }
3049 * s'_i = { 3/8, 4/8, 1/8, 0 }
3050 *
3051 * We can then compute the difference in effective weight by using:
3052 *
3053 * dw_i = S * (s'_i - s_i) (3)
3054 *
3055 * Where 'S' is the group weight as seen by its parent.
3056 *
3057 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3058 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3059 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003060 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003061static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003062{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003063 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003064
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003065 if (!tg->parent) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003066 return wl;
3067
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003068 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003069 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003070
Paul Turner977dda72011-01-14 17:57:50 -08003071 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003072
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003073 /*
3074 * W = @wg + \Sum rw_j
3075 */
3076 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003077
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003078 /*
3079 * w = rw_i + @wl
3080 */
3081 w = se->my_q->load.weight + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003082
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003083 /*
3084 * wl = S * s'_i; see (2)
3085 */
3086 if (W > 0 && w < W)
3087 wl = (w * tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08003088 else
3089 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003090
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003091 /*
3092 * Per the above, wl is the new se->load.weight value; since
3093 * those are clipped to [MIN_SHARES, ...) do so now. See
3094 * calc_cfs_shares().
3095 */
Paul Turner977dda72011-01-14 17:57:50 -08003096 if (wl < MIN_SHARES)
3097 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003098
3099 /*
3100 * wl = dw_i = S * (s'_i - s_i); see (3)
3101 */
Paul Turner977dda72011-01-14 17:57:50 -08003102 wl -= se->load.weight;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003103
3104 /*
3105 * Recursively apply this logic to all parent groups to compute
3106 * the final effective load change on the root group. Since
3107 * only the @tg group gets extra weight, all parent groups can
3108 * only redistribute existing shares. @wl is the shift in shares
3109 * resulting from this level per the above.
3110 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003111 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003112 }
3113
3114 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003115}
3116#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003117
Peter Zijlstra83378262008-06-27 13:41:37 +02003118static inline unsigned long effective_load(struct task_group *tg, int cpu,
3119 unsigned long wl, unsigned long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003120{
Peter Zijlstra83378262008-06-27 13:41:37 +02003121 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003122}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003123
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003124#endif
3125
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003126static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003127{
Paul Turnere37b6a72011-01-21 20:44:59 -08003128 s64 this_load, load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003129 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003130 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003131 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02003132 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003133 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003134
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003135 idx = sd->wake_idx;
3136 this_cpu = smp_processor_id();
3137 prev_cpu = task_cpu(p);
3138 load = source_load(prev_cpu, idx);
3139 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003140
3141 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003142 * If sync wakeup then subtract the (maximum possible)
3143 * effect of the currently running task from the load
3144 * of the current CPU:
3145 */
Peter Zijlstra83378262008-06-27 13:41:37 +02003146 if (sync) {
3147 tg = task_group(current);
3148 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003149
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003150 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02003151 load += effective_load(tg, prev_cpu, 0, -weight);
3152 }
3153
3154 tg = task_group(p);
3155 weight = p->se.load.weight;
3156
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003157 /*
3158 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003159 * due to the sync cause above having dropped this_load to 0, we'll
3160 * always have an imbalance, but there's really nothing you can do
3161 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003162 *
3163 * Otherwise check if either cpus are near enough in load to allow this
3164 * task to be woken on this_cpu.
3165 */
Paul Turnere37b6a72011-01-21 20:44:59 -08003166 if (this_load > 0) {
3167 s64 this_eff_load, prev_eff_load;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02003168
3169 this_eff_load = 100;
3170 this_eff_load *= power_of(prev_cpu);
3171 this_eff_load *= this_load +
3172 effective_load(tg, this_cpu, weight, weight);
3173
3174 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3175 prev_eff_load *= power_of(this_cpu);
3176 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3177
3178 balanced = this_eff_load <= prev_eff_load;
3179 } else
3180 balanced = true;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003181
3182 /*
3183 * If the currently running task will sleep within
3184 * a reasonable amount of time then attract this newly
3185 * woken task:
3186 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02003187 if (sync && balanced)
3188 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003189
Lucas De Marchi41acab82010-03-10 23:37:45 -03003190 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003191 tl_per_task = cpu_avg_load_per_task(this_cpu);
3192
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003193 if (balanced ||
3194 (this_load <= load &&
3195 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003196 /*
3197 * This domain has SD_WAKE_AFFINE and
3198 * p is cache cold in this domain, and
3199 * there is no bad imbalance.
3200 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003201 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003202 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003203
3204 return 1;
3205 }
3206 return 0;
3207}
3208
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003209/*
3210 * find_idlest_group finds and returns the least busy CPU group within the
3211 * domain.
3212 */
3213static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02003214find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003215 int this_cpu, int load_idx)
Gregory Haskinse7693a32008-01-25 21:08:09 +01003216{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07003217 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003218 unsigned long min_load = ULONG_MAX, this_load = 0;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003219 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003220
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003221 do {
3222 unsigned long load, avg_load;
3223 int local_group;
3224 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003225
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003226 /* Skip over this group if it has no CPUs allowed */
3227 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003228 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003229 continue;
3230
3231 local_group = cpumask_test_cpu(this_cpu,
3232 sched_group_cpus(group));
3233
3234 /* Tally up the load of all CPUs in the group */
3235 avg_load = 0;
3236
3237 for_each_cpu(i, sched_group_cpus(group)) {
3238 /* Bias balancing toward cpus of our domain */
3239 if (local_group)
3240 load = source_load(i, load_idx);
3241 else
3242 load = target_load(i, load_idx);
3243
3244 avg_load += load;
3245 }
3246
3247 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02003248 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003249
3250 if (local_group) {
3251 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003252 } else if (avg_load < min_load) {
3253 min_load = avg_load;
3254 idlest = group;
3255 }
3256 } while (group = group->next, group != sd->groups);
3257
3258 if (!idlest || 100*this_load < imbalance*min_load)
3259 return NULL;
3260 return idlest;
3261}
3262
3263/*
3264 * find_idlest_cpu - find the idlest cpu among the cpus in group.
3265 */
3266static int
3267find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3268{
3269 unsigned long load, min_load = ULONG_MAX;
3270 int idlest = -1;
3271 int i;
3272
3273 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003274 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003275 load = weighted_cpuload(i);
3276
3277 if (load < min_load || (load == min_load && i == this_cpu)) {
3278 min_load = load;
3279 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003280 }
3281 }
3282
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003283 return idlest;
3284}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003285
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003286/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003287 * Try and locate an idle CPU in the sched_domain.
3288 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003289static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003290{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003291 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07003292 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003293 int i = task_cpu(p);
3294
3295 if (idle_cpu(target))
3296 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003297
3298 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003299 * If the prevous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003300 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003301 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3302 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003303
3304 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07003305 * Otherwise, iterate the domains and find an elegible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003306 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01003307 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08003308 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07003309 sg = sd->groups;
3310 do {
3311 if (!cpumask_intersects(sched_group_cpus(sg),
3312 tsk_cpus_allowed(p)))
3313 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02003314
Linus Torvalds37407ea2012-09-16 12:29:43 -07003315 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003316 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07003317 goto next;
3318 }
3319
3320 target = cpumask_first_and(sched_group_cpus(sg),
3321 tsk_cpus_allowed(p));
3322 goto done;
3323next:
3324 sg = sg->next;
3325 } while (sg != sd->groups);
3326 }
3327done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003328 return target;
3329}
3330
3331/*
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003332 * sched_balance_self: balance the current task (running on cpu) in domains
3333 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
3334 * SD_BALANCE_EXEC.
3335 *
3336 * Balance, ie. select the least loaded group.
3337 *
3338 * Returns the target CPU number, or the same CPU if no balancing is needed.
3339 *
3340 * preempt must be disabled.
3341 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01003342static int
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003343select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003344{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003345 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003346 int cpu = smp_processor_id();
3347 int prev_cpu = task_cpu(p);
3348 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003349 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003350 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003351
Peter Zijlstra29baa742012-04-23 12:11:21 +02003352 if (p->nr_cpus_allowed == 1)
Mike Galbraith76854c72011-11-22 15:18:24 +01003353 return prev_cpu;
3354
Peter Zijlstra0763a662009-09-14 19:37:39 +02003355 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003356 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003357 want_affine = 1;
3358 new_cpu = prev_cpu;
3359 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01003360
Peter Zijlstradce840a2011-04-07 14:09:50 +02003361 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003362 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f428882009-12-16 18:04:34 +01003363 if (!(tmp->flags & SD_LOAD_BALANCE))
3364 continue;
3365
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003366 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003367 * If both cpu and prev_cpu are part of this domain,
3368 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01003369 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003370 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3371 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3372 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08003373 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003374 }
3375
Alex Shif03542a2012-07-26 08:55:34 +08003376 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003377 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003378 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003379
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003380 if (affine_sd) {
Alex Shif03542a2012-07-26 08:55:34 +08003381 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
Peter Zijlstradce840a2011-04-07 14:09:50 +02003382 prev_cpu = cpu;
3383
3384 new_cpu = select_idle_sibling(p, prev_cpu);
3385 goto unlock;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003386 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02003387
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003388 while (sd) {
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003389 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003390 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003391 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003392
Peter Zijlstra0763a662009-09-14 19:37:39 +02003393 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003394 sd = sd->child;
3395 continue;
3396 }
3397
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003398 if (sd_flag & SD_BALANCE_WAKE)
3399 load_idx = sd->wake_idx;
3400
3401 group = find_idlest_group(sd, p, cpu, load_idx);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003402 if (!group) {
3403 sd = sd->child;
3404 continue;
3405 }
3406
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02003407 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003408 if (new_cpu == -1 || new_cpu == cpu) {
3409 /* Now try balancing at a lower domain level of cpu */
3410 sd = sd->child;
3411 continue;
3412 }
3413
3414 /* Now try balancing at a lower domain level of new_cpu */
3415 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003416 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003417 sd = NULL;
3418 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003419 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003420 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02003421 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003422 sd = tmp;
3423 }
3424 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01003425 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02003426unlock:
3427 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01003428
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003429 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003430}
Paul Turner0a74bef2012-10-04 13:18:30 +02003431
3432/*
3433 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3434 * cfs_rq_of(p) references at time of call are still valid and identify the
3435 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3436 * other assumptions, including the state of rq->lock, should be made.
3437 */
3438static void
3439migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3440{
Paul Turneraff3e492012-10-04 13:18:30 +02003441 struct sched_entity *se = &p->se;
3442 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3443
3444 /*
3445 * Load tracking: accumulate removed load so that it can be processed
3446 * when we next update owning cfs_rq under rq->lock. Tasks contribute
3447 * to blocked load iff they have a positive decay-count. It can never
3448 * be negative here since on-rq tasks have decay-count == 0.
3449 */
3450 if (se->avg.decay_count) {
3451 se->avg.decay_count = -__synchronize_entity_decay(se);
3452 atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
3453 }
Paul Turner0a74bef2012-10-04 13:18:30 +02003454}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003455#endif /* CONFIG_SMP */
3456
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003457static unsigned long
3458wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003459{
3460 unsigned long gran = sysctl_sched_wakeup_granularity;
3461
3462 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003463 * Since its curr running now, convert the gran from real-time
3464 * to virtual-time in his units.
Mike Galbraith13814d42010-03-11 17:17:04 +01003465 *
3466 * By using 'se' instead of 'curr' we penalize light tasks, so
3467 * they get preempted easier. That is, if 'se' < 'curr' then
3468 * the resulting gran will be larger, therefore penalizing the
3469 * lighter, if otoh 'se' > 'curr' then the resulting gran will
3470 * be smaller, again penalizing the lighter task.
3471 *
3472 * This is especially important for buddies when the leftmost
3473 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003474 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08003475 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003476}
3477
3478/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02003479 * Should 'se' preempt 'curr'.
3480 *
3481 * |s1
3482 * |s2
3483 * |s3
3484 * g
3485 * |<--->|c
3486 *
3487 * w(c, s1) = -1
3488 * w(c, s2) = 0
3489 * w(c, s3) = 1
3490 *
3491 */
3492static int
3493wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3494{
3495 s64 gran, vdiff = curr->vruntime - se->vruntime;
3496
3497 if (vdiff <= 0)
3498 return -1;
3499
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003500 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02003501 if (vdiff > gran)
3502 return 1;
3503
3504 return 0;
3505}
3506
Peter Zijlstra02479092008-11-04 21:25:10 +01003507static void set_last_buddy(struct sched_entity *se)
3508{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003509 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3510 return;
3511
3512 for_each_sched_entity(se)
3513 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003514}
3515
3516static void set_next_buddy(struct sched_entity *se)
3517{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003518 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3519 return;
3520
3521 for_each_sched_entity(se)
3522 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003523}
3524
Rik van Rielac53db52011-02-01 09:51:03 -05003525static void set_skip_buddy(struct sched_entity *se)
3526{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003527 for_each_sched_entity(se)
3528 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05003529}
3530
Peter Zijlstra464b7522008-10-24 11:06:15 +02003531/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003532 * Preempt the current task with a newly woken task if needed:
3533 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02003534static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003535{
3536 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02003537 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003538 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02003539 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003540 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003541
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01003542 if (unlikely(se == pse))
3543 return;
3544
Paul Turner5238cdd2011-07-21 09:43:37 -07003545 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003546 * This is possible from callers such as move_task(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07003547 * unconditionally check_prempt_curr() after an enqueue (which may have
3548 * lead to a throttle). This both saves work and prevents false
3549 * next-buddy nomination below.
3550 */
3551 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3552 return;
3553
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003554 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02003555 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003556 next_buddy_marked = 1;
3557 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02003558
Bharata B Raoaec0a512008-08-28 14:42:49 +05303559 /*
3560 * We can come here with TIF_NEED_RESCHED already set from new task
3561 * wake up path.
Paul Turner5238cdd2011-07-21 09:43:37 -07003562 *
3563 * Note: this also catches the edge-case of curr being in a throttled
3564 * group (e.g. via set_curr_task), since update_curr() (in the
3565 * enqueue of curr) will have resulted in resched being set. This
3566 * prevents us from potentially nominating it as a false LAST_BUDDY
3567 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05303568 */
3569 if (test_tsk_need_resched(curr))
3570 return;
3571
Darren Harta2f5c9a2011-02-22 13:04:33 -08003572 /* Idle tasks are by definition preempted by non-idle tasks. */
3573 if (unlikely(curr->policy == SCHED_IDLE) &&
3574 likely(p->policy != SCHED_IDLE))
3575 goto preempt;
3576
Ingo Molnar91c234b2007-10-15 17:00:18 +02003577 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08003578 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3579 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02003580 */
Ingo Molnar8ed92e512012-10-14 14:28:50 +02003581 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02003582 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003583
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003584 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07003585 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003586 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003587 if (wakeup_preempt_entity(se, pse) == 1) {
3588 /*
3589 * Bias pick_next to pick the sched entity that is
3590 * triggering this preemption.
3591 */
3592 if (!next_buddy_marked)
3593 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003594 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003595 }
Jupyung Leea65ac742009-11-17 18:51:40 +09003596
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003597 return;
3598
3599preempt:
3600 resched_task(curr);
3601 /*
3602 * Only set the backward buddy when the current task is still
3603 * on the rq. This can happen when a wakeup gets interleaved
3604 * with schedule on the ->pre_schedule() or idle_balance()
3605 * point, either of which can * drop the rq lock.
3606 *
3607 * Also, during early boot the idle thread is in the fair class,
3608 * for obvious reasons its a bad idea to schedule back to it.
3609 */
3610 if (unlikely(!se->on_rq || curr == rq->idle))
3611 return;
3612
3613 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3614 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003615}
3616
Ingo Molnarfb8d4722007-08-09 11:16:48 +02003617static struct task_struct *pick_next_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003618{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003619 struct task_struct *p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003620 struct cfs_rq *cfs_rq = &rq->cfs;
3621 struct sched_entity *se;
3622
Tim Blechmann36ace272009-11-24 11:55:45 +01003623 if (!cfs_rq->nr_running)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003624 return NULL;
3625
3626 do {
Ingo Molnar9948f4b2007-08-09 11:16:48 +02003627 se = pick_next_entity(cfs_rq);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01003628 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003629 cfs_rq = group_cfs_rq(se);
3630 } while (cfs_rq);
3631
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003632 p = task_of(se);
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003633 if (hrtick_enabled(rq))
3634 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003635
3636 return p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003637}
3638
3639/*
3640 * Account for a descheduled task:
3641 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02003642static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003643{
3644 struct sched_entity *se = &prev->se;
3645 struct cfs_rq *cfs_rq;
3646
3647 for_each_sched_entity(se) {
3648 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02003649 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003650 }
3651}
3652
Rik van Rielac53db52011-02-01 09:51:03 -05003653/*
3654 * sched_yield() is very simple
3655 *
3656 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3657 */
3658static void yield_task_fair(struct rq *rq)
3659{
3660 struct task_struct *curr = rq->curr;
3661 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3662 struct sched_entity *se = &curr->se;
3663
3664 /*
3665 * Are we the only task in the tree?
3666 */
3667 if (unlikely(rq->nr_running == 1))
3668 return;
3669
3670 clear_buddies(cfs_rq, se);
3671
3672 if (curr->policy != SCHED_BATCH) {
3673 update_rq_clock(rq);
3674 /*
3675 * Update run-time statistics of the 'current'.
3676 */
3677 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01003678 /*
3679 * Tell update_rq_clock() that we've just updated,
3680 * so we don't do microscopic update in schedule()
3681 * and double the fastpath cost.
3682 */
3683 rq->skip_clock_update = 1;
Rik van Rielac53db52011-02-01 09:51:03 -05003684 }
3685
3686 set_skip_buddy(se);
3687}
3688
Mike Galbraithd95f4122011-02-01 09:50:51 -05003689static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3690{
3691 struct sched_entity *se = &p->se;
3692
Paul Turner5238cdd2011-07-21 09:43:37 -07003693 /* throttled hierarchies are not runnable */
3694 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05003695 return false;
3696
3697 /* Tell the scheduler that we'd really like pse to run next. */
3698 set_next_buddy(se);
3699
Mike Galbraithd95f4122011-02-01 09:50:51 -05003700 yield_task_fair(rq);
3701
3702 return true;
3703}
3704
Peter Williams681f3e62007-10-24 18:23:51 +02003705#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003706/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02003707 * Fair scheduling class load-balancing methods.
3708 *
3709 * BASICS
3710 *
3711 * The purpose of load-balancing is to achieve the same basic fairness the
3712 * per-cpu scheduler provides, namely provide a proportional amount of compute
3713 * time to each task. This is expressed in the following equation:
3714 *
3715 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
3716 *
3717 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3718 * W_i,0 is defined as:
3719 *
3720 * W_i,0 = \Sum_j w_i,j (2)
3721 *
3722 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3723 * is derived from the nice value as per prio_to_weight[].
3724 *
3725 * The weight average is an exponential decay average of the instantaneous
3726 * weight:
3727 *
3728 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
3729 *
3730 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
3731 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3732 * can also include other factors [XXX].
3733 *
3734 * To achieve this balance we define a measure of imbalance which follows
3735 * directly from (1):
3736 *
3737 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
3738 *
3739 * We them move tasks around to minimize the imbalance. In the continuous
3740 * function space it is obvious this converges, in the discrete case we get
3741 * a few fun cases generally called infeasible weight scenarios.
3742 *
3743 * [XXX expand on:
3744 * - infeasible weights;
3745 * - local vs global optima in the discrete case. ]
3746 *
3747 *
3748 * SCHED DOMAINS
3749 *
3750 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3751 * for all i,j solution, we create a tree of cpus that follows the hardware
3752 * topology where each level pairs two lower groups (or better). This results
3753 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3754 * tree to only the first of the previous level and we decrease the frequency
3755 * of load-balance at each level inv. proportional to the number of cpus in
3756 * the groups.
3757 *
3758 * This yields:
3759 *
3760 * log_2 n 1 n
3761 * \Sum { --- * --- * 2^i } = O(n) (5)
3762 * i = 0 2^i 2^i
3763 * `- size of each group
3764 * | | `- number of cpus doing load-balance
3765 * | `- freq
3766 * `- sum over all levels
3767 *
3768 * Coupled with a limit on how many tasks we can migrate every balance pass,
3769 * this makes (5) the runtime complexity of the balancer.
3770 *
3771 * An important property here is that each CPU is still (indirectly) connected
3772 * to every other cpu in at most O(log n) steps:
3773 *
3774 * The adjacency matrix of the resulting graph is given by:
3775 *
3776 * log_2 n
3777 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
3778 * k = 0
3779 *
3780 * And you'll find that:
3781 *
3782 * A^(log_2 n)_i,j != 0 for all i,j (7)
3783 *
3784 * Showing there's indeed a path between every cpu in at most O(log n) steps.
3785 * The task movement gives a factor of O(m), giving a convergence complexity
3786 * of:
3787 *
3788 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
3789 *
3790 *
3791 * WORK CONSERVING
3792 *
3793 * In order to avoid CPUs going idle while there's still work to do, new idle
3794 * balancing is more aggressive and has the newly idle cpu iterate up the domain
3795 * tree itself instead of relying on other CPUs to bring it work.
3796 *
3797 * This adds some complexity to both (5) and (8) but it reduces the total idle
3798 * time.
3799 *
3800 * [XXX more?]
3801 *
3802 *
3803 * CGROUPS
3804 *
3805 * Cgroups make a horror show out of (2), instead of a simple sum we get:
3806 *
3807 * s_k,i
3808 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
3809 * S_k
3810 *
3811 * Where
3812 *
3813 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
3814 *
3815 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
3816 *
3817 * The big problem is S_k, its a global sum needed to compute a local (W_i)
3818 * property.
3819 *
3820 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
3821 * rewrite all of this once again.]
3822 */
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003823
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09003824static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3825
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003826#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01003827#define LBF_NEED_BREAK 0x02
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303828#define LBF_SOME_PINNED 0x04
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003829
3830struct lb_env {
3831 struct sched_domain *sd;
3832
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003833 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05303834 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003835
3836 int dst_cpu;
3837 struct rq *dst_rq;
3838
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303839 struct cpumask *dst_grpmask;
3840 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003841 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02003842 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08003843 /* The set of CPUs under consideration for load-balancing */
3844 struct cpumask *cpus;
3845
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003846 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01003847
3848 unsigned int loop;
3849 unsigned int loop_break;
3850 unsigned int loop_max;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003851};
3852
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003853/*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003854 * move_task - move a task from one runqueue to another runqueue.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003855 * Both runqueues must be locked.
3856 */
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003857static void move_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003858{
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003859 deactivate_task(env->src_rq, p, 0);
3860 set_task_cpu(p, env->dst_cpu);
3861 activate_task(env->dst_rq, p, 0);
3862 check_preempt_curr(env->dst_rq, p, 0);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003863}
3864
3865/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02003866 * Is this task likely cache-hot:
3867 */
3868static int
3869task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3870{
3871 s64 delta;
3872
3873 if (p->sched_class != &fair_sched_class)
3874 return 0;
3875
3876 if (unlikely(p->policy == SCHED_IDLE))
3877 return 0;
3878
3879 /*
3880 * Buddy candidates are cache hot:
3881 */
3882 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3883 (&p->se == cfs_rq_of(&p->se)->next ||
3884 &p->se == cfs_rq_of(&p->se)->last))
3885 return 1;
3886
3887 if (sysctl_sched_migration_cost == -1)
3888 return 1;
3889 if (sysctl_sched_migration_cost == 0)
3890 return 0;
3891
3892 delta = now - p->se.exec_start;
3893
3894 return delta < (s64)sysctl_sched_migration_cost;
3895}
3896
3897/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003898 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3899 */
3900static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003901int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003902{
3903 int tsk_cache_hot = 0;
3904 /*
3905 * We do not migrate tasks that are:
Joonsoo Kimd3198082013-04-23 17:27:40 +09003906 * 1) throttled_lb_pair, or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003907 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09003908 * 3) running (obviously), or
3909 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003910 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09003911 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
3912 return 0;
3913
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003914 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003915 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303916
Lucas De Marchi41acab82010-03-10 23:37:45 -03003917 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303918
3919 /*
3920 * Remember if this task can be migrated to any other cpu in
3921 * our sched_group. We may want to revisit it if we couldn't
3922 * meet load balance goals by pulling other tasks on src_cpu.
3923 *
3924 * Also avoid computing new_dst_cpu if we have already computed
3925 * one in current iteration.
3926 */
3927 if (!env->dst_grpmask || (env->flags & LBF_SOME_PINNED))
3928 return 0;
3929
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003930 /* Prevent to re-select dst_cpu via env's cpus */
3931 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
3932 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
3933 env->flags |= LBF_SOME_PINNED;
3934 env->new_dst_cpu = cpu;
3935 break;
3936 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303937 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09003938
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003939 return 0;
3940 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05303941
3942 /* Record that we found atleast one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003943 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003944
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003945 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03003946 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003947 return 0;
3948 }
3949
3950 /*
3951 * Aggressive migration if:
3952 * 1) task is cache cold, or
3953 * 2) too many balance attempts have failed.
3954 */
3955
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003956 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003957 if (!tsk_cache_hot ||
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003958 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003959
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003960 if (tsk_cache_hot) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003961 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003962 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003963 }
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003964
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003965 return 1;
3966 }
3967
Zhang Hang4e2dcb72013-04-10 14:04:55 +08003968 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
3969 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003970}
3971
Peter Zijlstra897c3952009-12-17 17:45:42 +01003972/*
3973 * move_one_task tries to move exactly one task from busiest to this_rq, as
3974 * part of active balancing operations within "domain".
3975 * Returns 1 if successful and 0 otherwise.
3976 *
3977 * Called with both runqueues locked.
3978 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01003979static int move_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01003980{
3981 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01003982
Peter Zijlstra367456c2012-02-20 21:49:09 +01003983 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01003984 if (!can_migrate_task(p, env))
3985 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01003986
Peter Zijlstra367456c2012-02-20 21:49:09 +01003987 move_task(p, env);
3988 /*
3989 * Right now, this is only the second place move_task()
3990 * is called, so we can safely collect move_task()
3991 * stats here rather than inside move_task().
3992 */
3993 schedstat_inc(env->sd, lb_gained[env->idle]);
3994 return 1;
Peter Zijlstra897c3952009-12-17 17:45:42 +01003995 }
Peter Zijlstra897c3952009-12-17 17:45:42 +01003996 return 0;
3997}
3998
Peter Zijlstra367456c2012-02-20 21:49:09 +01003999static unsigned long task_h_load(struct task_struct *p);
4000
Peter Zijlstraeb953082012-04-17 13:38:40 +02004001static const unsigned int sched_nr_migrate_break = 32;
4002
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004003/*
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004004 * move_tasks tries to move up to imbalance weighted load from busiest to
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004005 * this_rq, as part of a balancing operation within domain "sd".
4006 * Returns 1 if successful and 0 otherwise.
4007 *
4008 * Called with both runqueues locked.
4009 */
4010static int move_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004011{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004012 struct list_head *tasks = &env->src_rq->cfs_tasks;
4013 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004014 unsigned long load;
4015 int pulled = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004016
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004017 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004018 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004019
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004020 while (!list_empty(tasks)) {
4021 p = list_first_entry(tasks, struct task_struct, se.group_node);
4022
Peter Zijlstra367456c2012-02-20 21:49:09 +01004023 env->loop++;
4024 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004025 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004026 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004027
4028 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01004029 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02004030 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004031 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01004032 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02004033 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004034
Joonsoo Kimd3198082013-04-23 17:27:40 +09004035 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01004036 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004037
Peter Zijlstra367456c2012-02-20 21:49:09 +01004038 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004039
Peter Zijlstraeb953082012-04-17 13:38:40 +02004040 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004041 goto next;
4042
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004043 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004044 goto next;
4045
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004046 move_task(p, env);
Peter Zijlstraee00e662009-12-17 17:25:20 +01004047 pulled++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004048 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004049
4050#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01004051 /*
4052 * NEWIDLE balancing is a source of latency, so preemptible
4053 * kernels will stop after the first task is pulled to minimize
4054 * the critical section.
4055 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004056 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004057 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004058#endif
4059
Peter Zijlstraee00e662009-12-17 17:25:20 +01004060 /*
4061 * We only want to steal up to the prescribed amount of
4062 * weighted load.
4063 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004064 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004065 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004066
Peter Zijlstra367456c2012-02-20 21:49:09 +01004067 continue;
4068next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004069 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004070 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004071
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004072 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004073 * Right now, this is one of only two places move_task() is called,
4074 * so we can safely collect move_task() stats here rather than
4075 * inside move_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004076 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004077 schedstat_add(env->sd, lb_gained[env->idle], pulled);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004078
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004079 return pulled;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004080}
4081
Peter Zijlstra230059de2009-12-17 17:47:12 +01004082#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004083/*
4084 * update tg->load_weight by folding this cpu's load_avg
4085 */
Paul Turner48a16752012-10-04 13:18:31 +02004086static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004087{
Paul Turner48a16752012-10-04 13:18:31 +02004088 struct sched_entity *se = tg->se[cpu];
4089 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004090
Paul Turner48a16752012-10-04 13:18:31 +02004091 /* throttled entities do not contribute to load */
4092 if (throttled_hierarchy(cfs_rq))
4093 return;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004094
Paul Turneraff3e492012-10-04 13:18:30 +02004095 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004096
Paul Turner82958362012-10-04 13:18:31 +02004097 if (se) {
4098 update_entity_load_avg(se, 1);
4099 /*
4100 * We pivot on our runnable average having decayed to zero for
4101 * list removal. This generally implies that all our children
4102 * have also been removed (modulo rounding error or bandwidth
4103 * control); however, such cases are rare and we can fix these
4104 * at enqueue.
4105 *
4106 * TODO: fix up out-of-order children on enqueue.
4107 */
4108 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4109 list_del_leaf_cfs_rq(cfs_rq);
4110 } else {
Paul Turner48a16752012-10-04 13:18:31 +02004111 struct rq *rq = rq_of(cfs_rq);
Paul Turner82958362012-10-04 13:18:31 +02004112 update_rq_runnable_avg(rq, rq->nr_running);
4113 }
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004114}
4115
Paul Turner48a16752012-10-04 13:18:31 +02004116static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004117{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004118 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02004119 struct cfs_rq *cfs_rq;
4120 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004121
Paul Turner48a16752012-10-04 13:18:31 +02004122 raw_spin_lock_irqsave(&rq->lock, flags);
4123 update_rq_clock(rq);
Peter Zijlstra9763b672011-07-13 13:09:25 +02004124 /*
4125 * Iterates the task_group tree in a bottom up fashion, see
4126 * list_add_leaf_cfs_rq() for details.
4127 */
Paul Turner64660c82011-07-21 09:43:36 -07004128 for_each_leaf_cfs_rq(rq, cfs_rq) {
Paul Turner48a16752012-10-04 13:18:31 +02004129 /*
4130 * Note: We may want to consider periodically releasing
4131 * rq->lock about these updates so that creating many task
4132 * groups does not result in continually extending hold time.
4133 */
4134 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
Paul Turner64660c82011-07-21 09:43:36 -07004135 }
Paul Turner48a16752012-10-04 13:18:31 +02004136
4137 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004138}
4139
Peter Zijlstra9763b672011-07-13 13:09:25 +02004140/*
4141 * Compute the cpu's hierarchical load factor for each task group.
4142 * This needs to be done in a top-down fashion because the load of a child
4143 * group is a fraction of its parents load.
4144 */
4145static int tg_load_down(struct task_group *tg, void *data)
4146{
4147 unsigned long load;
4148 long cpu = (long)data;
4149
4150 if (!tg->parent) {
4151 load = cpu_rq(cpu)->load.weight;
4152 } else {
4153 load = tg->parent->cfs_rq[cpu]->h_load;
4154 load *= tg->se[cpu]->load.weight;
4155 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
4156 }
4157
4158 tg->cfs_rq[cpu]->h_load = load;
4159
4160 return 0;
4161}
4162
4163static void update_h_load(long cpu)
4164{
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004165 struct rq *rq = cpu_rq(cpu);
4166 unsigned long now = jiffies;
4167
4168 if (rq->h_load_throttle == now)
4169 return;
4170
4171 rq->h_load_throttle = now;
4172
Peter Zijlstra367456c2012-02-20 21:49:09 +01004173 rcu_read_lock();
Peter Zijlstra9763b672011-07-13 13:09:25 +02004174 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
Peter Zijlstra367456c2012-02-20 21:49:09 +01004175 rcu_read_unlock();
Peter Zijlstra9763b672011-07-13 13:09:25 +02004176}
4177
Peter Zijlstra367456c2012-02-20 21:49:09 +01004178static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004179{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004180 struct cfs_rq *cfs_rq = task_cfs_rq(p);
4181 unsigned long load;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004182
Peter Zijlstra367456c2012-02-20 21:49:09 +01004183 load = p->se.load.weight;
4184 load = div_u64(load * cfs_rq->h_load, cfs_rq->load.weight + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004185
Peter Zijlstra367456c2012-02-20 21:49:09 +01004186 return load;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004187}
4188#else
Paul Turner48a16752012-10-04 13:18:31 +02004189static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004190{
4191}
4192
Peter Zijlstra367456c2012-02-20 21:49:09 +01004193static inline void update_h_load(long cpu)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004194{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004195}
4196
4197static unsigned long task_h_load(struct task_struct *p)
4198{
4199 return p->se.load.weight;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004200}
4201#endif
4202
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004203/********** Helpers for find_busiest_group ************************/
4204/*
4205 * sd_lb_stats - Structure to store the statistics of a sched_domain
4206 * during load balancing.
4207 */
4208struct sd_lb_stats {
4209 struct sched_group *busiest; /* Busiest group in this sd */
4210 struct sched_group *this; /* Local group in this sd */
4211 unsigned long total_load; /* Total load of all groups in sd */
4212 unsigned long total_pwr; /* Total power of all groups in sd */
4213 unsigned long avg_load; /* Average load across all groups in sd */
4214
4215 /** Statistics of this group */
4216 unsigned long this_load;
4217 unsigned long this_load_per_task;
4218 unsigned long this_nr_running;
Nikhil Raofab47622010-10-15 13:12:29 -07004219 unsigned long this_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004220 unsigned int this_idle_cpus;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004221
4222 /* Statistics of the busiest group */
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004223 unsigned int busiest_idle_cpus;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004224 unsigned long max_load;
4225 unsigned long busiest_load_per_task;
4226 unsigned long busiest_nr_running;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004227 unsigned long busiest_group_capacity;
Nikhil Raofab47622010-10-15 13:12:29 -07004228 unsigned long busiest_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004229 unsigned int busiest_group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004230
4231 int group_imb; /* Is there imbalance in this sd */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004232};
4233
4234/*
4235 * sg_lb_stats - stats of a sched_group required for load_balancing
4236 */
4237struct sg_lb_stats {
4238 unsigned long avg_load; /*Avg load across the CPUs of the group */
4239 unsigned long group_load; /* Total load over the CPUs of the group */
4240 unsigned long sum_nr_running; /* Nr tasks running in the group */
4241 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
4242 unsigned long group_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004243 unsigned long idle_cpus;
4244 unsigned long group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004245 int group_imb; /* Is there an imbalance in the group ? */
Nikhil Raofab47622010-10-15 13:12:29 -07004246 int group_has_capacity; /* Is there extra capacity in the group? */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004247};
4248
4249/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004250 * get_sd_load_idx - Obtain the load index for a given sched domain.
4251 * @sd: The sched_domain whose load_idx is to be obtained.
4252 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
4253 */
4254static inline int get_sd_load_idx(struct sched_domain *sd,
4255 enum cpu_idle_type idle)
4256{
4257 int load_idx;
4258
4259 switch (idle) {
4260 case CPU_NOT_IDLE:
4261 load_idx = sd->busy_idx;
4262 break;
4263
4264 case CPU_NEWLY_IDLE:
4265 load_idx = sd->newidle_idx;
4266 break;
4267 default:
4268 load_idx = sd->idle_idx;
4269 break;
4270 }
4271
4272 return load_idx;
4273}
4274
Li Zefan15f803c2013-03-05 16:07:11 +08004275static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004276{
Nikhil Rao1399fa72011-05-18 10:09:39 -07004277 return SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004278}
4279
4280unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4281{
4282 return default_scale_freq_power(sd, cpu);
4283}
4284
Li Zefan15f803c2013-03-05 16:07:11 +08004285static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004286{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004287 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004288 unsigned long smt_gain = sd->smt_gain;
4289
4290 smt_gain /= weight;
4291
4292 return smt_gain;
4293}
4294
4295unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4296{
4297 return default_scale_smt_power(sd, cpu);
4298}
4299
Li Zefan15f803c2013-03-05 16:07:11 +08004300static unsigned long scale_rt_power(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004301{
4302 struct rq *rq = cpu_rq(cpu);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004303 u64 total, available, age_stamp, avg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004304
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004305 /*
4306 * Since we're reading these variables without serialization make sure
4307 * we read them once before doing sanity checks on them.
4308 */
4309 age_stamp = ACCESS_ONCE(rq->age_stamp);
4310 avg = ACCESS_ONCE(rq->rt_avg);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004311
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004312 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004313
4314 if (unlikely(total < avg)) {
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004315 /* Ensures that power won't end up being negative */
4316 available = 0;
4317 } else {
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004318 available = total - avg;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004319 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004320
Nikhil Rao1399fa72011-05-18 10:09:39 -07004321 if (unlikely((s64)total < SCHED_POWER_SCALE))
4322 total = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004323
Nikhil Rao1399fa72011-05-18 10:09:39 -07004324 total >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004325
4326 return div_u64(available, total);
4327}
4328
4329static void update_cpu_power(struct sched_domain *sd, int cpu)
4330{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004331 unsigned long weight = sd->span_weight;
Nikhil Rao1399fa72011-05-18 10:09:39 -07004332 unsigned long power = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004333 struct sched_group *sdg = sd->groups;
4334
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004335 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4336 if (sched_feat(ARCH_POWER))
4337 power *= arch_scale_smt_power(sd, cpu);
4338 else
4339 power *= default_scale_smt_power(sd, cpu);
4340
Nikhil Rao1399fa72011-05-18 10:09:39 -07004341 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004342 }
4343
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004344 sdg->sgp->power_orig = power;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004345
4346 if (sched_feat(ARCH_POWER))
4347 power *= arch_scale_freq_power(sd, cpu);
4348 else
4349 power *= default_scale_freq_power(sd, cpu);
4350
Nikhil Rao1399fa72011-05-18 10:09:39 -07004351 power >>= SCHED_POWER_SHIFT;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004352
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004353 power *= scale_rt_power(cpu);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004354 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004355
4356 if (!power)
4357 power = 1;
4358
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004359 cpu_rq(cpu)->cpu_power = power;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004360 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004361}
4362
Peter Zijlstra029632f2011-10-25 10:00:11 +02004363void update_group_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004364{
4365 struct sched_domain *child = sd->child;
4366 struct sched_group *group, *sdg = sd->groups;
4367 unsigned long power;
Vincent Guittot4ec44122011-12-12 20:21:08 +01004368 unsigned long interval;
4369
4370 interval = msecs_to_jiffies(sd->balance_interval);
4371 interval = clamp(interval, 1UL, max_load_balance_interval);
4372 sdg->sgp->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004373
4374 if (!child) {
4375 update_cpu_power(sd, cpu);
4376 return;
4377 }
4378
4379 power = 0;
4380
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004381 if (child->flags & SD_OVERLAP) {
4382 /*
4383 * SD_OVERLAP domains cannot assume that child groups
4384 * span the current group.
4385 */
4386
4387 for_each_cpu(cpu, sched_group_cpus(sdg))
4388 power += power_of(cpu);
4389 } else {
4390 /*
4391 * !SD_OVERLAP domains can assume that child groups
4392 * span the current group.
4393 */
4394
4395 group = child->groups;
4396 do {
4397 power += group->sgp->power;
4398 group = group->next;
4399 } while (group != child->groups);
4400 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004401
Peter Zijlstrac3decf02012-05-31 12:05:32 +02004402 sdg->sgp->power_orig = sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004403}
4404
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004405/*
4406 * Try and fix up capacity for tiny siblings, this is needed when
4407 * things like SD_ASYM_PACKING need f_b_g to select another sibling
4408 * which on its own isn't powerful enough.
4409 *
4410 * See update_sd_pick_busiest() and check_asym_packing().
4411 */
4412static inline int
4413fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4414{
4415 /*
Nikhil Rao1399fa72011-05-18 10:09:39 -07004416 * Only siblings can have significantly less than SCHED_POWER_SCALE
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004417 */
Peter Zijlstraa6c75f22011-04-07 14:09:52 +02004418 if (!(sd->flags & SD_SHARE_CPUPOWER))
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004419 return 0;
4420
4421 /*
4422 * If ~90% of the cpu_power is still there, we're good.
4423 */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004424 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004425 return 1;
4426
4427 return 0;
4428}
4429
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004430/**
4431 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004432 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004433 * @group: sched_group whose statistics are to be updated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004434 * @load_idx: Load index of sched_domain of this_cpu for load calc.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004435 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004436 * @balance: Should we balance.
4437 * @sgs: variable to hold the statistics for this group.
4438 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004439static inline void update_sg_lb_stats(struct lb_env *env,
4440 struct sched_group *group, int load_idx,
Michael Wangb94031302012-07-12 16:10:13 +08004441 int local_group, int *balance, struct sg_lb_stats *sgs)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004442{
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004443 unsigned long nr_running, max_nr_running, min_nr_running;
4444 unsigned long load, max_cpu_load, min_cpu_load;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004445 unsigned int balance_cpu = -1, first_idle_cpu = 0;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004446 unsigned long avg_load_per_task = 0;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004447 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004448
Gautham R Shenoy871e35b2010-01-20 14:02:44 -06004449 if (local_group)
Peter Zijlstrac1174872012-05-31 14:47:33 +02004450 balance_cpu = group_balance_cpu(group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004451
4452 /* Tally up the load of all CPUs in the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004453 max_cpu_load = 0;
4454 min_cpu_load = ~0UL;
Nikhil Rao2582f0e2010-10-13 12:09:36 -07004455 max_nr_running = 0;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004456 min_nr_running = ~0UL;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004457
Michael Wangb94031302012-07-12 16:10:13 +08004458 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004459 struct rq *rq = cpu_rq(i);
4460
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004461 nr_running = rq->nr_running;
4462
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004463 /* Bias balancing toward cpus of our domain */
4464 if (local_group) {
Peter Zijlstrac1174872012-05-31 14:47:33 +02004465 if (idle_cpu(i) && !first_idle_cpu &&
4466 cpumask_test_cpu(i, sched_group_mask(group))) {
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004467 first_idle_cpu = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004468 balance_cpu = i;
4469 }
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004470
4471 load = target_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004472 } else {
4473 load = source_load(i, load_idx);
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004474 if (load > max_cpu_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004475 max_cpu_load = load;
4476 if (min_cpu_load > load)
4477 min_cpu_load = load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004478
4479 if (nr_running > max_nr_running)
4480 max_nr_running = nr_running;
4481 if (min_nr_running > nr_running)
4482 min_nr_running = nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004483 }
4484
4485 sgs->group_load += load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004486 sgs->sum_nr_running += nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004487 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004488 if (idle_cpu(i))
4489 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004490 }
4491
4492 /*
4493 * First idle cpu or the first cpu(busiest) in this sched group
4494 * is eligible for doing load balancing at this and above
4495 * domains. In the newly idle case, we will allow all the cpu's
4496 * to do the newly idle load balance.
4497 */
Vincent Guittot4ec44122011-12-12 20:21:08 +01004498 if (local_group) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004499 if (env->idle != CPU_NEWLY_IDLE) {
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004500 if (balance_cpu != env->dst_cpu) {
Vincent Guittot4ec44122011-12-12 20:21:08 +01004501 *balance = 0;
4502 return;
4503 }
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004504 update_group_power(env->sd, env->dst_cpu);
Vincent Guittot4ec44122011-12-12 20:21:08 +01004505 } else if (time_after_eq(jiffies, group->sgp->next_update))
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004506 update_group_power(env->sd, env->dst_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004507 }
4508
4509 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004510 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004511
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004512 /*
4513 * Consider the group unbalanced when the imbalance is larger
Peter Zijlstra866ab432011-02-21 18:56:47 +01004514 * than the average weight of a task.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004515 *
4516 * APZ: with cgroup the avg task weight can vary wildly and
4517 * might not be a suitable number - should we keep a
4518 * normalized nr_running number somewhere that negates
4519 * the hierarchy?
4520 */
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004521 if (sgs->sum_nr_running)
4522 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004523
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004524 if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
4525 (max_nr_running - min_nr_running) > 1)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004526 sgs->group_imb = 1;
4527
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004528 sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
Nikhil Rao1399fa72011-05-18 10:09:39 -07004529 SCHED_POWER_SCALE);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004530 if (!sgs->group_capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004531 sgs->group_capacity = fix_small_capacity(env->sd, group);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004532 sgs->group_weight = group->group_weight;
Nikhil Raofab47622010-10-15 13:12:29 -07004533
4534 if (sgs->group_capacity > sgs->sum_nr_running)
4535 sgs->group_has_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004536}
4537
4538/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10004539 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07004540 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004541 * @sds: sched_domain statistics
4542 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10004543 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10004544 *
4545 * Determine if @sg is a busier group than the previously selected
4546 * busiest group.
4547 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004548static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10004549 struct sd_lb_stats *sds,
4550 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004551 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004552{
4553 if (sgs->avg_load <= sds->max_load)
4554 return false;
4555
4556 if (sgs->sum_nr_running > sgs->group_capacity)
4557 return true;
4558
4559 if (sgs->group_imb)
4560 return true;
4561
4562 /*
4563 * ASYM_PACKING needs to move all the work to the lowest
4564 * numbered CPUs in the group, therefore mark all groups
4565 * higher than ourself as busy.
4566 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004567 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4568 env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10004569 if (!sds->busiest)
4570 return true;
4571
4572 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4573 return true;
4574 }
4575
4576 return false;
4577}
4578
4579/**
Hui Kang461819a2011-10-11 23:00:59 -04004580 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004581 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004582 * @balance: Should we balance.
4583 * @sds: variable to hold the statistics for this sched_domain.
4584 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004585static inline void update_sd_lb_stats(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08004586 int *balance, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004587{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004588 struct sched_domain *child = env->sd->child;
4589 struct sched_group *sg = env->sd->groups;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004590 struct sg_lb_stats sgs;
4591 int load_idx, prefer_sibling = 0;
4592
4593 if (child && child->flags & SD_PREFER_SIBLING)
4594 prefer_sibling = 1;
4595
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004596 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004597
4598 do {
4599 int local_group;
4600
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004601 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004602 memset(&sgs, 0, sizeof(sgs));
Michael Wangb94031302012-07-12 16:10:13 +08004603 update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004604
Peter Zijlstra8f190fb2009-12-24 14:18:21 +01004605 if (local_group && !(*balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004606 return;
4607
4608 sds->total_load += sgs.group_load;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004609 sds->total_pwr += sg->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004610
4611 /*
4612 * In case the child domain prefers tasks go to siblings
Michael Neuling532cb4c2010-06-08 14:57:02 +10004613 * first, lower the sg capacity to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07004614 * and move all the excess tasks away. We lower the capacity
4615 * of a group only if the local group has the capacity to fit
4616 * these excess tasks, i.e. nr_running < group_capacity. The
4617 * extra check prevents the case where you always pull from the
4618 * heaviest group when it is already under-utilized (possible
4619 * with a large weight task outweighs the tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004620 */
Nikhil Rao75dd3212010-10-15 13:12:30 -07004621 if (prefer_sibling && !local_group && sds->this_has_capacity)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004622 sgs.group_capacity = min(sgs.group_capacity, 1UL);
4623
4624 if (local_group) {
4625 sds->this_load = sgs.avg_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10004626 sds->this = sg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004627 sds->this_nr_running = sgs.sum_nr_running;
4628 sds->this_load_per_task = sgs.sum_weighted_load;
Nikhil Raofab47622010-10-15 13:12:29 -07004629 sds->this_has_capacity = sgs.group_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004630 sds->this_idle_cpus = sgs.idle_cpus;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004631 } else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004632 sds->max_load = sgs.avg_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10004633 sds->busiest = sg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004634 sds->busiest_nr_running = sgs.sum_nr_running;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004635 sds->busiest_idle_cpus = sgs.idle_cpus;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004636 sds->busiest_group_capacity = sgs.group_capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004637 sds->busiest_load_per_task = sgs.sum_weighted_load;
Nikhil Raofab47622010-10-15 13:12:29 -07004638 sds->busiest_has_capacity = sgs.group_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004639 sds->busiest_group_weight = sgs.group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004640 sds->group_imb = sgs.group_imb;
4641 }
4642
Michael Neuling532cb4c2010-06-08 14:57:02 +10004643 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004644 } while (sg != env->sd->groups);
Michael Neuling532cb4c2010-06-08 14:57:02 +10004645}
4646
Michael Neuling532cb4c2010-06-08 14:57:02 +10004647/**
4648 * check_asym_packing - Check to see if the group is packed into the
4649 * sched doman.
4650 *
4651 * This is primarily intended to used at the sibling level. Some
4652 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4653 * case of POWER7, it can move to lower SMT modes only when higher
4654 * threads are idle. When in lower SMT modes, the threads will
4655 * perform better since they share less core resources. Hence when we
4656 * have idle threads, we want them to be the higher ones.
4657 *
4658 * This packing function is run on idle threads. It checks to see if
4659 * the busiest CPU in this domain (core in the P7 case) has a higher
4660 * CPU number than the packing function is being run on. Here we are
4661 * assuming lower CPU number will be equivalent to lower a SMT thread
4662 * number.
4663 *
Michael Neulingb6b12292010-06-10 12:06:21 +10004664 * Returns 1 when packing is required and a task should be moved to
4665 * this CPU. The amount of the imbalance is returned in *imbalance.
4666 *
Randy Dunlapcd968912012-06-08 13:18:33 -07004667 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004668 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10004669 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004670static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004671{
4672 int busiest_cpu;
4673
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004674 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10004675 return 0;
4676
4677 if (!sds->busiest)
4678 return 0;
4679
4680 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004681 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004682 return 0;
4683
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004684 env->imbalance = DIV_ROUND_CLOSEST(
4685 sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
4686
Michael Neuling532cb4c2010-06-08 14:57:02 +10004687 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004688}
4689
4690/**
4691 * fix_small_imbalance - Calculate the minor imbalance that exists
4692 * amongst the groups of a sched_domain, during
4693 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004694 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004695 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004696 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004697static inline
4698void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004699{
4700 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4701 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004702 unsigned long scaled_busy_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004703
4704 if (sds->this_nr_running) {
4705 sds->this_load_per_task /= sds->this_nr_running;
4706 if (sds->busiest_load_per_task >
4707 sds->this_load_per_task)
4708 imbn = 1;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004709 } else {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004710 sds->this_load_per_task =
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004711 cpu_avg_load_per_task(env->dst_cpu);
4712 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004713
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004714 scaled_busy_load_per_task = sds->busiest_load_per_task
Nikhil Rao1399fa72011-05-18 10:09:39 -07004715 * SCHED_POWER_SCALE;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004716 scaled_busy_load_per_task /= sds->busiest->sgp->power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004717
4718 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4719 (scaled_busy_load_per_task * imbn)) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004720 env->imbalance = sds->busiest_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004721 return;
4722 }
4723
4724 /*
4725 * OK, we don't have enough imbalance to justify moving tasks,
4726 * however we may be able to increase total CPU power used by
4727 * moving them.
4728 */
4729
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004730 pwr_now += sds->busiest->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004731 min(sds->busiest_load_per_task, sds->max_load);
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004732 pwr_now += sds->this->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004733 min(sds->this_load_per_task, sds->this_load);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004734 pwr_now /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004735
4736 /* Amount of load we'd subtract */
Nikhil Rao1399fa72011-05-18 10:09:39 -07004737 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004738 sds->busiest->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004739 if (sds->max_load > tmp)
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004740 pwr_move += sds->busiest->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004741 min(sds->busiest_load_per_task, sds->max_load - tmp);
4742
4743 /* Amount of load we'd add */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004744 if (sds->max_load * sds->busiest->sgp->power <
Nikhil Rao1399fa72011-05-18 10:09:39 -07004745 sds->busiest_load_per_task * SCHED_POWER_SCALE)
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004746 tmp = (sds->max_load * sds->busiest->sgp->power) /
4747 sds->this->sgp->power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004748 else
Nikhil Rao1399fa72011-05-18 10:09:39 -07004749 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004750 sds->this->sgp->power;
4751 pwr_move += sds->this->sgp->power *
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004752 min(sds->this_load_per_task, sds->this_load + tmp);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004753 pwr_move /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004754
4755 /* Move if we gain throughput */
4756 if (pwr_move > pwr_now)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004757 env->imbalance = sds->busiest_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004758}
4759
4760/**
4761 * calculate_imbalance - Calculate the amount of imbalance present within the
4762 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004763 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004764 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004765 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004766static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004767{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004768 unsigned long max_pull, load_above_capacity = ~0UL;
4769
4770 sds->busiest_load_per_task /= sds->busiest_nr_running;
4771 if (sds->group_imb) {
4772 sds->busiest_load_per_task =
4773 min(sds->busiest_load_per_task, sds->avg_load);
4774 }
4775
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004776 /*
4777 * In the presence of smp nice balancing, certain scenarios can have
4778 * max load less than avg load(as we skip the groups at or below
4779 * its cpu_power, while calculating max_load..)
4780 */
4781 if (sds->max_load < sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004782 env->imbalance = 0;
4783 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004784 }
4785
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004786 if (!sds->group_imb) {
4787 /*
4788 * Don't want to pull so many tasks that a group would go idle.
4789 */
4790 load_above_capacity = (sds->busiest_nr_running -
4791 sds->busiest_group_capacity);
4792
Nikhil Rao1399fa72011-05-18 10:09:39 -07004793 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004794
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004795 load_above_capacity /= sds->busiest->sgp->power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004796 }
4797
4798 /*
4799 * We're trying to get all the cpus to the average_load, so we don't
4800 * want to push ourselves above the average load, nor do we wish to
4801 * reduce the max loaded cpu below the average load. At the same time,
4802 * we also don't want to reduce the group load below the group capacity
4803 * (so that we can implement power-savings policies etc). Thus we look
4804 * for the minimum possible imbalance.
4805 * Be careful of negative numbers as they'll appear as very large values
4806 * with unsigned longs.
4807 */
4808 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004809
4810 /* How much load to actually move to equalise the imbalance */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004811 env->imbalance = min(max_pull * sds->busiest->sgp->power,
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004812 (sds->avg_load - sds->this_load) * sds->this->sgp->power)
Nikhil Rao1399fa72011-05-18 10:09:39 -07004813 / SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004814
4815 /*
4816 * if *imbalance is less than the average load per runnable task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004817 * there is no guarantee that any tasks will be moved so we'll have
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004818 * a think about bumping its value to force at least one task to be
4819 * moved
4820 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004821 if (env->imbalance < sds->busiest_load_per_task)
4822 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004823
4824}
Nikhil Raofab47622010-10-15 13:12:29 -07004825
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004826/******* find_busiest_group() helpers end here *********************/
4827
4828/**
4829 * find_busiest_group - Returns the busiest group within the sched_domain
4830 * if there is an imbalance. If there isn't an imbalance, and
4831 * the user has opted for power-savings, it returns a group whose
4832 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4833 * such a group exists.
4834 *
4835 * Also calculates the amount of weighted load which should be moved
4836 * to restore balance.
4837 *
Randy Dunlapcd968912012-06-08 13:18:33 -07004838 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004839 * @balance: Pointer to a variable indicating if this_cpu
4840 * is the appropriate cpu to perform load balancing at this_level.
4841 *
4842 * Returns: - the busiest group if imbalance exists.
4843 * - If no imbalance and user has opted for power-savings balance,
4844 * return the least loaded group whose CPUs can be
4845 * put to idle by rebalancing its tasks onto our group.
4846 */
4847static struct sched_group *
Michael Wangb94031302012-07-12 16:10:13 +08004848find_busiest_group(struct lb_env *env, int *balance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004849{
4850 struct sd_lb_stats sds;
4851
4852 memset(&sds, 0, sizeof(sds));
4853
4854 /*
4855 * Compute the various statistics relavent for load balancing at
4856 * this level.
4857 */
Michael Wangb94031302012-07-12 16:10:13 +08004858 update_sd_lb_stats(env, balance, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004859
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004860 /*
4861 * this_cpu is not the appropriate cpu to perform load balancing at
4862 * this level.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004863 */
Peter Zijlstra8f190fb2009-12-24 14:18:21 +01004864 if (!(*balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004865 goto ret;
4866
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004867 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
4868 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10004869 return sds.busiest;
4870
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004871 /* There is no busy sibling group to pull tasks from */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004872 if (!sds.busiest || sds.busiest_nr_running == 0)
4873 goto out_balanced;
4874
Nikhil Rao1399fa72011-05-18 10:09:39 -07004875 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
Ken Chenb0432d82011-04-07 17:23:22 -07004876
Peter Zijlstra866ab432011-02-21 18:56:47 +01004877 /*
4878 * If the busiest group is imbalanced the below checks don't
4879 * work because they assumes all things are equal, which typically
4880 * isn't true due to cpus_allowed constraints and the like.
4881 */
4882 if (sds.group_imb)
4883 goto force_balance;
4884
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004885 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004886 if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
Nikhil Raofab47622010-10-15 13:12:29 -07004887 !sds.busiest_has_capacity)
4888 goto force_balance;
4889
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004890 /*
4891 * If the local group is more busy than the selected busiest group
4892 * don't try and pull any tasks.
4893 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004894 if (sds.this_load >= sds.max_load)
4895 goto out_balanced;
4896
Peter Zijlstracc57aa82011-02-21 18:55:32 +01004897 /*
4898 * Don't pull any tasks if this group is already above the domain
4899 * average load.
4900 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004901 if (sds.this_load >= sds.avg_load)
4902 goto out_balanced;
4903
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004904 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004905 /*
4906 * This cpu is idle. If the busiest group load doesn't
4907 * have more tasks than the number of available cpu's and
4908 * there is no imbalance between this and busiest group
4909 * wrt to idle cpu's, it is balanced.
4910 */
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004911 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004912 sds.busiest_nr_running <= sds.busiest_group_weight)
4913 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004914 } else {
4915 /*
4916 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4917 * imbalance_pct to be conservative.
4918 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004919 if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01004920 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004921 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004922
Nikhil Raofab47622010-10-15 13:12:29 -07004923force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004924 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004925 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004926 return sds.busiest;
4927
4928out_balanced:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004929ret:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004930 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004931 return NULL;
4932}
4933
4934/*
4935 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4936 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004937static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08004938 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004939{
4940 struct rq *busiest = NULL, *rq;
4941 unsigned long max_load = 0;
4942 int i;
4943
4944 for_each_cpu(i, sched_group_cpus(group)) {
4945 unsigned long power = power_of(i);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004946 unsigned long capacity = DIV_ROUND_CLOSEST(power,
4947 SCHED_POWER_SCALE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004948 unsigned long wl;
4949
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004950 if (!capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004951 capacity = fix_small_capacity(env->sd, group);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004952
Michael Wangb94031302012-07-12 16:10:13 +08004953 if (!cpumask_test_cpu(i, env->cpus))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004954 continue;
4955
4956 rq = cpu_rq(i);
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004957 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004958
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004959 /*
4960 * When comparing with imbalance, use weighted_cpuload()
4961 * which is not scaled with the cpu power.
4962 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004963 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004964 continue;
4965
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004966 /*
4967 * For the load comparisons with the other cpu's, consider
4968 * the weighted_cpuload() scaled with the cpu power, so that
4969 * the load can be moved away from the cpu that is potentially
4970 * running at a lower capacity.
4971 */
Nikhil Rao1399fa72011-05-18 10:09:39 -07004972 wl = (wl * SCHED_POWER_SCALE) / power;
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01004973
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004974 if (wl > max_load) {
4975 max_load = wl;
4976 busiest = rq;
4977 }
4978 }
4979
4980 return busiest;
4981}
4982
4983/*
4984 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
4985 * so long as it is large enough.
4986 */
4987#define MAX_PINNED_INTERVAL 512
4988
4989/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09004990DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004991
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004992static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01004993{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004994 struct sched_domain *sd = env->sd;
4995
4996 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10004997
4998 /*
4999 * ASYM_PACKING needs to force migrate tasks from busy but
5000 * higher numbered CPUs in order to pack all tasks in the
5001 * lowest numbered CPUs.
5002 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005003 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005004 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005005 }
5006
5007 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5008}
5009
Tejun Heo969c7922010-05-06 18:49:21 +02005010static int active_load_balance_cpu_stop(void *data);
5011
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005012/*
5013 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5014 * tasks if there is an imbalance.
5015 */
5016static int load_balance(int this_cpu, struct rq *this_rq,
5017 struct sched_domain *sd, enum cpu_idle_type idle,
5018 int *balance)
5019{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305020 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005021 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005022 struct rq *busiest;
5023 unsigned long flags;
Joonsoo Kime6252c32013-04-23 17:27:41 +09005024 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005025
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005026 struct lb_env env = {
5027 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005028 .dst_cpu = this_cpu,
5029 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305030 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005031 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02005032 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08005033 .cpus = cpus,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005034 };
5035
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005036 /*
5037 * For NEWLY_IDLE load_balancing, we don't need to consider
5038 * other cpus in our group
5039 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005040 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005041 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005042
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005043 cpumask_copy(cpus, cpu_active_mask);
5044
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005045 schedstat_inc(sd, lb_count[idle]);
5046
5047redo:
Michael Wangb94031302012-07-12 16:10:13 +08005048 group = find_busiest_group(&env, balance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005049
5050 if (*balance == 0)
5051 goto out_balanced;
5052
5053 if (!group) {
5054 schedstat_inc(sd, lb_nobusyg[idle]);
5055 goto out_balanced;
5056 }
5057
Michael Wangb94031302012-07-12 16:10:13 +08005058 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005059 if (!busiest) {
5060 schedstat_inc(sd, lb_nobusyq[idle]);
5061 goto out_balanced;
5062 }
5063
Michael Wang78feefc2012-08-06 16:41:59 +08005064 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005065
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005066 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005067
5068 ld_moved = 0;
5069 if (busiest->nr_running > 1) {
5070 /*
5071 * Attempt to move tasks. If find_busiest_group has found
5072 * an imbalance but busiest->nr_running <= 1, the group is
5073 * still unbalanced. ld_moved simply stays zero, so it is
5074 * correctly treated as an imbalance.
5075 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005076 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02005077 env.src_cpu = busiest->cpu;
5078 env.src_rq = busiest;
5079 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005080
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005081 update_h_load(env.src_cpu);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005082more_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005083 local_irq_save(flags);
Michael Wang78feefc2012-08-06 16:41:59 +08005084 double_rq_lock(env.dst_rq, busiest);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305085
5086 /*
5087 * cur_ld_moved - load moved in current iteration
5088 * ld_moved - cumulative load moved across iterations
5089 */
5090 cur_ld_moved = move_tasks(&env);
5091 ld_moved += cur_ld_moved;
Michael Wang78feefc2012-08-06 16:41:59 +08005092 double_rq_unlock(env.dst_rq, busiest);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005093 local_irq_restore(flags);
5094
5095 /*
5096 * some other cpu did the load balance for us.
5097 */
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305098 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5099 resched_cpu(env.dst_cpu);
5100
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09005101 if (env.flags & LBF_NEED_BREAK) {
5102 env.flags &= ~LBF_NEED_BREAK;
5103 goto more_balance;
5104 }
5105
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305106 /*
5107 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5108 * us and move them to an alternate dst_cpu in our sched_group
5109 * where they can run. The upper limit on how many times we
5110 * iterate on same src_cpu is dependent on number of cpus in our
5111 * sched_group.
5112 *
5113 * This changes load balance semantics a bit on who can move
5114 * load to a given_cpu. In addition to the given_cpu itself
5115 * (or a ilb_cpu acting on its behalf where given_cpu is
5116 * nohz-idle), we now have balance_cpu in a position to move
5117 * load to given_cpu. In rare situations, this may cause
5118 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
5119 * _independently_ and at _same_ time to move some load to
5120 * given_cpu) causing exceess load to be moved to given_cpu.
5121 * This however should not happen so much in practice and
5122 * moreover subsequent load balance cycles should correct the
5123 * excess load moved.
5124 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005125 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305126
Michael Wang78feefc2012-08-06 16:41:59 +08005127 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305128 env.dst_cpu = env.new_dst_cpu;
5129 env.flags &= ~LBF_SOME_PINNED;
5130 env.loop = 0;
5131 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005132
5133 /* Prevent to re-select dst_cpu via env's cpus */
5134 cpumask_clear_cpu(env.dst_cpu, env.cpus);
5135
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305136 /*
5137 * Go back to "more_balance" rather than "redo" since we
5138 * need to continue with same src_cpu.
5139 */
5140 goto more_balance;
5141 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005142
5143 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005144 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005145 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305146 if (!cpumask_empty(cpus)) {
5147 env.loop = 0;
5148 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005149 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305150 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005151 goto out_balanced;
5152 }
5153 }
5154
5155 if (!ld_moved) {
5156 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07005157 /*
5158 * Increment the failure counter only on periodic balance.
5159 * We do not want newidle balance, which can be very
5160 * frequent, pollute the failure counter causing
5161 * excessive cache_hot migrations and active balances.
5162 */
5163 if (idle != CPU_NEWLY_IDLE)
5164 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005165
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005166 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005167 raw_spin_lock_irqsave(&busiest->lock, flags);
5168
Tejun Heo969c7922010-05-06 18:49:21 +02005169 /* don't kick the active_load_balance_cpu_stop,
5170 * if the curr task on busiest cpu can't be
5171 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005172 */
5173 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02005174 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005175 raw_spin_unlock_irqrestore(&busiest->lock,
5176 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005177 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005178 goto out_one_pinned;
5179 }
5180
Tejun Heo969c7922010-05-06 18:49:21 +02005181 /*
5182 * ->active_balance synchronizes accesses to
5183 * ->active_balance_work. Once set, it's cleared
5184 * only after active load balance is finished.
5185 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005186 if (!busiest->active_balance) {
5187 busiest->active_balance = 1;
5188 busiest->push_cpu = this_cpu;
5189 active_balance = 1;
5190 }
5191 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005192
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005193 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02005194 stop_one_cpu_nowait(cpu_of(busiest),
5195 active_load_balance_cpu_stop, busiest,
5196 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005197 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005198
5199 /*
5200 * We've kicked active balancing, reset the failure
5201 * counter.
5202 */
5203 sd->nr_balance_failed = sd->cache_nice_tries+1;
5204 }
5205 } else
5206 sd->nr_balance_failed = 0;
5207
5208 if (likely(!active_balance)) {
5209 /* We were unbalanced, so reset the balancing interval */
5210 sd->balance_interval = sd->min_interval;
5211 } else {
5212 /*
5213 * If we've begun active balancing, start to back off. This
5214 * case may not be covered by the all_pinned logic if there
5215 * is only 1 task on the busy runqueue (because we don't call
5216 * move_tasks).
5217 */
5218 if (sd->balance_interval < sd->max_interval)
5219 sd->balance_interval *= 2;
5220 }
5221
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005222 goto out;
5223
5224out_balanced:
5225 schedstat_inc(sd, lb_balanced[idle]);
5226
5227 sd->nr_balance_failed = 0;
5228
5229out_one_pinned:
5230 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005231 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02005232 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005233 (sd->balance_interval < sd->max_interval))
5234 sd->balance_interval *= 2;
5235
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08005236 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005237out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005238 return ld_moved;
5239}
5240
5241/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005242 * idle_balance is called by schedule() if this_cpu is about to become
5243 * idle. Attempts to pull tasks from other CPUs.
5244 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005245void idle_balance(int this_cpu, struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005246{
5247 struct sched_domain *sd;
5248 int pulled_task = 0;
5249 unsigned long next_balance = jiffies + HZ;
5250
Frederic Weisbecker78becc22013-04-12 01:51:02 +02005251 this_rq->idle_stamp = rq_clock(this_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005252
5253 if (this_rq->avg_idle < sysctl_sched_migration_cost)
5254 return;
5255
Peter Zijlstraf492e122009-12-23 15:29:42 +01005256 /*
5257 * Drop the rq->lock, but keep IRQ/preempt disabled.
5258 */
5259 raw_spin_unlock(&this_rq->lock);
5260
Paul Turner48a16752012-10-04 13:18:31 +02005261 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02005262 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005263 for_each_domain(this_cpu, sd) {
5264 unsigned long interval;
Peter Zijlstraf492e122009-12-23 15:29:42 +01005265 int balance = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005266
5267 if (!(sd->flags & SD_LOAD_BALANCE))
5268 continue;
5269
Peter Zijlstraf492e122009-12-23 15:29:42 +01005270 if (sd->flags & SD_BALANCE_NEWIDLE) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005271 /* If we've pulled tasks over stop searching: */
Peter Zijlstraf492e122009-12-23 15:29:42 +01005272 pulled_task = load_balance(this_cpu, this_rq,
5273 sd, CPU_NEWLY_IDLE, &balance);
5274 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005275
5276 interval = msecs_to_jiffies(sd->balance_interval);
5277 if (time_after(next_balance, sd->last_balance + interval))
5278 next_balance = sd->last_balance + interval;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005279 if (pulled_task) {
5280 this_rq->idle_stamp = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005281 break;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005282 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005283 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005284 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01005285
5286 raw_spin_lock(&this_rq->lock);
5287
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005288 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5289 /*
5290 * We are going idle. next_balance may be set based on
5291 * a busy processor. So reset next_balance.
5292 */
5293 this_rq->next_balance = next_balance;
5294 }
5295}
5296
5297/*
Tejun Heo969c7922010-05-06 18:49:21 +02005298 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
5299 * running tasks off the busiest CPU onto idle CPUs. It requires at
5300 * least 1 task to be running on each physical CPU where possible, and
5301 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005302 */
Tejun Heo969c7922010-05-06 18:49:21 +02005303static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005304{
Tejun Heo969c7922010-05-06 18:49:21 +02005305 struct rq *busiest_rq = data;
5306 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005307 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02005308 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005309 struct sched_domain *sd;
Tejun Heo969c7922010-05-06 18:49:21 +02005310
5311 raw_spin_lock_irq(&busiest_rq->lock);
5312
5313 /* make sure the requested cpu hasn't gone down in the meantime */
5314 if (unlikely(busiest_cpu != smp_processor_id() ||
5315 !busiest_rq->active_balance))
5316 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005317
5318 /* Is there any task to move? */
5319 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02005320 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005321
5322 /*
5323 * This condition is "impossible", if it occurs
5324 * we need to fix it. Originally reported by
5325 * Bjorn Helgaas on a 128-cpu setup.
5326 */
5327 BUG_ON(busiest_rq == target_rq);
5328
5329 /* move a task from busiest_rq to target_rq */
5330 double_lock_balance(busiest_rq, target_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005331
5332 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02005333 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005334 for_each_domain(target_cpu, sd) {
5335 if ((sd->flags & SD_LOAD_BALANCE) &&
5336 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5337 break;
5338 }
5339
5340 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005341 struct lb_env env = {
5342 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005343 .dst_cpu = target_cpu,
5344 .dst_rq = target_rq,
5345 .src_cpu = busiest_rq->cpu,
5346 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005347 .idle = CPU_IDLE,
5348 };
5349
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005350 schedstat_inc(sd, alb_count);
5351
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005352 if (move_one_task(&env))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005353 schedstat_inc(sd, alb_pushed);
5354 else
5355 schedstat_inc(sd, alb_failed);
5356 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005357 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005358 double_unlock_balance(busiest_rq, target_rq);
Tejun Heo969c7922010-05-06 18:49:21 +02005359out_unlock:
5360 busiest_rq->active_balance = 0;
5361 raw_spin_unlock_irq(&busiest_rq->lock);
5362 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005363}
5364
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005365#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005366/*
5367 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005368 * - When one of the busy CPUs notice that there may be an idle rebalancing
5369 * needed, they will kick the idle load balancer, which then does idle
5370 * load balancing for all the idle CPUs.
5371 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005372static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005373 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005374 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005375 unsigned long next_balance; /* in jiffy units */
5376} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005377
Peter Zijlstra8e7fbcb2012-01-09 11:28:35 +01005378static inline int find_new_ilb(int call_cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005379{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005380 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005381
Suresh Siddha786d6dc72011-12-01 17:07:35 -08005382 if (ilb < nr_cpu_ids && idle_cpu(ilb))
5383 return ilb;
5384
5385 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005386}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005387
5388/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005389 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
5390 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
5391 * CPU (if there is one).
5392 */
5393static void nohz_balancer_kick(int cpu)
5394{
5395 int ilb_cpu;
5396
5397 nohz.next_balance++;
5398
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005399 ilb_cpu = find_new_ilb(cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005400
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005401 if (ilb_cpu >= nr_cpu_ids)
5402 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005403
Suresh Siddhacd490c52011-12-06 11:26:34 -08005404 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08005405 return;
5406 /*
5407 * Use smp_send_reschedule() instead of resched_cpu().
5408 * This way we generate a sched IPI on the target cpu which
5409 * is idle. And the softirq performing nohz idle load balance
5410 * will be run before returning from the IPI.
5411 */
5412 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005413 return;
5414}
5415
Alex Shic1cc0172012-09-10 15:10:58 +08005416static inline void nohz_balance_exit_idle(int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08005417{
5418 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5419 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5420 atomic_dec(&nohz.nr_cpus);
5421 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5422 }
5423}
5424
Suresh Siddha69e1e812011-12-01 17:07:33 -08005425static inline void set_cpu_sd_state_busy(void)
5426{
5427 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08005428
Suresh Siddha69e1e812011-12-01 17:07:33 -08005429 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05005430 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005431
5432 if (!sd || !sd->nohz_idle)
5433 goto unlock;
5434 sd->nohz_idle = 0;
5435
5436 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08005437 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005438unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08005439 rcu_read_unlock();
5440}
5441
5442void set_cpu_sd_state_idle(void)
5443{
5444 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08005445
Suresh Siddha69e1e812011-12-01 17:07:33 -08005446 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05005447 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005448
5449 if (!sd || sd->nohz_idle)
5450 goto unlock;
5451 sd->nohz_idle = 1;
5452
5453 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08005454 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005455unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08005456 rcu_read_unlock();
5457}
5458
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005459/*
Alex Shic1cc0172012-09-10 15:10:58 +08005460 * This routine will record that the cpu is going idle with tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005461 * This info will be used in performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005462 */
Alex Shic1cc0172012-09-10 15:10:58 +08005463void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005464{
Suresh Siddha71325962012-01-19 18:28:57 -08005465 /*
5466 * If this cpu is going down, then nothing needs to be done.
5467 */
5468 if (!cpu_active(cpu))
5469 return;
5470
Alex Shic1cc0172012-09-10 15:10:58 +08005471 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5472 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005473
Alex Shic1cc0172012-09-10 15:10:58 +08005474 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5475 atomic_inc(&nohz.nr_cpus);
5476 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005477}
Suresh Siddha71325962012-01-19 18:28:57 -08005478
5479static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
5480 unsigned long action, void *hcpu)
5481{
5482 switch (action & ~CPU_TASKS_FROZEN) {
5483 case CPU_DYING:
Alex Shic1cc0172012-09-10 15:10:58 +08005484 nohz_balance_exit_idle(smp_processor_id());
Suresh Siddha71325962012-01-19 18:28:57 -08005485 return NOTIFY_OK;
5486 default:
5487 return NOTIFY_DONE;
5488 }
5489}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005490#endif
5491
5492static DEFINE_SPINLOCK(balancing);
5493
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005494/*
5495 * Scale the max load_balance interval with the number of CPUs in the system.
5496 * This trades load-balance latency on larger machines for less cross talk.
5497 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005498void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005499{
5500 max_load_balance_interval = HZ*num_online_cpus()/10;
5501}
5502
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005503/*
5504 * It checks each scheduling domain to see if it is due to be balanced,
5505 * and initiates a balancing operation if so.
5506 *
Libinb9b08532013-04-01 19:14:01 +08005507 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005508 */
5509static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5510{
5511 int balance = 1;
5512 struct rq *rq = cpu_rq(cpu);
5513 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02005514 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005515 /* Earliest time when we have to do rebalance again */
5516 unsigned long next_balance = jiffies + 60*HZ;
5517 int update_next_balance = 0;
5518 int need_serialize;
5519
Paul Turner48a16752012-10-04 13:18:31 +02005520 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08005521
Peter Zijlstradce840a2011-04-07 14:09:50 +02005522 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005523 for_each_domain(cpu, sd) {
5524 if (!(sd->flags & SD_LOAD_BALANCE))
5525 continue;
5526
5527 interval = sd->balance_interval;
5528 if (idle != CPU_IDLE)
5529 interval *= sd->busy_factor;
5530
5531 /* scale ms to jiffies */
5532 interval = msecs_to_jiffies(interval);
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005533 interval = clamp(interval, 1UL, max_load_balance_interval);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005534
5535 need_serialize = sd->flags & SD_SERIALIZE;
5536
5537 if (need_serialize) {
5538 if (!spin_trylock(&balancing))
5539 goto out;
5540 }
5541
5542 if (time_after_eq(jiffies, sd->last_balance + interval)) {
5543 if (load_balance(cpu, rq, sd, idle, &balance)) {
5544 /*
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09005545 * The LBF_SOME_PINNED logic could have changed
5546 * env->dst_cpu, so we can't know our idle
5547 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005548 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09005549 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005550 }
5551 sd->last_balance = jiffies;
5552 }
5553 if (need_serialize)
5554 spin_unlock(&balancing);
5555out:
5556 if (time_after(next_balance, sd->last_balance + interval)) {
5557 next_balance = sd->last_balance + interval;
5558 update_next_balance = 1;
5559 }
5560
5561 /*
5562 * Stop the load balance at this level. There is another
5563 * CPU in our sched group which is doing load balancing more
5564 * actively.
5565 */
5566 if (!balance)
5567 break;
5568 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005569 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005570
5571 /*
5572 * next_balance will be updated only when there is a need.
5573 * When the cpu is attached to null domain for ex, it will not be
5574 * updated.
5575 */
5576 if (likely(update_next_balance))
5577 rq->next_balance = next_balance;
5578}
5579
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005580#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005581/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005582 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005583 * rebalancing for all the cpus for whom scheduler ticks are stopped.
5584 */
5585static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5586{
5587 struct rq *this_rq = cpu_rq(this_cpu);
5588 struct rq *rq;
5589 int balance_cpu;
5590
Suresh Siddha1c792db2011-12-01 17:07:32 -08005591 if (idle != CPU_IDLE ||
5592 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
5593 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005594
5595 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08005596 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005597 continue;
5598
5599 /*
5600 * If this cpu gets work to do, stop the load balancing
5601 * work being done for other cpus. Next load
5602 * balancing owner will pick it up.
5603 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08005604 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005605 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005606
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02005607 rq = cpu_rq(balance_cpu);
5608
5609 raw_spin_lock_irq(&rq->lock);
5610 update_rq_clock(rq);
5611 update_idle_cpu_load(rq);
5612 raw_spin_unlock_irq(&rq->lock);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005613
5614 rebalance_domains(balance_cpu, CPU_IDLE);
5615
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005616 if (time_after(this_rq->next_balance, rq->next_balance))
5617 this_rq->next_balance = rq->next_balance;
5618 }
5619 nohz.next_balance = this_rq->next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08005620end:
5621 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005622}
5623
5624/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005625 * Current heuristic for kicking the idle load balancer in the presence
5626 * of an idle cpu is the system.
5627 * - This rq has more than one task.
5628 * - At any scheduler domain level, this cpu's scheduler group has multiple
5629 * busy cpu's exceeding the group's power.
5630 * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
5631 * domain span are idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005632 */
5633static inline int nohz_kick_needed(struct rq *rq, int cpu)
5634{
5635 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005636 struct sched_domain *sd;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005637
Suresh Siddha1c792db2011-12-01 17:07:32 -08005638 if (unlikely(idle_cpu(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005639 return 0;
5640
Suresh Siddha1c792db2011-12-01 17:07:32 -08005641 /*
5642 * We may be recently in ticked or tickless idle mode. At the first
5643 * busy tick after returning from idle, we will update the busy stats.
5644 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08005645 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08005646 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005647
5648 /*
5649 * None are in tickless mode and hence no need for NOHZ idle load
5650 * balancing.
5651 */
5652 if (likely(!atomic_read(&nohz.nr_cpus)))
5653 return 0;
Suresh Siddha1c792db2011-12-01 17:07:32 -08005654
5655 if (time_before(now, nohz.next_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005656 return 0;
5657
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005658 if (rq->nr_running >= 2)
5659 goto need_kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005660
Peter Zijlstra067491b2011-12-07 14:32:08 +01005661 rcu_read_lock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005662 for_each_domain(cpu, sd) {
5663 struct sched_group *sg = sd->groups;
5664 struct sched_group_power *sgp = sg->sgp;
5665 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005666
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005667 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
Peter Zijlstra067491b2011-12-07 14:32:08 +01005668 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005669
5670 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
5671 && (cpumask_first_and(nohz.idle_cpus_mask,
5672 sched_domain_span(sd)) < cpu))
Peter Zijlstra067491b2011-12-07 14:32:08 +01005673 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005674
5675 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
5676 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005677 }
Peter Zijlstra067491b2011-12-07 14:32:08 +01005678 rcu_read_unlock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005679 return 0;
Peter Zijlstra067491b2011-12-07 14:32:08 +01005680
5681need_kick_unlock:
5682 rcu_read_unlock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005683need_kick:
5684 return 1;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005685}
5686#else
5687static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5688#endif
5689
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005690/*
5691 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005692 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005693 */
5694static void run_rebalance_domains(struct softirq_action *h)
5695{
5696 int this_cpu = smp_processor_id();
5697 struct rq *this_rq = cpu_rq(this_cpu);
Suresh Siddha6eb57e02011-10-03 15:09:01 -07005698 enum cpu_idle_type idle = this_rq->idle_balance ?
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005699 CPU_IDLE : CPU_NOT_IDLE;
5700
5701 rebalance_domains(this_cpu, idle);
5702
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005703 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005704 * If this cpu has a pending nohz_balance_kick, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005705 * balancing on behalf of the other idle cpus whose ticks are
5706 * stopped.
5707 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005708 nohz_idle_balance(this_cpu, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005709}
5710
5711static inline int on_null_domain(int cpu)
5712{
Paul E. McKenney90a65012010-02-28 08:32:18 -08005713 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005714}
5715
5716/*
5717 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005718 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005719void trigger_load_balance(struct rq *rq, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005720{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005721 /* Don't need to rebalance while attached to NULL domain */
5722 if (time_after_eq(jiffies, rq->next_balance) &&
5723 likely(!on_null_domain(cpu)))
5724 raise_softirq(SCHED_SOFTIRQ);
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005725#ifdef CONFIG_NO_HZ_COMMON
Suresh Siddha1c792db2011-12-01 17:07:32 -08005726 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005727 nohz_balancer_kick(cpu);
5728#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005729}
5730
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005731static void rq_online_fair(struct rq *rq)
5732{
5733 update_sysctl();
5734}
5735
5736static void rq_offline_fair(struct rq *rq)
5737{
5738 update_sysctl();
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07005739
5740 /* Ensure any throttled groups are reachable by pick_next_task */
5741 unthrottle_offline_cfs_rqs(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01005742}
5743
Dhaval Giani55e12e52008-06-24 23:39:43 +05305744#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02005745
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005746/*
5747 * scheduler tick hitting a task of our scheduling class:
5748 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005749static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005750{
5751 struct cfs_rq *cfs_rq;
5752 struct sched_entity *se = &curr->se;
5753
5754 for_each_sched_entity(se) {
5755 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005756 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005757 }
Ben Segall18bf2802012-10-04 12:51:20 +02005758
Peter Zijlstracbee9f82012-10-25 14:16:43 +02005759 if (sched_feat_numa(NUMA))
5760 task_tick_numa(rq, curr);
Linus Torvalds3d59eeb2012-12-16 14:33:25 -08005761
Ben Segall18bf2802012-10-04 12:51:20 +02005762 update_rq_runnable_avg(rq, 1);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005763}
5764
5765/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005766 * called on fork with the child task as argument from the parent's context
5767 * - child not yet on the tasklist
5768 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005769 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005770static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005771{
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09005772 struct cfs_rq *cfs_rq;
5773 struct sched_entity *se = &p->se, *curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02005774 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005775 struct rq *rq = this_rq();
5776 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005777
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005778 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005779
Peter Zijlstra861d0342010-08-19 13:31:43 +02005780 update_rq_clock(rq);
5781
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09005782 cfs_rq = task_cfs_rq(current);
5783 curr = cfs_rq->curr;
5784
Paul E. McKenneyb0a0f662010-10-06 17:32:51 -07005785 if (unlikely(task_cpu(p) != this_cpu)) {
5786 rcu_read_lock();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005787 __set_task_cpu(p, this_cpu);
Paul E. McKenneyb0a0f662010-10-06 17:32:51 -07005788 rcu_read_unlock();
5789 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005790
Ting Yang7109c442007-08-28 12:53:24 +02005791 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005792
Mike Galbraithb5d9d732009-09-08 11:12:28 +02005793 if (curr)
5794 se->vruntime = curr->vruntime;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02005795 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02005796
Peter Zijlstracd29fe62009-11-27 17:32:46 +01005797 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02005798 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02005799 * Upon rescheduling, sched_class::put_prev_task() will place
5800 * 'current' within the tree based on its new key value.
5801 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02005802 swap(curr->vruntime, se->vruntime);
Bharata B Raoaec0a512008-08-28 14:42:49 +05305803 resched_task(rq->curr);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02005804 }
5805
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01005806 se->vruntime -= cfs_rq->min_vruntime;
5807
Thomas Gleixner05fa7852009-11-17 14:28:38 +01005808 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005809}
5810
Steven Rostedtcb469842008-01-25 21:08:22 +01005811/*
5812 * Priority of the task has changed. Check to see if we preempt
5813 * the current task.
5814 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005815static void
5816prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01005817{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005818 if (!p->se.on_rq)
5819 return;
5820
Steven Rostedtcb469842008-01-25 21:08:22 +01005821 /*
5822 * Reschedule if we are currently running on this runqueue and
5823 * our priority decreased, or if we are not currently running on
5824 * this runqueue and our priority is higher than the current's
5825 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005826 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01005827 if (p->prio > oldprio)
5828 resched_task(rq->curr);
5829 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02005830 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005831}
5832
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005833static void switched_from_fair(struct rq *rq, struct task_struct *p)
5834{
5835 struct sched_entity *se = &p->se;
5836 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5837
5838 /*
5839 * Ensure the task's vruntime is normalized, so that when its
5840 * switched back to the fair class the enqueue_entity(.flags=0) will
5841 * do the right thing.
5842 *
5843 * If it was on_rq, then the dequeue_entity(.flags=0) will already
5844 * have normalized the vruntime, if it was !on_rq, then only when
5845 * the task is sleeping will it still have non-normalized vruntime.
5846 */
5847 if (!se->on_rq && p->state != TASK_RUNNING) {
5848 /*
5849 * Fix up our vruntime so that the current sleep doesn't
5850 * cause 'unlimited' sleep bonus.
5851 */
5852 place_entity(cfs_rq, se, 0);
5853 se->vruntime -= cfs_rq->min_vruntime;
5854 }
Paul Turner9ee474f2012-10-04 13:18:30 +02005855
Alex Shi141965c2013-06-26 13:05:39 +08005856#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02005857 /*
5858 * Remove our load from contribution when we leave sched_fair
5859 * and ensure we don't carry in an old decay_count if we
5860 * switch back.
5861 */
5862 if (p->se.avg.decay_count) {
5863 struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
5864 __synchronize_entity_decay(&p->se);
5865 subtract_blocked_load_contrib(cfs_rq,
5866 p->se.avg.load_avg_contrib);
5867 }
5868#endif
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005869}
5870
Steven Rostedtcb469842008-01-25 21:08:22 +01005871/*
5872 * We switched to the sched_fair class.
5873 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005874static void switched_to_fair(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01005875{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005876 if (!p->se.on_rq)
5877 return;
5878
Steven Rostedtcb469842008-01-25 21:08:22 +01005879 /*
5880 * We were most likely switched from sched_rt, so
5881 * kick off the schedule if running, otherwise just see
5882 * if we can still preempt the current task.
5883 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01005884 if (rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01005885 resched_task(rq->curr);
5886 else
Peter Zijlstra15afe092008-09-20 23:38:02 +02005887 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01005888}
5889
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02005890/* Account for a task changing its policy or group.
5891 *
5892 * This routine is mostly called to set cfs_rq->curr field when a task
5893 * migrates between groups/classes.
5894 */
5895static void set_curr_task_fair(struct rq *rq)
5896{
5897 struct sched_entity *se = &rq->curr->se;
5898
Paul Turnerec12cb72011-07-21 09:43:30 -07005899 for_each_sched_entity(se) {
5900 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5901
5902 set_next_entity(cfs_rq, se);
5903 /* ensure bandwidth has been allocated on our new cfs_rq */
5904 account_cfs_rq_runtime(cfs_rq, 0);
5905 }
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02005906}
5907
Peter Zijlstra029632f2011-10-25 10:00:11 +02005908void init_cfs_rq(struct cfs_rq *cfs_rq)
5909{
5910 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra029632f2011-10-25 10:00:11 +02005911 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
5912#ifndef CONFIG_64BIT
5913 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5914#endif
Alex Shi141965c2013-06-26 13:05:39 +08005915#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02005916 atomic64_set(&cfs_rq->decay_counter, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02005917 atomic64_set(&cfs_rq->removed_load, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02005918#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02005919}
5920
Peter Zijlstra810b3812008-02-29 15:21:01 -05005921#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02005922static void task_move_group_fair(struct task_struct *p, int on_rq)
Peter Zijlstra810b3812008-02-29 15:21:01 -05005923{
Paul Turneraff3e492012-10-04 13:18:30 +02005924 struct cfs_rq *cfs_rq;
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02005925 /*
5926 * If the task was not on the rq at the time of this cgroup movement
5927 * it must have been asleep, sleeping tasks keep their ->vruntime
5928 * absolute on their old rq until wakeup (needed for the fair sleeper
5929 * bonus in place_entity()).
5930 *
5931 * If it was on the rq, we've just 'preempted' it, which does convert
5932 * ->vruntime to a relative base.
5933 *
5934 * Make sure both cases convert their relative position when migrating
5935 * to another cgroup's rq. This does somewhat interfere with the
5936 * fair sleeper stuff for the first placement, but who cares.
5937 */
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09005938 /*
5939 * When !on_rq, vruntime of the task has usually NOT been normalized.
5940 * But there are some cases where it has already been normalized:
5941 *
5942 * - Moving a forked child which is waiting for being woken up by
5943 * wake_up_new_task().
Daisuke Nishimura62af3782011-12-15 14:37:41 +09005944 * - Moving a task which has been woken up by try_to_wake_up() and
5945 * waiting for actually being woken up by sched_ttwu_pending().
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09005946 *
5947 * To prevent boost or penalty in the new cfs_rq caused by delta
5948 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
5949 */
Daisuke Nishimura62af3782011-12-15 14:37:41 +09005950 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09005951 on_rq = 1;
5952
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01005953 if (!on_rq)
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02005954 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
5955 set_task_rq(p, task_cpu(p));
Paul Turneraff3e492012-10-04 13:18:30 +02005956 if (!on_rq) {
5957 cfs_rq = cfs_rq_of(&p->se);
5958 p->se.vruntime += cfs_rq->min_vruntime;
5959#ifdef CONFIG_SMP
5960 /*
5961 * migrate_task_rq_fair() will have removed our previous
5962 * contribution, but we must synchronize for ongoing future
5963 * decay.
5964 */
5965 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
5966 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
5967#endif
5968 }
Peter Zijlstra810b3812008-02-29 15:21:01 -05005969}
Peter Zijlstra029632f2011-10-25 10:00:11 +02005970
5971void free_fair_sched_group(struct task_group *tg)
5972{
5973 int i;
5974
5975 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
5976
5977 for_each_possible_cpu(i) {
5978 if (tg->cfs_rq)
5979 kfree(tg->cfs_rq[i]);
5980 if (tg->se)
5981 kfree(tg->se[i]);
5982 }
5983
5984 kfree(tg->cfs_rq);
5985 kfree(tg->se);
5986}
5987
5988int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
5989{
5990 struct cfs_rq *cfs_rq;
5991 struct sched_entity *se;
5992 int i;
5993
5994 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
5995 if (!tg->cfs_rq)
5996 goto err;
5997 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
5998 if (!tg->se)
5999 goto err;
6000
6001 tg->shares = NICE_0_LOAD;
6002
6003 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6004
6005 for_each_possible_cpu(i) {
6006 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6007 GFP_KERNEL, cpu_to_node(i));
6008 if (!cfs_rq)
6009 goto err;
6010
6011 se = kzalloc_node(sizeof(struct sched_entity),
6012 GFP_KERNEL, cpu_to_node(i));
6013 if (!se)
6014 goto err_free_rq;
6015
6016 init_cfs_rq(cfs_rq);
6017 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6018 }
6019
6020 return 1;
6021
6022err_free_rq:
6023 kfree(cfs_rq);
6024err:
6025 return 0;
6026}
6027
6028void unregister_fair_sched_group(struct task_group *tg, int cpu)
6029{
6030 struct rq *rq = cpu_rq(cpu);
6031 unsigned long flags;
6032
6033 /*
6034 * Only empty task groups can be destroyed; so we can speculatively
6035 * check on_list without danger of it being re-added.
6036 */
6037 if (!tg->cfs_rq[cpu]->on_list)
6038 return;
6039
6040 raw_spin_lock_irqsave(&rq->lock, flags);
6041 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6042 raw_spin_unlock_irqrestore(&rq->lock, flags);
6043}
6044
6045void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6046 struct sched_entity *se, int cpu,
6047 struct sched_entity *parent)
6048{
6049 struct rq *rq = cpu_rq(cpu);
6050
6051 cfs_rq->tg = tg;
6052 cfs_rq->rq = rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006053 init_cfs_rq_runtime(cfs_rq);
6054
6055 tg->cfs_rq[cpu] = cfs_rq;
6056 tg->se[cpu] = se;
6057
6058 /* se could be NULL for root_task_group */
6059 if (!se)
6060 return;
6061
6062 if (!parent)
6063 se->cfs_rq = &rq->cfs;
6064 else
6065 se->cfs_rq = parent->my_q;
6066
6067 se->my_q = cfs_rq;
6068 update_load_set(&se->load, 0);
6069 se->parent = parent;
6070}
6071
6072static DEFINE_MUTEX(shares_mutex);
6073
6074int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6075{
6076 int i;
6077 unsigned long flags;
6078
6079 /*
6080 * We can't change the weight of the root cgroup.
6081 */
6082 if (!tg->se[0])
6083 return -EINVAL;
6084
6085 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6086
6087 mutex_lock(&shares_mutex);
6088 if (tg->shares == shares)
6089 goto done;
6090
6091 tg->shares = shares;
6092 for_each_possible_cpu(i) {
6093 struct rq *rq = cpu_rq(i);
6094 struct sched_entity *se;
6095
6096 se = tg->se[i];
6097 /* Propagate contribution to hierarchy */
6098 raw_spin_lock_irqsave(&rq->lock, flags);
Frederic Weisbecker71b1da42013-04-12 01:50:59 +02006099
6100 /* Possible calls to update_curr() need rq clock */
6101 update_rq_clock(rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08006102 for_each_sched_entity(se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02006103 update_cfs_shares(group_cfs_rq(se));
6104 raw_spin_unlock_irqrestore(&rq->lock, flags);
6105 }
6106
6107done:
6108 mutex_unlock(&shares_mutex);
6109 return 0;
6110}
6111#else /* CONFIG_FAIR_GROUP_SCHED */
6112
6113void free_fair_sched_group(struct task_group *tg) { }
6114
6115int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6116{
6117 return 1;
6118}
6119
6120void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6121
6122#endif /* CONFIG_FAIR_GROUP_SCHED */
6123
Peter Zijlstra810b3812008-02-29 15:21:01 -05006124
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07006125static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00006126{
6127 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00006128 unsigned int rr_interval = 0;
6129
6130 /*
6131 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6132 * idle runqueue:
6133 */
Peter Williams0d721ce2009-09-21 01:31:53 +00006134 if (rq->cfs.load.weight)
Zhu Yanhaia59f4e02013-01-08 12:56:52 +08006135 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
Peter Williams0d721ce2009-09-21 01:31:53 +00006136
6137 return rr_interval;
6138}
6139
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006140/*
6141 * All the scheduling class methods:
6142 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006143const struct sched_class fair_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02006144 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006145 .enqueue_task = enqueue_task_fair,
6146 .dequeue_task = dequeue_task_fair,
6147 .yield_task = yield_task_fair,
Mike Galbraithd95f4122011-02-01 09:50:51 -05006148 .yield_to_task = yield_to_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006149
Ingo Molnar2e09bf52007-10-15 17:00:05 +02006150 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006151
6152 .pick_next_task = pick_next_task_fair,
6153 .put_prev_task = put_prev_task_fair,
6154
Peter Williams681f3e62007-10-24 18:23:51 +02006155#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08006156 .select_task_rq = select_task_rq_fair,
Paul Turner0a74bef2012-10-04 13:18:30 +02006157 .migrate_task_rq = migrate_task_rq_fair,
Alex Shi141965c2013-06-26 13:05:39 +08006158
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006159 .rq_online = rq_online_fair,
6160 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006161
6162 .task_waking = task_waking_fair,
Peter Williams681f3e62007-10-24 18:23:51 +02006163#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006164
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02006165 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006166 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006167 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01006168
6169 .prio_changed = prio_changed_fair,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006170 .switched_from = switched_from_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01006171 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05006172
Peter Williams0d721ce2009-09-21 01:31:53 +00006173 .get_rr_interval = get_rr_interval_fair,
6174
Peter Zijlstra810b3812008-02-29 15:21:01 -05006175#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006176 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05006177#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006178};
6179
6180#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +02006181void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006182{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006183 struct cfs_rq *cfs_rq;
6184
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01006185 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02006186 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02006187 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01006188 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006189}
6190#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02006191
6192__init void init_sched_fair_class(void)
6193{
6194#ifdef CONFIG_SMP
6195 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6196
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006197#ifdef CONFIG_NO_HZ_COMMON
Diwakar Tundlam554ceca2012-03-07 14:44:26 -08006198 nohz.next_balance = jiffies;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006199 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
Suresh Siddha71325962012-01-19 18:28:57 -08006200 cpu_notifier(sched_ilb_notifier, 0);
Peter Zijlstra029632f2011-10-25 10:00:11 +02006201#endif
6202#endif /* SMP */
6203
6204}