/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}
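
/*
 * Illustrative example (not part of the original source): with the default
 * SCHED_TUNABLESCALING_LOG policy on an 8-CPU machine the factor is
 * 1 + ilog2(8) = 4, so the effective sysctl_sched_latency becomes
 * 4 * 6ms = 24ms and sysctl_sched_min_granularity becomes 4 * 0.75ms = 3ms.
 * Machines with more than 8 online CPUs are clamped to the same factor by
 * the min_t() in get_update_sysctl_factor() above.
 */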

void sched_init_granularity(void)
{
	update_sysctl();
}

#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
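
/*
 * Illustrative example (not part of the original source): SRR() adds half of
 * the final unit before shifting, so the result is rounded to nearest rather
 * than truncated.  E.g. SRR(1535, 10) = (1535 + 512) >> 10 = 1, while
 * SRR(1536, 10) = (1536 + 512) >> 10 = 2.
 */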

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
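
/*
 * Worked example (illustrative only, ignoring scale_load_down()): with
 * WMULT_SHIFT = 32 the division by lw->weight is replaced by a multiply with
 * the cached reciprocal inv_weight = 2^32 / lw->weight.  For
 * delta_exec = 1000000, weight = 1024 and lw->weight = 2048 this gives
 * roughly 1000000 * 1024 * (2^32 / 2048) >> 32 ~= 500000, i.e. the intended
 * delta *= weight / lw without a 64-bit division on the fast path.
 */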


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
				       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
		/* We should have no load, but we need to update last_decay. */
		update_cfs_rq_blocked_load(cfs_rq, 0);
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can be made between sibling entities that are
	 * in the same cfs_rq, i.e. that have a common parent. Walk up the
	 * hierarchy of both tasks until we find ancestors that are siblings
	 * under a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
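
/*
 * Note (illustrative, not from the original source): both helpers compare
 * through a signed delta so they stay correct when the u64 vruntime
 * eventually wraps.  E.g. with max_vruntime = ULLONG_MAX - 2 and
 * vruntime = 5, the delta (s64)(5 - (ULLONG_MAX - 2)) = 8 is positive, so 5
 * is correctly treated as the later (larger) vruntime despite the wrap.
 */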

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}
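
/*
 * Example (illustrative only): a NICE_0_LOAD entity takes the early return,
 * so its vruntime advances at wall-clock rate.  An entity with twice
 * NICE_0_LOAD has delta scaled by NICE_0_LOAD / (2 * NICE_0_LOAD), i.e. its
 * vruntime advances at roughly half the wall-clock rate, which is what earns
 * heavier entities proportionally more CPU time before other entities pass
 * them in the timeline.
 */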

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
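
/*
 * Example (illustrative, ignoring the CPU-count scaling applied by
 * update_sysctl()): with the defaults sysctl_sched_latency = 6ms,
 * sysctl_sched_min_granularity = 0.75ms and hence sched_nr_latency = 8, up
 * to 8 runnable tasks share one 6ms period; with e.g. 16 runnable tasks the
 * period is stretched to 16 * 0.75ms = 12ms so that no slice drops below
 * the minimum granularity.
 */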

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
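
/*
 * Example (illustrative only): with two runnable entities of equal weight w
 * the slice for each is p * w / (2w), i.e. half of the __sched_period()
 * value, 3ms with the 6ms default.  The "+ !se->on_rq" above counts the
 * entity itself when it is not queued yet, so a task being placed is sliced
 * as if it were already part of the runqueue.
 */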

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP
static inline void __update_task_entity_contrib(struct sched_entity *se);

/* Give a new task initial runnable averages that weigh its load fully during its infancy */
void init_task_runnable_average(struct task_struct *p)
{
	u32 slice;

	p->se.avg.decay_count = 0;
	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
	p->se.avg.runnable_avg_sum = slice;
	p->se.avg.runnable_avg_period = slice;
	__update_task_entity_contrib(&p->se);
}
#else
void init_task_runnable_average(struct task_struct *p)
{
}
#endif

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;
unsigned int sysctl_numa_balancing_scan_period_reset = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}
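
/*
 * Example (illustrative only): with the defaults
 * sysctl_numa_balancing_scan_size = 256MB and MAX_SCAN_WINDOW = 2560MB/sec,
 * windows = 10 and floor = 100ms.  A task whose RSS spans e.g. 4 scan
 * windows gets scan = 1000ms / 4 = 250ms, so task_scan_min() returns 250ms;
 * a much larger task is clamped to the 100ms floor instead.
 */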

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned int smin = task_scan_min(p);
	unsigned int smax;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
	return max(smin, smax);
}

/*
 * Once a preferred node is selected the scheduler balancer will prefer moving
 * a task to that node for sysctl_numa_balancing_settle_count number of PTE
 * scans. This will give the process the chance to accumulate more faults on
 * the preferred node but still allow the scheduler to move the task again if
 * the node's CPUs are overloaded.
 */
unsigned int sysctl_numa_balancing_settle_count __read_mostly = 3;

static unsigned long weighted_cpuload(const int cpu);


static int
find_idlest_cpu_node(int this_cpu, int nid)
{
	unsigned long load, min_load = ULONG_MAX;
	int i, idlest_cpu = this_cpu;

	BUG_ON(cpu_to_node(this_cpu) == nid);

	rcu_read_lock();
	for_each_cpu(i, cpumask_of_node(nid)) {
		load = weighted_cpuload(i);

		if (load < min_load) {
			min_load = load;
			idlest_cpu = i;
		}
	}
	rcu_read_unlock();

	return idlest_cpu;
}

static void task_numa_placement(struct task_struct *p)
{
	int seq, nid, max_nid = -1;
	unsigned long max_faults = 0;

	if (!p->mm)	/* for example, ksmd faulting in a user's mm */
		return;
	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
	if (p->numa_scan_seq == seq)
		return;
	p->numa_scan_seq = seq;
	p->numa_migrate_seq++;
	p->numa_scan_period_max = task_scan_max(p);

	/* Find the node with the highest number of faults */
	for_each_online_node(nid) {
		unsigned long faults;

		/* Decay existing window and copy faults since last scan */
		p->numa_faults[nid] >>= 1;
		p->numa_faults[nid] += p->numa_faults_buffer[nid];
		p->numa_faults_buffer[nid] = 0;

		faults = p->numa_faults[nid];
		if (faults > max_faults) {
			max_faults = faults;
			max_nid = nid;
		}
	}

	/*
	 * Record the preferred node as the node with the most faults,
	 * requeue the task to be running on the idlest CPU on the
	 * preferred node and reset the scanning rate to recheck
	 * the working set placement.
	 */
	if (max_faults && max_nid != p->numa_preferred_nid) {
		int preferred_cpu;

		/*
		 * If the task is not on the preferred node then find the most
		 * idle CPU to migrate to.
		 */
		preferred_cpu = task_cpu(p);
		if (cpu_to_node(preferred_cpu) != max_nid) {
			preferred_cpu = find_idlest_cpu_node(preferred_cpu,
							     max_nid);
		}

		/* Update the preferred nid and migrate task if possible */
		p->numa_preferred_nid = max_nid;
		p->numa_migrate_seq = 0;
		migrate_task_to(p, preferred_cpu);
	}
}

/*
 * Got a PROT_NONE fault for a page on @node.
 */
void task_numa_fault(int node, int pages, bool migrated)
{
	struct task_struct *p = current;

	if (!numabalancing_enabled)
		return;

	/* Allocate buffer to track faults on a per-node basis */
	if (unlikely(!p->numa_faults)) {
		int size = sizeof(*p->numa_faults) * nr_node_ids;

		/* numa_faults and numa_faults_buffer share the allocation */
		p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
		if (!p->numa_faults)
			return;

		BUG_ON(p->numa_faults_buffer);
		p->numa_faults_buffer = p->numa_faults + nr_node_ids;
	}

	/*
	 * If pages are properly placed (did not migrate) then scan slower.
	 * This is reset periodically in case of phase changes
	 */
	if (!migrated) {
		/* Initialise if necessary */
		if (!p->numa_scan_period_max)
			p->numa_scan_period_max = task_scan_max(p);

		p->numa_scan_period = min(p->numa_scan_period_max,
			p->numa_scan_period + 10);
	}

	task_numa_placement(p);

	p->numa_faults_buffer[node] += pages;
}
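
/*
 * Note (illustrative, not from the original source): the !migrated path
 * above is a gentle back-off.  Every fault batch that was already placed
 * correctly adds 10ms to p->numa_scan_period, up to task_scan_max(), so a
 * task whose pages have settled is scanned progressively less often until
 * the periodic reset in task_numa_work() re-examines it.
 */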

static void reset_ptenuma_scan(struct task_struct *p)
{
	ACCESS_ONCE(p->mm->numa_scan_seq)++;
	p->mm->numa_scan_offset = 0;
}

/*
 * The expensive part of numa migration is done from task_work context.
 * Triggered from task_tick_numa().
 */
void task_numa_work(struct callback_head *work)
{
	unsigned long migrate, next_scan, now = jiffies;
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	struct vm_area_struct *vma;
	unsigned long start, end;
	unsigned long nr_pte_updates = 0;
	long pages;

	WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));

	work->next = work; /* protect against double add */
	/*
	 * Who cares about NUMA placement when they're dying.
	 *
	 * NOTE: make sure not to dereference p->mm before this check,
	 * exit_task_work() happens _after_ exit_mm() so we could be called
	 * without p->mm even though we still had it when we enqueued this
	 * work.
	 */
	if (p->flags & PF_EXITING)
		return;

	if (!mm->numa_next_reset || !mm->numa_next_scan) {
		mm->numa_next_scan = now +
			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
		mm->numa_next_reset = now +
			msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
	}

	/*
	 * Reset the scan period if enough time has gone by. Objective is that
	 * scanning will be reduced if pages are properly placed. As tasks
	 * can enter different phases this needs to be re-examined. Lacking
	 * proper tracking of reference behaviour, this blunt hammer is used.
	 */
	migrate = mm->numa_next_reset;
	if (time_after(now, migrate)) {
		p->numa_scan_period = task_scan_min(p);
		next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
		xchg(&mm->numa_next_reset, next_scan);
	}

	/*
	 * Enforce maximal scan/migration frequency..
	 */
	migrate = mm->numa_next_scan;
	if (time_before(now, migrate))
		return;

	if (p->numa_scan_period == 0) {
		p->numa_scan_period_max = task_scan_max(p);
		p->numa_scan_period = task_scan_min(p);
	}

	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
		return;

	/*
	 * Delay this task enough that another task of this mm will likely win
	 * the next time around.
	 */
	p->node_stamp += 2 * TICK_NSEC;

	start = mm->numa_scan_offset;
	pages = sysctl_numa_balancing_scan_size;
	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
	if (!pages)
		return;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (!vma) {
		reset_ptenuma_scan(p);
		start = 0;
		vma = mm->mmap;
	}
	for (; vma; vma = vma->vm_next) {
		if (!vma_migratable(vma))
			continue;

		/* Skip small VMAs. They are not likely to be of relevance */
		if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
			continue;

		do {
			start = max(start, vma->vm_start);
			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
			end = min(end, vma->vm_end);
			nr_pte_updates += change_prot_numa(vma, start, end);

			/*
			 * Scan sysctl_numa_balancing_scan_size but ensure that
			 * at least one PTE is updated so that unused virtual
			 * address space is quickly skipped.
			 */
			if (nr_pte_updates)
				pages -= (end - start) >> PAGE_SHIFT;

			start = end;
			if (pages <= 0)
				goto out;
		} while (end != vma->vm_end);
	}

out:
	/*
	 * If the whole process was scanned without updates then no NUMA
	 * hinting faults are being recorded and scan rate should be lower.
	 */
	if (mm->numa_scan_offset == 0 && !nr_pte_updates) {
		p->numa_scan_period = min(p->numa_scan_period_max,
			p->numa_scan_period << 1);

		next_scan = now + msecs_to_jiffies(p->numa_scan_period);
		mm->numa_next_scan = next_scan;
	}

	/*
	 * It is possible to reach the end of the VMA list but the last few
	 * VMAs are not guaranteed to be vma_migratable. If they are not, we
	 * would find the !migratable VMA on the next scan but not reset the
	 * scanner to the start so check it now.
	 */
	if (vma)
		mm->numa_scan_offset = start;
	else
		reset_ptenuma_scan(p);
	up_read(&mm->mmap_sem);
}

/*
 * Drive the periodic memory faults..
 */
void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
	struct callback_head *work = &curr->numa_work;
	u64 period, now;

	/*
	 * We don't care about NUMA placement if we don't have memory.
	 */
	if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
		return;

	/*
	 * Using runtime rather than walltime has the dual advantage that
	 * we (mostly) drive the selection from busy threads and that the
	 * task needs to have done some actual work before we bother with
	 * NUMA placement.
	 */
	now = curr->se.sum_exec_runtime;
	period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;

	if (now - curr->node_stamp > period) {
		if (!curr->node_stamp)
			curr->numa_scan_period = task_scan_min(curr);
		curr->node_stamp += period;

		if (!time_before(jiffies, curr->mm->numa_next_scan)) {
			init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
			task_work_add(curr, work, true);
		}
	}
}
#else
static void task_tick_numa(struct rq *rq, struct task_struct *curr)
{
}
#endif /* CONFIG_NUMA_BALANCING */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
#ifdef CONFIG_SMP
	if (entity_is_task(se))
		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
#endif
	cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
	if (entity_is_task(se))
		list_del_init(&se->group_node);
	cfs_rq->nr_running--;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
{
	long tg_weight;

	/*
	 * Use this CPU's actual weight instead of the last load_contribution
	 * to gain a more accurate current total weight. See
	 * update_cfs_rq_load_contribution().
	 */
	tg_weight = atomic_long_read(&tg->load_avg);
	tg_weight -= cfs_rq->tg_load_contrib;
	tg_weight += cfs_rq->load.weight;

	return tg_weight;
}

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	long tg_weight, load, shares;

	tg_weight = calc_tg_weight(tg, cfs_rq);
	load = cfs_rq->load.weight;

	shares = (tg->shares * load);
	if (tg_weight)
		shares /= tg_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	return shares;
}
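
/*
 * Example (illustrative only): if this cpu's cfs_rq contributes half of the
 * group's total weight (load == tg_weight / 2), the entity representing the
 * group on this runqueue is given shares = tg->shares / 2, clamped to the
 * [MIN_SHARES, tg->shares] range above.
 */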
# else /* CONFIG_SMP */
static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	return tg->shares;
}
# endif /* CONFIG_SMP */
static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	if (se->on_rq) {
		/* commit outstanding execution time */
		if (cfs_rq->curr == se)
			update_curr(cfs_rq);
		account_entity_dequeue(cfs_rq, se);
	}

	update_load_set(&se->load, weight);

	if (se->on_rq)
		account_entity_enqueue(cfs_rq, se);
}

static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
	struct task_group *tg;
	struct sched_entity *se;
	long shares;

	tg = cfs_rq->tg;
	se = tg->se[cpu_of(rq_of(cfs_rq))];
	if (!se || throttled_hierarchy(cfs_rq))
		return;
#ifndef CONFIG_SMP
	if (likely(se->load.weight == tg->shares))
		return;
#endif
	shares = calc_cfs_shares(cfs_rq, tg);

	reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_SMP
/*
 * We choose a half-life close to 1 scheduling period.
 * Note: The tables below are dependent on this value.
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_MAX_AVG */

/* Precomputed fixed inverse multiplies for multiplication by y^n */
static const u32 runnable_avg_yN_inv[] = {
	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
	0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
	0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
	0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
	0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
	0x85aac367, 0x82cd8698,
};

/*
 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
 * over-estimates when re-combining.
 */
static const u32 runnable_avg_yN_sum[] = {
	    0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
	 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
	17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
};

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static __always_inline u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (!n)
		return val;
	else if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * With a look-up table which covers y^n (n<PERIOD)
	 *
	 * To achieve constant time decay_load.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val *= runnable_avg_yN_inv[local_n];
	/* We don't use SRR here since we always want to round down. */
	return val >> 32;
}
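
/*
 * Example (illustrative only): decay_load(1024, 32) first shifts right by
 * 32 / LOAD_AVG_PERIOD = 1 and then multiplies by runnable_avg_yN_inv[0],
 * giving ~512, i.e. y^32 = 1/2 as intended.  Any n > LOAD_AVG_PERIOD * 63
 * would shift the value all the way to zero, hence the early return above.
 */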
1363
1364/*
1365 * For updates fully spanning n periods, the contribution to runnable
1366 * average will be: \Sum 1024*y^n
1367 *
1368 * We can compute this reasonably efficiently by combining:
1369 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
1370 */
1371static u32 __compute_runnable_contrib(u64 n)
1372{
1373 u32 contrib = 0;
1374
1375 if (likely(n <= LOAD_AVG_PERIOD))
1376 return runnable_avg_yN_sum[n];
1377 else if (unlikely(n >= LOAD_AVG_MAX_N))
1378 return LOAD_AVG_MAX;
1379
1380 /* Compute \Sum y^n combining precomputed values for y^i, \Sum y^j */
1381 do {
1382 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
1383 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
1384
1385 n -= LOAD_AVG_PERIOD;
1386 } while (n > LOAD_AVG_PERIOD);
1387
1388 contrib = decay_load(contrib, n);
1389 return contrib + runnable_avg_yN_sum[n];
Paul Turner9d85f212012-10-04 13:18:29 +02001390}
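/*
 * As an illustration: for n = 40 the loop above peels off one full period,
 * leaving contrib = runnable_avg_yN_sum[LOAD_AVG_PERIOD]; that value is then
 * decayed over the remaining 8 periods and combined with
 * runnable_avg_yN_sum[8], approximating 1024 * \Sum y^k for 1 <= k <= 40.
 */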
1391
1392/*
1393 * We can represent the historical contribution to runnable average as the
1394 * coefficients of a geometric series. To do this we sub-divide our runnable
1395 * history into segments of approximately 1ms (1024us); label the segment that
1396 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1397 *
1398 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1399 * p0 p1 p2
1400 * (now) (~1ms ago) (~2ms ago)
1401 *
1402 * Let u_i denote the fraction of p_i that the entity was runnable.
1403 *
1404 * We then designate the fractions u_i as our co-efficients, yielding the
1405 * following representation of historical load:
1406 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1407 *
1408 * We choose y based on the width of a reasonable scheduling period, fixing:
1409 * y^32 = 0.5
1410 *
1411 * This means that the contribution to load ~32ms ago (u_32) will be weighted
1412 * approximately half as much as the contribution to load within the last ms
1413 * (u_0).
1414 *
1415 * When a period "rolls over" and we have new u_0`, multiplying the previous
1416 * sum again by y is sufficient to update:
1417 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1418 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1419 */
1420static __always_inline int __update_entity_runnable_avg(u64 now,
1421 struct sched_avg *sa,
1422 int runnable)
1423{
Paul Turner5b51f2f2012-10-04 13:18:32 +02001424 u64 delta, periods;
1425 u32 runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02001426 int delta_w, decayed = 0;
1427
1428 delta = now - sa->last_runnable_update;
1429 /*
1430 * This should only happen when time goes backwards, which it
1431 * unfortunately does during sched clock init when we swap over to TSC.
1432 */
1433 if ((s64)delta < 0) {
1434 sa->last_runnable_update = now;
1435 return 0;
1436 }
1437
1438 /*
1439 * Use 1024ns as the unit of measurement since it's a reasonable
1440 * approximation of 1us and fast to compute.
1441 */
1442 delta >>= 10;
1443 if (!delta)
1444 return 0;
1445 sa->last_runnable_update = now;
1446
1447 /* delta_w is the amount already accumulated against our next period */
1448 delta_w = sa->runnable_avg_period % 1024;
1449 if (delta + delta_w >= 1024) {
1450 /* period roll-over */
1451 decayed = 1;
1452
1453 /*
1454 * Now that we know we're crossing a period boundary, figure
1455 * out how much from delta we need to complete the current
1456 * period and accrue it.
1457 */
1458 delta_w = 1024 - delta_w;
Paul Turner5b51f2f2012-10-04 13:18:32 +02001459 if (runnable)
1460 sa->runnable_avg_sum += delta_w;
1461 sa->runnable_avg_period += delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02001462
Paul Turner5b51f2f2012-10-04 13:18:32 +02001463 delta -= delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02001464
Paul Turner5b51f2f2012-10-04 13:18:32 +02001465 /* Figure out how many additional periods this update spans */
1466 periods = delta / 1024;
1467 delta %= 1024;
1468
1469 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1470 periods + 1);
1471 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1472 periods + 1);
1473
1474 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
1475 runnable_contrib = __compute_runnable_contrib(periods);
1476 if (runnable)
1477 sa->runnable_avg_sum += runnable_contrib;
1478 sa->runnable_avg_period += runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02001479 }
1480
1481 /* Remainder of delta accrued against u_0` */
1482 if (runnable)
1483 sa->runnable_avg_sum += delta;
1484 sa->runnable_avg_period += delta;
1485
1486 return decayed;
1487}
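/*
 * By way of illustration: an entity that has been runnable across every
 * period sees runnable_avg_sum converge towards LOAD_AVG_MAX as the geometric
 * series saturates, while one runnable only during the current partial period
 * contributes at most its u_0 of just under 1024.
 */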
1488
Paul Turner9ee474f2012-10-04 13:18:30 +02001489/* Synchronize an entity's decay with its parenting cfs_rq.*/
Paul Turneraff3e492012-10-04 13:18:30 +02001490static inline u64 __synchronize_entity_decay(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02001491{
1492 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1493 u64 decays = atomic64_read(&cfs_rq->decay_counter);
1494
1495 decays -= se->avg.decay_count;
1496 if (!decays)
Paul Turneraff3e492012-10-04 13:18:30 +02001497 return 0;
Paul Turner9ee474f2012-10-04 13:18:30 +02001498
1499 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1500 se->avg.decay_count = 0;
Paul Turneraff3e492012-10-04 13:18:30 +02001501
1502 return decays;
Paul Turner9ee474f2012-10-04 13:18:30 +02001503}
1504
Paul Turnerc566e8e2012-10-04 13:18:30 +02001505#ifdef CONFIG_FAIR_GROUP_SCHED
1506static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1507 int force_update)
1508{
1509 struct task_group *tg = cfs_rq->tg;
Alex Shibf5b9862013-06-20 10:18:54 +08001510 long tg_contrib;
Paul Turnerc566e8e2012-10-04 13:18:30 +02001511
1512 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1513 tg_contrib -= cfs_rq->tg_load_contrib;
1514
Alex Shibf5b9862013-06-20 10:18:54 +08001515 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1516 atomic_long_add(tg_contrib, &tg->load_avg);
Paul Turnerc566e8e2012-10-04 13:18:30 +02001517 cfs_rq->tg_load_contrib += tg_contrib;
1518 }
1519}
Paul Turner8165e142012-10-04 13:18:31 +02001520
Paul Turnerbb17f652012-10-04 13:18:31 +02001521/*
1522 * Aggregate cfs_rq runnable averages into an equivalent task_group
1523 * representation for computing load contributions.
1524 */
1525static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1526 struct cfs_rq *cfs_rq)
1527{
1528 struct task_group *tg = cfs_rq->tg;
1529 long contrib;
1530
1531 /* The fraction of a cpu used by this cfs_rq */
1532 contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1533 sa->runnable_avg_period + 1);
1534 contrib -= cfs_rq->tg_runnable_contrib;
1535
1536 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1537 atomic_add(contrib, &tg->runnable_avg);
1538 cfs_rq->tg_runnable_contrib += contrib;
1539 }
1540}
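/*
 * E.g. the per-cpu contribution computed above is only folded into
 * tg->runnable_avg once it has drifted by more than ~1/64th of the previously
 * recorded value, which keeps updates to the shared atomic infrequent.
 */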
1541
Paul Turner8165e142012-10-04 13:18:31 +02001542static inline void __update_group_entity_contrib(struct sched_entity *se)
1543{
1544 struct cfs_rq *cfs_rq = group_cfs_rq(se);
1545 struct task_group *tg = cfs_rq->tg;
Paul Turnerbb17f652012-10-04 13:18:31 +02001546 int runnable_avg;
1547
Paul Turner8165e142012-10-04 13:18:31 +02001548 u64 contrib;
1549
1550 contrib = cfs_rq->tg_load_contrib * tg->shares;
Alex Shibf5b9862013-06-20 10:18:54 +08001551 se->avg.load_avg_contrib = div_u64(contrib,
1552 atomic_long_read(&tg->load_avg) + 1);
Paul Turnerbb17f652012-10-04 13:18:31 +02001553
1554 /*
1555 * For group entities we need to compute a correction term in the case
1556 * that they are consuming <1 cpu so that we would contribute the same
1557 * load as a task of equal weight.
1558 *
1559 * Explicitly co-ordinating this measurement would be expensive, but
1560 * fortunately the sum of each cpu's contribution forms a usable
1561 * lower-bound on the true value.
1562 *
1563 * Consider the aggregate of 2 contributions. Either they are disjoint
1564 * (and the sum represents the true value) or they overlap and we are
1565 * understating by the aggregate of their overlap.
1566 *
1567 * Extending this to N cpus, for a given overlap, the maximum amount we
1568 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
1569 * cpus that overlap for this interval and w_i is the interval width.
1570 *
1571 * On a small machine, the first term is well-bounded, which bounds the
1572 * total error since w_i is a subset of the period. Whereas on a
1573 * larger machine, while this first term can be larger, if w_i is of
1574 * consequential size it is guaranteed that n_i*w_i quickly converges to
1575 * our upper bound of 1-cpu.
1576 */
1577 runnable_avg = atomic_read(&tg->runnable_avg);
1578 if (runnable_avg < NICE_0_LOAD) {
1579 se->avg.load_avg_contrib *= runnable_avg;
1580 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1581 }
Paul Turner8165e142012-10-04 13:18:31 +02001582}
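/*
 * Illustrative example: a group whose hierarchy is runnable roughly half the
 * time in aggregate (tg->runnable_avg ~= NICE_0_LOAD / 2) has the
 * share-derived contribution computed above scaled down by about half,
 * approximating the load a single task of equal weight would have contributed.
 */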
Paul Turnerc566e8e2012-10-04 13:18:30 +02001583#else
1584static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1585 int force_update) {}
Paul Turnerbb17f652012-10-04 13:18:31 +02001586static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1587 struct cfs_rq *cfs_rq) {}
Paul Turner8165e142012-10-04 13:18:31 +02001588static inline void __update_group_entity_contrib(struct sched_entity *se) {}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001589#endif
1590
Paul Turner8165e142012-10-04 13:18:31 +02001591static inline void __update_task_entity_contrib(struct sched_entity *se)
1592{
1593 u32 contrib;
1594
1595 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1596 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1597 contrib /= (se->avg.runnable_avg_period + 1);
1598 se->avg.load_avg_contrib = scale_load(contrib);
1599}
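/*
 * E.g. a task that has been runnable for about half of its tracked history
 * ends up with a load_avg_contrib of roughly half its weight, since the
 * weight is scaled by runnable_avg_sum / (runnable_avg_period + 1).
 */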
1600
Paul Turner2dac7542012-10-04 13:18:30 +02001601/* Compute the current contribution to load_avg by se, return any delta */
1602static long __update_entity_load_avg_contrib(struct sched_entity *se)
1603{
1604 long old_contrib = se->avg.load_avg_contrib;
1605
Paul Turner8165e142012-10-04 13:18:31 +02001606 if (entity_is_task(se)) {
1607 __update_task_entity_contrib(se);
1608 } else {
Paul Turnerbb17f652012-10-04 13:18:31 +02001609 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
Paul Turner8165e142012-10-04 13:18:31 +02001610 __update_group_entity_contrib(se);
1611 }
Paul Turner2dac7542012-10-04 13:18:30 +02001612
1613 return se->avg.load_avg_contrib - old_contrib;
1614}
1615
Paul Turner9ee474f2012-10-04 13:18:30 +02001616static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1617 long load_contrib)
1618{
1619 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1620 cfs_rq->blocked_load_avg -= load_contrib;
1621 else
1622 cfs_rq->blocked_load_avg = 0;
1623}
1624
Paul Turnerf1b17282012-10-04 13:18:31 +02001625static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1626
Paul Turner9d85f212012-10-04 13:18:29 +02001627/* Update a sched_entity's runnable average */
Paul Turner9ee474f2012-10-04 13:18:30 +02001628static inline void update_entity_load_avg(struct sched_entity *se,
1629 int update_cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02001630{
Paul Turner2dac7542012-10-04 13:18:30 +02001631 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1632 long contrib_delta;
Paul Turnerf1b17282012-10-04 13:18:31 +02001633 u64 now;
Paul Turner2dac7542012-10-04 13:18:30 +02001634
Paul Turnerf1b17282012-10-04 13:18:31 +02001635 /*
1636 * For a group entity we need to use their owned cfs_rq_clock_task() in
1637 * case they are the parent of a throttled hierarchy.
1638 */
1639 if (entity_is_task(se))
1640 now = cfs_rq_clock_task(cfs_rq);
1641 else
1642 now = cfs_rq_clock_task(group_cfs_rq(se));
1643
1644 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
Paul Turner2dac7542012-10-04 13:18:30 +02001645 return;
1646
1647 contrib_delta = __update_entity_load_avg_contrib(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02001648
1649 if (!update_cfs_rq)
1650 return;
1651
Paul Turner2dac7542012-10-04 13:18:30 +02001652 if (se->on_rq)
1653 cfs_rq->runnable_load_avg += contrib_delta;
Paul Turner9ee474f2012-10-04 13:18:30 +02001654 else
1655 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1656}
1657
1658/*
1659 * Decay the load contributed by all blocked children and account this so that
1660 * their contribution may be appropriately discounted when they wake up.
1661 */
Paul Turneraff3e492012-10-04 13:18:30 +02001662static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001663{
Paul Turnerf1b17282012-10-04 13:18:31 +02001664 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001665 u64 decays;
1666
1667 decays = now - cfs_rq->last_decay;
Paul Turneraff3e492012-10-04 13:18:30 +02001668 if (!decays && !force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001669 return;
1670
Alex Shi25099402013-06-20 10:18:55 +08001671 if (atomic_long_read(&cfs_rq->removed_load)) {
1672 unsigned long removed_load;
1673 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
Paul Turneraff3e492012-10-04 13:18:30 +02001674 subtract_blocked_load_contrib(cfs_rq, removed_load);
1675 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001676
Paul Turneraff3e492012-10-04 13:18:30 +02001677 if (decays) {
1678 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1679 decays);
1680 atomic64_add(decays, &cfs_rq->decay_counter);
1681 cfs_rq->last_decay = now;
1682 }
Paul Turnerc566e8e2012-10-04 13:18:30 +02001683
1684 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
Paul Turner9d85f212012-10-04 13:18:29 +02001685}
Ben Segall18bf2802012-10-04 12:51:20 +02001686
1687static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1688{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001689 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
Paul Turnerbb17f652012-10-04 13:18:31 +02001690 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
Ben Segall18bf2802012-10-04 12:51:20 +02001691}
Paul Turner2dac7542012-10-04 13:18:30 +02001692
1693/* Add the load generated by se into cfs_rq's child load-average */
1694static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001695 struct sched_entity *se,
1696 int wakeup)
Paul Turner2dac7542012-10-04 13:18:30 +02001697{
Paul Turneraff3e492012-10-04 13:18:30 +02001698 /*
1699 * We track migrations using entity decay_count <= 0; on a wake-up
1700 * migration we use a negative decay count to track the remote decays
1701 * accumulated while sleeping.
Alex Shia75cdaa2013-06-20 10:18:47 +08001702 *
1703 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
1704 * are seen by enqueue_entity_load_avg() as a migration with an already
1705 * constructed load_avg_contrib.
Paul Turneraff3e492012-10-04 13:18:30 +02001706 */
1707 if (unlikely(se->avg.decay_count <= 0)) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001708 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
Paul Turneraff3e492012-10-04 13:18:30 +02001709 if (se->avg.decay_count) {
1710 /*
1711 * In a wake-up migration we have to approximate the
1712 * time sleeping. This is because we can't synchronize
1713 * clock_task between the two cpus, and it is not
1714 * guaranteed to be read-safe. Instead, we can
1715 * approximate this using our carried decays, which are
1716 * explicitly atomically readable.
1717 */
1718 se->avg.last_runnable_update -= (-se->avg.decay_count)
1719 << 20;
1720 update_entity_load_avg(se, 0);
1721 /* Indicate that we're now synchronized and on-rq */
1722 se->avg.decay_count = 0;
1723 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001724 wakeup = 0;
1725 } else {
Alex Shi282cf492013-06-20 10:18:48 +08001726 /*
1727 * Task re-woke on same cpu (or else migrate_task_rq_fair()
1728 * would have made count negative); we must be careful to avoid
1729 * double-accounting blocked time after synchronizing decays.
1730 */
1731 se->avg.last_runnable_update += __synchronize_entity_decay(se)
1732 << 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001733 }
1734
Paul Turneraff3e492012-10-04 13:18:30 +02001735 /* migrated tasks did not contribute to our blocked load */
1736 if (wakeup) {
Paul Turner9ee474f2012-10-04 13:18:30 +02001737 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turneraff3e492012-10-04 13:18:30 +02001738 update_entity_load_avg(se, 0);
1739 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001740
Paul Turner2dac7542012-10-04 13:18:30 +02001741 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02001742 /* we force update consideration on load-balancer moves */
1743 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
Paul Turner2dac7542012-10-04 13:18:30 +02001744}
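/*
 * Illustrative example: a task that migrated while sleeping arrives here with
 * decay_count < 0; each missed decay period is folded back into
 * last_runnable_update as ~1ms (hence the << 20) rather than reading the
 * remote rq's clock, which is the approximation described above.
 */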
1745
Paul Turner9ee474f2012-10-04 13:18:30 +02001746/*
1747 * Remove se's load from this cfs_rq child load-average; if the entity is
1748 * transitioning to a blocked state we track its projected decay using
1749 * blocked_load_avg.
1750 */
Paul Turner2dac7542012-10-04 13:18:30 +02001751static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001752 struct sched_entity *se,
1753 int sleep)
Paul Turner2dac7542012-10-04 13:18:30 +02001754{
Paul Turner9ee474f2012-10-04 13:18:30 +02001755 update_entity_load_avg(se, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02001756 /* we force update consideration on load-balancer moves */
1757 update_cfs_rq_blocked_load(cfs_rq, !sleep);
Paul Turner9ee474f2012-10-04 13:18:30 +02001758
Paul Turner2dac7542012-10-04 13:18:30 +02001759 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
Paul Turner9ee474f2012-10-04 13:18:30 +02001760 if (sleep) {
1761 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
1762 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
1763 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
Paul Turner2dac7542012-10-04 13:18:30 +02001764}
Vincent Guittot642dbc32013-04-18 18:34:26 +02001765
1766/*
1767 * Update the rq's load with the elapsed running time before entering
1768 * idle. If the last scheduled task is not a CFS task, idle_enter will
1769 * be the only way to update the runnable statistic.
1770 */
1771void idle_enter_fair(struct rq *this_rq)
1772{
1773 update_rq_runnable_avg(this_rq, 1);
1774}
1775
1776/*
1777 * Update the rq's load with the elapsed idle time before a task is
1778 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
1779 * be the only way to update the runnable statistic.
1780 */
1781void idle_exit_fair(struct rq *this_rq)
1782{
1783 update_rq_runnable_avg(this_rq, 0);
1784}
1785
Paul Turner9d85f212012-10-04 13:18:29 +02001786#else
Paul Turner9ee474f2012-10-04 13:18:30 +02001787static inline void update_entity_load_avg(struct sched_entity *se,
1788 int update_cfs_rq) {}
Ben Segall18bf2802012-10-04 12:51:20 +02001789static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001790static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001791 struct sched_entity *se,
1792 int wakeup) {}
Paul Turner2dac7542012-10-04 13:18:30 +02001793static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001794 struct sched_entity *se,
1795 int sleep) {}
Paul Turneraff3e492012-10-04 13:18:30 +02001796static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
1797 int force_update) {}
Paul Turner9d85f212012-10-04 13:18:29 +02001798#endif
1799
Ingo Molnar2396af62007-08-09 11:16:48 +02001800static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001801{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001802#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02001803 struct task_struct *tsk = NULL;
1804
1805 if (entity_is_task(se))
1806 tsk = task_of(se);
1807
Lucas De Marchi41acab82010-03-10 23:37:45 -03001808 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001809 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001810
1811 if ((s64)delta < 0)
1812 delta = 0;
1813
Lucas De Marchi41acab82010-03-10 23:37:45 -03001814 if (unlikely(delta > se->statistics.sleep_max))
1815 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001816
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001817 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001818 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01001819
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001820 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02001821 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001822 trace_sched_stat_sleep(tsk, delta);
1823 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001824 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03001825 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001826 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001827
1828 if ((s64)delta < 0)
1829 delta = 0;
1830
Lucas De Marchi41acab82010-03-10 23:37:45 -03001831 if (unlikely(delta > se->statistics.block_max))
1832 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001833
Peter Zijlstra8c79a042012-01-30 14:51:37 +01001834 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03001835 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02001836
Peter Zijlstrae4143142009-07-23 20:13:26 +02001837 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001838 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001839 se->statistics.iowait_sum += delta;
1840 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02001841 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07001842 }
1843
Andrew Vaginb781a602011-11-28 12:03:35 +03001844 trace_sched_stat_blocked(tsk, delta);
1845
Peter Zijlstrae4143142009-07-23 20:13:26 +02001846 /*
1847 * Blocking time is in units of nanosecs, so shift by
1848 * 20 to get a milliseconds-range estimation of the
1849 * amount of time that the task spent sleeping:
1850 */
1851 if (unlikely(prof_on == SLEEP_PROFILING)) {
1852 profile_hits(SLEEP_PROFILING,
1853 (void *)get_wchan(tsk),
1854 delta >> 20);
1855 }
1856 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02001857 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001858 }
1859#endif
1860}
1861
Peter Zijlstraddc97292007-10-15 17:00:10 +02001862static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1863{
1864#ifdef CONFIG_SCHED_DEBUG
1865 s64 d = se->vruntime - cfs_rq->min_vruntime;
1866
1867 if (d < 0)
1868 d = -d;
1869
1870 if (d > 3*sysctl_sched_latency)
1871 schedstat_inc(cfs_rq, nr_spread_over);
1872#endif
1873}
1874
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001875static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001876place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1877{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02001878 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001879
Peter Zijlstra2cb86002007-11-09 22:39:37 +01001880 /*
1881 * The 'current' period is already promised to the current tasks,
1882 * however the extra weight of the new task will slow them down a
1883 * little, place the new task so that it fits in the slot that
1884 * stays open at the end.
1885 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02001886 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02001887 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001888
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001889 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01001890 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001891 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02001892
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001893 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001894 * Halve their sleep time's effect, to allow
1895 * for a gentler effect of sleepers:
1896 */
1897 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1898 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02001899
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02001900 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001901 }
1902
Mike Galbraithb5d9d732009-09-08 11:12:28 +02001903 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05301904 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001905}
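/*
 * E.g. with GENTLE_FAIR_SLEEPERS a task waking from a long sleep is placed at
 * most half of sysctl_sched_latency behind min_vruntime, giving it a bounded
 * wakeup bonus, while the final max_vruntime() ensures it never gains time by
 * being placed behind its own vruntime.
 */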
1906
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001907static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1908
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001909static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001910enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001911{
1912 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001913 * Update the normalized vruntime before updating min_vruntime
Kamalesh Babulal0fc576d2013-06-27 11:24:18 +05301914 * through calling update_curr().
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001915 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001916 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001917 se->vruntime += cfs_rq->min_vruntime;
1918
1919 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001920 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001921 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02001922 update_curr(cfs_rq);
Paul Turnerf269ae02012-10-04 13:18:31 +02001923 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001924 account_entity_enqueue(cfs_rq, se);
1925 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001926
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001927 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02001928 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02001929 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02001930 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001931
Ingo Molnard2417e52007-08-09 11:16:47 +02001932 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02001933 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02001934 if (se != cfs_rq->curr)
1935 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001936 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001937
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001938 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08001939 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07001940 check_enqueue_throttle(cfs_rq);
1941 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001942}
1943
Rik van Riel2c13c9192011-02-01 09:48:37 -05001944static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01001945{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001946 for_each_sched_entity(se) {
1947 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1948 if (cfs_rq->last == se)
1949 cfs_rq->last = NULL;
1950 else
1951 break;
1952 }
1953}
Peter Zijlstra2002c692008-11-11 11:52:33 +01001954
Rik van Riel2c13c9192011-02-01 09:48:37 -05001955static void __clear_buddies_next(struct sched_entity *se)
1956{
1957 for_each_sched_entity(se) {
1958 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1959 if (cfs_rq->next == se)
1960 cfs_rq->next = NULL;
1961 else
1962 break;
1963 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01001964}
1965
Rik van Rielac53db52011-02-01 09:51:03 -05001966static void __clear_buddies_skip(struct sched_entity *se)
1967{
1968 for_each_sched_entity(se) {
1969 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1970 if (cfs_rq->skip == se)
1971 cfs_rq->skip = NULL;
1972 else
1973 break;
1974 }
1975}
1976
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001977static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1978{
Rik van Riel2c13c9192011-02-01 09:48:37 -05001979 if (cfs_rq->last == se)
1980 __clear_buddies_last(se);
1981
1982 if (cfs_rq->next == se)
1983 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05001984
1985 if (cfs_rq->skip == se)
1986 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01001987}
1988
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07001989static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07001990
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001991static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01001992dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001993{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001994 /*
1995 * Update run-time statistics of the 'current'.
1996 */
1997 update_curr(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08001998 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001999
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02002000 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002001 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02002002#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002003 if (entity_is_task(se)) {
2004 struct task_struct *tsk = task_of(se);
2005
2006 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002007 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002008 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002009 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002010 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02002011#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02002012 }
2013
Peter Zijlstra2002c692008-11-11 11:52:33 +01002014 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01002015
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002016 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002017 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002018 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002019 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002020
2021 /*
2022 * Normalize the entity after updating the min_vruntime because the
2023 * update can refer to the ->curr item and we need to reflect this
2024 * movement in our normalized position.
2025 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002026 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002027 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07002028
Paul Turnerd8b49862011-07-21 09:43:41 -07002029 /* return excess runtime on last dequeue */
2030 return_cfs_rq_runtime(cfs_rq);
2031
Peter Zijlstra1e876232011-05-17 16:21:10 -07002032 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002033 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002034}
2035
2036/*
2037 * Preempt the current task with a newly woken task if needed:
2038 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02002039static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002040check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002041{
Peter Zijlstra11697832007-09-05 14:32:49 +02002042 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002043 struct sched_entity *se;
2044 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02002045
Peter Zijlstra6d0f0ebd2007-10-15 17:00:05 +02002046 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02002047 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002048 if (delta_exec > ideal_runtime) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002049 resched_task(rq_of(cfs_rq)->curr);
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002050 /*
2051 * The current task ran long enough, ensure it doesn't get
2052 * re-elected due to buddy favours.
2053 */
2054 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002055 return;
2056 }
2057
2058 /*
2059 * Ensure that a task that missed wakeup preemption by a
2060 * narrow margin doesn't have to wait for a full slice.
2061 * This also mitigates buddy induced latencies under load.
2062 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002063 if (delta_exec < sysctl_sched_min_granularity)
2064 return;
2065
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002066 se = __pick_first_entity(cfs_rq);
2067 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02002068
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002069 if (delta < 0)
2070 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01002071
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002072 if (delta > ideal_runtime)
2073 resched_task(rq_of(cfs_rq)->curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002074}
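/*
 * In other words: the current task is rescheduled either once it has run past
 * its ideal slice, or, after at least sysctl_sched_min_granularity of runtime,
 * once the leftmost entity's vruntime has fallen more than a full slice
 * behind it.
 */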
2075
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002076static void
Ingo Molnar8494f412007-08-09 11:16:48 +02002077set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002078{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002079 /* 'current' is not kept within the tree. */
2080 if (se->on_rq) {
2081 /*
2082 * Any task has to be enqueued before it get to execute on
2083 * a CPU. So account for the time it spent waiting on the
2084 * runqueue.
2085 */
2086 update_stats_wait_end(cfs_rq, se);
2087 __dequeue_entity(cfs_rq, se);
2088 }
2089
Ingo Molnar79303e92007-08-09 11:16:47 +02002090 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02002091 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02002092#ifdef CONFIG_SCHEDSTATS
2093 /*
2094 * Track our maximum slice length, if the CPU's load is at
2095 * least twice that of our own weight (i.e. don't track it
2096 * when there are only lesser-weight tasks around):
2097 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02002098 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002099 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02002100 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2101 }
2102#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02002103 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002104}
2105
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02002106static int
2107wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2108
Rik van Rielac53db52011-02-01 09:51:03 -05002109/*
2110 * Pick the next process, keeping these things in mind, in this order:
2111 * 1) keep things fair between processes/task groups
2112 * 2) pick the "next" process, since someone really wants that to run
2113 * 3) pick the "last" process, for cache locality
2114 * 4) do not run the "skip" process, if something else is available
2115 */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01002116static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002117{
Rik van Rielac53db52011-02-01 09:51:03 -05002118 struct sched_entity *se = __pick_first_entity(cfs_rq);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002119 struct sched_entity *left = se;
Peter Zijlstraf4b67552008-11-04 21:25:07 +01002120
Rik van Rielac53db52011-02-01 09:51:03 -05002121 /*
2122 * Avoid running the skip buddy, if running something else can
2123 * be done without getting too unfair.
2124 */
2125 if (cfs_rq->skip == se) {
2126 struct sched_entity *second = __pick_next_entity(se);
2127 if (second && wakeup_preempt_entity(second, left) < 1)
2128 se = second;
2129 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002130
Mike Galbraithf685cea2009-10-23 23:09:22 +02002131 /*
2132 * Prefer last buddy, try to return the CPU to a preempted task.
2133 */
2134 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2135 se = cfs_rq->last;
2136
Rik van Rielac53db52011-02-01 09:51:03 -05002137 /*
2138 * Someone really wants this to run. If it's not unfair, run it.
2139 */
2140 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2141 se = cfs_rq->next;
2142
Mike Galbraithf685cea2009-10-23 23:09:22 +02002143 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01002144
2145 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002146}
2147
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002148static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2149
Ingo Molnarab6cde22007-08-09 11:16:48 +02002150static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002151{
2152 /*
2153 * If still on the runqueue then deactivate_task()
2154 * was not called and update_curr() has to be done:
2155 */
2156 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002157 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002158
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002159 /* throttle cfs_rqs exceeding runtime */
2160 check_cfs_rq_runtime(cfs_rq);
2161
Peter Zijlstraddc97292007-10-15 17:00:10 +02002162 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002163 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02002164 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002165 /* Put 'current' back into the tree. */
2166 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02002167 /* in !on_rq case, update occurred at dequeue */
Paul Turner9ee474f2012-10-04 13:18:30 +02002168 update_entity_load_avg(prev, 1);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002169 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02002170 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002171}
2172
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002173static void
2174entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002175{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002176 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002177 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002178 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002179 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002180
Paul Turner43365bd2010-12-15 19:10:17 -08002181 /*
Paul Turner9d85f212012-10-04 13:18:29 +02002182 * Ensure that runnable average is periodically updated.
2183 */
Paul Turner9ee474f2012-10-04 13:18:30 +02002184 update_entity_load_avg(curr, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002185 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstrabf0bd942013-07-26 23:48:42 +02002186 update_cfs_shares(cfs_rq);
Paul Turner9d85f212012-10-04 13:18:29 +02002187
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002188#ifdef CONFIG_SCHED_HRTICK
2189 /*
2190 * queued ticks are scheduled to match the slice, so don't bother
2191 * validating it and just reschedule.
2192 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07002193 if (queued) {
2194 resched_task(rq_of(cfs_rq)->curr);
2195 return;
2196 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002197 /*
2198 * don't let the period tick interfere with the hrtick preemption
2199 */
2200 if (!sched_feat(DOUBLE_TICK) &&
2201 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2202 return;
2203#endif
2204
Yong Zhang2c2efae2011-07-29 16:20:33 +08002205 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002206 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002207}
2208
Paul Turnerab84d312011-07-21 09:43:28 -07002209
2210/**************************************************
2211 * CFS bandwidth control machinery
2212 */
2213
2214#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02002215
2216#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01002217static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02002218
2219static inline bool cfs_bandwidth_used(void)
2220{
Ingo Molnarc5905af2012-02-24 08:31:31 +01002221 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002222}
2223
2224void account_cfs_bandwidth_used(int enabled, int was_enabled)
2225{
2226 /* only need to count groups transitioning between enabled/!enabled */
2227 if (enabled && !was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002228 static_key_slow_inc(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002229 else if (!enabled && was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002230 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002231}
2232#else /* HAVE_JUMP_LABEL */
2233static bool cfs_bandwidth_used(void)
2234{
2235 return true;
2236}
2237
2238void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2239#endif /* HAVE_JUMP_LABEL */
2240
Paul Turnerab84d312011-07-21 09:43:28 -07002241/*
2242 * default period for cfs group bandwidth.
2243 * default: 0.1s, units: nanoseconds
2244 */
2245static inline u64 default_cfs_period(void)
2246{
2247 return 100000000ULL;
2248}
Paul Turnerec12cb72011-07-21 09:43:30 -07002249
2250static inline u64 sched_cfs_bandwidth_slice(void)
2251{
2252 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2253}
2254
Paul Turnera9cf55b2011-07-21 09:43:32 -07002255/*
2256 * Replenish runtime according to assigned quota and update expiration time.
2257 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2258 * additional synchronization around rq->lock.
2259 *
2260 * requires cfs_b->lock
2261 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02002262void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07002263{
2264 u64 now;
2265
2266 if (cfs_b->quota == RUNTIME_INF)
2267 return;
2268
2269 now = sched_clock_cpu(smp_processor_id());
2270 cfs_b->runtime = cfs_b->quota;
2271 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2272}
2273
Peter Zijlstra029632f2011-10-25 10:00:11 +02002274static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2275{
2276 return &tg->cfs_bandwidth;
2277}
2278
Paul Turnerf1b17282012-10-04 13:18:31 +02002279/* rq_clock_task() normalized against any time this cfs_rq has spent throttled */
2280static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2281{
2282 if (unlikely(cfs_rq->throttle_count))
2283 return cfs_rq->throttled_clock_task;
2284
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002285 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02002286}
2287
Paul Turner85dac902011-07-21 09:43:33 -07002288/* returns 0 on failure to allocate runtime */
2289static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07002290{
2291 struct task_group *tg = cfs_rq->tg;
2292 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002293 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002294
2295 /* note: this is a positive sum as runtime_remaining <= 0 */
2296 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2297
2298 raw_spin_lock(&cfs_b->lock);
2299 if (cfs_b->quota == RUNTIME_INF)
2300 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07002301 else {
Paul Turnera9cf55b2011-07-21 09:43:32 -07002302 /*
2303 * If the bandwidth pool has become inactive, then at least one
2304 * period must have elapsed since the last consumption.
2305 * Refresh the global state and ensure bandwidth timer becomes
2306 * active.
2307 */
2308 if (!cfs_b->timer_active) {
2309 __refill_cfs_bandwidth_runtime(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07002310 __start_cfs_bandwidth(cfs_b);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002311 }
Paul Turner58088ad2011-07-21 09:43:31 -07002312
2313 if (cfs_b->runtime > 0) {
2314 amount = min(cfs_b->runtime, min_amount);
2315 cfs_b->runtime -= amount;
2316 cfs_b->idle = 0;
2317 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002318 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07002319 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002320 raw_spin_unlock(&cfs_b->lock);
2321
2322 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002323 /*
2324 * we may have advanced our local expiration to account for allowed
2325 * spread between our sched_clock and the one on which runtime was
2326 * issued.
2327 */
2328 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2329 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07002330
2331 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002332}
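/*
 * Illustrative example: a cfs_rq that has just run out of local runtime asks
 * the global pool for enough to refill one bandwidth slice; if the pool can
 * only cover part of that, runtime_remaining may stay <= 0 and the failed
 * assignment lets the caller reschedule so the hierarchy can be throttled.
 */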
2333
2334/*
2335 * Note: This depends on the synchronization provided by sched_clock and the
2336 * fact that rq->clock snapshots this value.
2337 */
2338static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2339{
2340 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002341
2342 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002343 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07002344 return;
2345
2346 if (cfs_rq->runtime_remaining < 0)
2347 return;
2348
2349 /*
2350 * If the local deadline has passed we have to consider the
2351 * possibility that our sched_clock is 'fast' and the global deadline
2352 * has not truly expired.
2353 *
2354 * Fortunately we can determine whether this is the case by checking
2355 * whether the global deadline has advanced.
2356 */
2357
2358 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2359 /* extend local deadline, drift is bounded above by 2 ticks */
2360 cfs_rq->runtime_expires += TICK_NSEC;
2361 } else {
2362 /* global deadline is ahead, expiration has passed */
2363 cfs_rq->runtime_remaining = 0;
2364 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002365}
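/*
 * E.g. if this cpu's sched_clock runs slightly ahead, the local deadline can
 * pass while the global one has not; in that case the local deadline is
 * simply nudged forward by a tick instead of expiring the remaining runtime.
 */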
2366
2367static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2368 unsigned long delta_exec)
2369{
Paul Turnera9cf55b2011-07-21 09:43:32 -07002370 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07002371 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002372 expire_cfs_rq_runtime(cfs_rq);
2373
2374 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07002375 return;
2376
Paul Turner85dac902011-07-21 09:43:33 -07002377 /*
2378 * if we're unable to extend our runtime we resched so that the active
2379 * hierarchy can be throttled
2380 */
2381 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2382 resched_task(rq_of(cfs_rq)->curr);
Paul Turnerec12cb72011-07-21 09:43:30 -07002383}
2384
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002385static __always_inline
2386void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07002387{
Paul Turner56f570e2011-11-07 20:26:33 -08002388 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07002389 return;
2390
2391 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2392}
2393
Paul Turner85dac902011-07-21 09:43:33 -07002394static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2395{
Paul Turner56f570e2011-11-07 20:26:33 -08002396 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07002397}
2398
Paul Turner64660c82011-07-21 09:43:36 -07002399/* check whether cfs_rq, or any parent, is throttled */
2400static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2401{
Paul Turner56f570e2011-11-07 20:26:33 -08002402 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07002403}
2404
2405/*
2406 * Ensure that neither of the group entities corresponding to src_cpu nor
2407 * dest_cpu are members of a throttled hierarchy when performing group
2408 * load-balance operations.
2409 */
2410static inline int throttled_lb_pair(struct task_group *tg,
2411 int src_cpu, int dest_cpu)
2412{
2413 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2414
2415 src_cfs_rq = tg->cfs_rq[src_cpu];
2416 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2417
2418 return throttled_hierarchy(src_cfs_rq) ||
2419 throttled_hierarchy(dest_cfs_rq);
2420}
2421
2422/* updated child weight may affect parent so we have to do this bottom up */
2423static int tg_unthrottle_up(struct task_group *tg, void *data)
2424{
2425 struct rq *rq = data;
2426 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2427
2428 cfs_rq->throttle_count--;
2429#ifdef CONFIG_SMP
2430 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02002431 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002432 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02002433 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07002434 }
2435#endif
2436
2437 return 0;
2438}
2439
2440static int tg_throttle_down(struct task_group *tg, void *data)
2441{
2442 struct rq *rq = data;
2443 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2444
Paul Turner82958362012-10-04 13:18:31 +02002445 /* group is entering throttled state, stop time */
2446 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002447 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07002448 cfs_rq->throttle_count++;
2449
2450 return 0;
2451}
2452
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002453static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07002454{
2455 struct rq *rq = rq_of(cfs_rq);
2456 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2457 struct sched_entity *se;
2458 long task_delta, dequeue = 1;
2459
2460 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2461
Paul Turnerf1b17282012-10-04 13:18:31 +02002462 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07002463 rcu_read_lock();
2464 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2465 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07002466
2467 task_delta = cfs_rq->h_nr_running;
2468 for_each_sched_entity(se) {
2469 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2470 /* throttled entity or throttle-on-deactivate */
2471 if (!se->on_rq)
2472 break;
2473
2474 if (dequeue)
2475 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2476 qcfs_rq->h_nr_running -= task_delta;
2477
2478 if (qcfs_rq->load.weight)
2479 dequeue = 0;
2480 }
2481
2482 if (!se)
2483 rq->nr_running -= task_delta;
2484
2485 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002486 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07002487 raw_spin_lock(&cfs_b->lock);
2488 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2489 raw_spin_unlock(&cfs_b->lock);
2490}
2491
Peter Zijlstra029632f2011-10-25 10:00:11 +02002492void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07002493{
2494 struct rq *rq = rq_of(cfs_rq);
2495 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2496 struct sched_entity *se;
2497 int enqueue = 1;
2498 long task_delta;
2499
Michael Wang22b958d2013-06-04 14:23:39 +08002500 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07002501
2502 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02002503
2504 update_rq_clock(rq);
2505
Paul Turner671fd9d2011-07-21 09:43:34 -07002506 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002507 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07002508 list_del_rcu(&cfs_rq->throttled_list);
2509 raw_spin_unlock(&cfs_b->lock);
2510
Paul Turner64660c82011-07-21 09:43:36 -07002511 /* update hierarchical throttle state */
2512 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2513
Paul Turner671fd9d2011-07-21 09:43:34 -07002514 if (!cfs_rq->load.weight)
2515 return;
2516
2517 task_delta = cfs_rq->h_nr_running;
2518 for_each_sched_entity(se) {
2519 if (se->on_rq)
2520 enqueue = 0;
2521
2522 cfs_rq = cfs_rq_of(se);
2523 if (enqueue)
2524 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2525 cfs_rq->h_nr_running += task_delta;
2526
2527 if (cfs_rq_throttled(cfs_rq))
2528 break;
2529 }
2530
2531 if (!se)
2532 rq->nr_running += task_delta;
2533
2534 /* determine whether we need to wake up potentially idle cpu */
2535 if (rq->curr == rq->idle && rq->cfs.nr_running)
2536 resched_task(rq->curr);
2537}
2538
2539static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2540 u64 remaining, u64 expires)
2541{
2542 struct cfs_rq *cfs_rq;
2543 u64 runtime = remaining;
2544
2545 rcu_read_lock();
2546 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2547 throttled_list) {
2548 struct rq *rq = rq_of(cfs_rq);
2549
2550 raw_spin_lock(&rq->lock);
2551 if (!cfs_rq_throttled(cfs_rq))
2552 goto next;
2553
2554 runtime = -cfs_rq->runtime_remaining + 1;
2555 if (runtime > remaining)
2556 runtime = remaining;
2557 remaining -= runtime;
2558
2559 cfs_rq->runtime_remaining += runtime;
2560 cfs_rq->runtime_expires = expires;
2561
2562 /* we check whether we're throttled above */
2563 if (cfs_rq->runtime_remaining > 0)
2564 unthrottle_cfs_rq(cfs_rq);
2565
2566next:
2567 raw_spin_unlock(&rq->lock);
2568
2569 if (!remaining)
2570 break;
2571 }
2572 rcu_read_unlock();
2573
2574 return remaining;
2575}
2576
Paul Turner58088ad2011-07-21 09:43:31 -07002577/*
2578 * Responsible for refilling a task_group's bandwidth and unthrottling its
2579 * cfs_rqs as appropriate. If there has been no activity within the last
2580 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2581 * used to track this state.
2582 */
2583static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2584{
Paul Turner671fd9d2011-07-21 09:43:34 -07002585 u64 runtime, runtime_expires;
2586 int idle = 1, throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07002587
2588 raw_spin_lock(&cfs_b->lock);
2589 /* no need to continue the timer with no bandwidth constraint */
2590 if (cfs_b->quota == RUNTIME_INF)
2591 goto out_unlock;
2592
Paul Turner671fd9d2011-07-21 09:43:34 -07002593 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2594 /* idle depends on !throttled (for the case of a large deficit) */
2595 idle = cfs_b->idle && !throttled;
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002596 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07002597
Paul Turnera9cf55b2011-07-21 09:43:32 -07002598 /* if we're going inactive then everything else can be deferred */
2599 if (idle)
2600 goto out_unlock;
2601
2602 __refill_cfs_bandwidth_runtime(cfs_b);
2603
Paul Turner671fd9d2011-07-21 09:43:34 -07002604 if (!throttled) {
2605 /* mark as potentially idle for the upcoming period */
2606 cfs_b->idle = 1;
2607 goto out_unlock;
2608 }
Paul Turner58088ad2011-07-21 09:43:31 -07002609
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002610 /* account preceding periods in which throttling occurred */
2611 cfs_b->nr_throttled += overrun;
2612
Paul Turner671fd9d2011-07-21 09:43:34 -07002613 /*
2614 * There are throttled entities so we must first use the new bandwidth
2615 * to unthrottle them before making it generally available. This
2616 * ensures that all existing debts will be paid before a new cfs_rq is
2617 * allowed to run.
2618 */
2619 runtime = cfs_b->runtime;
2620 runtime_expires = cfs_b->runtime_expires;
2621 cfs_b->runtime = 0;
2622
2623 /*
2624 * This check is repeated as we are holding onto the new bandwidth
2625 * while we unthrottle. This can potentially race with an unthrottled
2626 * group trying to acquire new bandwidth from the global pool.
2627 */
2628 while (throttled && runtime > 0) {
2629 raw_spin_unlock(&cfs_b->lock);
2630 /* we can't nest cfs_b->lock while distributing bandwidth */
2631 runtime = distribute_cfs_runtime(cfs_b, runtime,
2632 runtime_expires);
2633 raw_spin_lock(&cfs_b->lock);
2634
2635 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2636 }
2637
2638 /* return (any) remaining runtime */
2639 cfs_b->runtime = runtime;
2640 /*
2641 * While we are ensured activity in the period following an
2642 * unthrottle, this also covers the case in which the new bandwidth is
2643 * insufficient to cover the existing bandwidth deficit. (Forcing the
2644 * timer to remain active while there are any throttled entities.)
2645 */
2646 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07002647out_unlock:
2648 if (idle)
2649 cfs_b->timer_active = 0;
2650 raw_spin_unlock(&cfs_b->lock);
2651
2652 return idle;
2653}
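/*
 * For example, assuming a group configured with a 20ms quota per 100ms
 * period: each expiry of the period timer refills cfs_b->runtime to 20ms;
 * if any cfs_rqs are still throttled, that 20ms is first used to pay down
 * their deficits via distribute_cfs_runtime() and only the remainder is
 * returned to the global pool for future local assignment.
 */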
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002654
Paul Turnerd8b49862011-07-21 09:43:41 -07002655/* a cfs_rq won't donate quota below this amount */
2656static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2657/* minimum remaining period time to redistribute slack quota */
2658static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2659/* how long we wait to gather additional slack before distributing */
2660static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
2661
2662/* are we near the end of the current quota period? */
2663static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2664{
2665 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2666 u64 remaining;
2667
2668 /* if the call-back is running a quota refresh is already occurring */
2669 if (hrtimer_callback_running(refresh_timer))
2670 return 1;
2671
2672 /* is a quota refresh about to occur? */
2673 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2674 if (remaining < min_expire)
2675 return 1;
2676
2677 return 0;
2678}
2679
2680static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2681{
2682 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2683
2684 /* if there's a quota refresh soon don't bother with slack */
2685 if (runtime_refresh_within(cfs_b, min_left))
2686 return;
2687
2688 start_bandwidth_timer(&cfs_b->slack_timer,
2689 ns_to_ktime(cfs_bandwidth_slack_period));
2690}
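/*
 * With the defaults above, min_left works out to 5ms + 2ms = 7ms: e.g. if
 * the period timer is due to fire in 3ms the slack timer is not armed,
 * since the upcoming refresh will redistribute runtime soon enough.
 */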
2691
2692/* we know any runtime found here is valid as update_curr() precedes return */
2693static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2694{
2695 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2696 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2697
2698 if (slack_runtime <= 0)
2699 return;
2700
2701 raw_spin_lock(&cfs_b->lock);
2702 if (cfs_b->quota != RUNTIME_INF &&
2703 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2704 cfs_b->runtime += slack_runtime;
2705
2706 /* we are under rq->lock, defer unthrottling using a timer */
2707 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2708 !list_empty(&cfs_b->throttled_cfs_rq))
2709 start_cfs_slack_bandwidth(cfs_b);
2710 }
2711 raw_spin_unlock(&cfs_b->lock);
2712
2713 /* even if it's not valid for return we don't want to try again */
2714 cfs_rq->runtime_remaining -= slack_runtime;
2715}
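/*
 * For example: a cfs_rq dequeuing its last task with 4ms of local runtime
 * left donates 3ms back to the global pool (it always keeps
 * min_cfs_rq_runtime = 1ms), and the slack timer is only armed once the
 * pool holds more than one slice and there is a throttled cfs_rq that
 * could actually use it.
 */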
2716
2717static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2718{
Paul Turner56f570e2011-11-07 20:26:33 -08002719 if (!cfs_bandwidth_used())
2720 return;
2721
Paul Turnerfccfdc62011-11-07 20:26:34 -08002722 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07002723 return;
2724
2725 __return_cfs_rq_runtime(cfs_rq);
2726}
2727
2728/*
2729 * This is done with a timer (instead of inline with bandwidth return) since
2730 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2731 */
2732static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2733{
2734 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2735 u64 expires;
2736
2737 /* confirm we're still not at a refresh boundary */
2738 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
2739 return;
2740
2741 raw_spin_lock(&cfs_b->lock);
2742 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
2743 runtime = cfs_b->runtime;
2744 cfs_b->runtime = 0;
2745 }
2746 expires = cfs_b->runtime_expires;
2747 raw_spin_unlock(&cfs_b->lock);
2748
2749 if (!runtime)
2750 return;
2751
2752 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
2753
2754 raw_spin_lock(&cfs_b->lock);
2755 if (expires == cfs_b->runtime_expires)
2756 cfs_b->runtime = runtime;
2757 raw_spin_unlock(&cfs_b->lock);
2758}
2759
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002760/*
2761 * When a group wakes up we want to make sure that its quota is not already
2762 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
2763 * runtime as update_curr() throttling cannot trigger until it's on-rq.
2764 */
2765static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
2766{
Paul Turner56f570e2011-11-07 20:26:33 -08002767 if (!cfs_bandwidth_used())
2768 return;
2769
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002770 /* an active group must be handled by the update_curr()->put() path */
2771 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
2772 return;
2773
2774 /* ensure the group is not already throttled */
2775 if (cfs_rq_throttled(cfs_rq))
2776 return;
2777
2778 /* update runtime allocation */
2779 account_cfs_rq_runtime(cfs_rq, 0);
2780 if (cfs_rq->runtime_remaining <= 0)
2781 throttle_cfs_rq(cfs_rq);
2782}
2783
2784/* conditionally throttle active cfs_rq's from put_prev_entity() */
2785static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2786{
Paul Turner56f570e2011-11-07 20:26:33 -08002787 if (!cfs_bandwidth_used())
2788 return;
2789
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002790 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
2791 return;
2792
2793 /*
2794 * it's possible for a throttled entity to be forced into a running
2795 * state (e.g. set_curr_task); in this case we're finished.
2796 */
2797 if (cfs_rq_throttled(cfs_rq))
2798 return;
2799
2800 throttle_cfs_rq(cfs_rq);
2801}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002802
Peter Zijlstra029632f2011-10-25 10:00:11 +02002803static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
2804{
2805 struct cfs_bandwidth *cfs_b =
2806 container_of(timer, struct cfs_bandwidth, slack_timer);
2807 do_sched_cfs_slack_timer(cfs_b);
2808
2809 return HRTIMER_NORESTART;
2810}
2811
2812static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
2813{
2814 struct cfs_bandwidth *cfs_b =
2815 container_of(timer, struct cfs_bandwidth, period_timer);
2816 ktime_t now;
2817 int overrun;
2818 int idle = 0;
2819
2820 for (;;) {
2821 now = hrtimer_cb_get_time(timer);
2822 overrun = hrtimer_forward(timer, now, cfs_b->period);
2823
2824 if (!overrun)
2825 break;
2826
2827 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2828 }
2829
2830 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2831}
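/*
 * The loop above catches up on missed periods: e.g. if the callback runs
 * 230ms late with a 100ms period, the first hrtimer_forward() advances the
 * timer three periods and returns overrun = 3, do_sched_cfs_period_timer()
 * is called once with that count, and the next pass returns 0 so the loop
 * terminates.
 */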
2832
2833void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2834{
2835 raw_spin_lock_init(&cfs_b->lock);
2836 cfs_b->runtime = 0;
2837 cfs_b->quota = RUNTIME_INF;
2838 cfs_b->period = ns_to_ktime(default_cfs_period());
2839
2840 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2841 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2842 cfs_b->period_timer.function = sched_cfs_period_timer;
2843 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2844 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2845}
2846
2847static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2848{
2849 cfs_rq->runtime_enabled = 0;
2850 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2851}
2852
2853/* requires cfs_b->lock, may release to reprogram timer */
2854void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2855{
2856 /*
2857 * The timer may be active because we're trying to set a new bandwidth
2858 * period or because we're racing with the tear-down path
2859 * (timer_active==0 becomes visible before the hrtimer call-back
2860 * terminates). In either case we ensure that it's re-programmed.
2861 */
2862 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2863 raw_spin_unlock(&cfs_b->lock);
2864 /* ensure cfs_b->lock is available while we wait */
2865 hrtimer_cancel(&cfs_b->period_timer);
2866
2867 raw_spin_lock(&cfs_b->lock);
2868 /* if someone else restarted the timer then we're done */
2869 if (cfs_b->timer_active)
2870 return;
2871 }
2872
2873 cfs_b->timer_active = 1;
2874 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2875}
2876
2877static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2878{
2879 hrtimer_cancel(&cfs_b->period_timer);
2880 hrtimer_cancel(&cfs_b->slack_timer);
2881}
2882
Arnd Bergmann38dc3342013-01-25 14:14:22 +00002883static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02002884{
2885 struct cfs_rq *cfs_rq;
2886
2887 for_each_leaf_cfs_rq(rq, cfs_rq) {
2888 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2889
2890 if (!cfs_rq->runtime_enabled)
2891 continue;
2892
2893 /*
2894 * clock_task is not advancing so we just need to make sure
2895 * there's some valid quota amount
2896 */
2897 cfs_rq->runtime_remaining = cfs_b->quota;
2898 if (cfs_rq_throttled(cfs_rq))
2899 unthrottle_cfs_rq(cfs_rq);
2900 }
2901}
2902
2903#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02002904static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2905{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002906 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02002907}
2908
2909static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2910 unsigned long delta_exec) {}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002911static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2912static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002913static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07002914
2915static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2916{
2917 return 0;
2918}
Paul Turner64660c82011-07-21 09:43:36 -07002919
2920static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2921{
2922 return 0;
2923}
2924
2925static inline int throttled_lb_pair(struct task_group *tg,
2926 int src_cpu, int dest_cpu)
2927{
2928 return 0;
2929}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002930
2931void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2932
2933#ifdef CONFIG_FAIR_GROUP_SCHED
2934static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07002935#endif
2936
Peter Zijlstra029632f2011-10-25 10:00:11 +02002937static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2938{
2939 return NULL;
2940}
2941static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07002942static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002943
2944#endif /* CONFIG_CFS_BANDWIDTH */
2945
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002946/**************************************************
2947 * CFS operations on tasks:
2948 */
2949
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002950#ifdef CONFIG_SCHED_HRTICK
2951static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2952{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002953 struct sched_entity *se = &p->se;
2954 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2955
2956 WARN_ON(task_rq(p) != rq);
2957
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002958 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002959 u64 slice = sched_slice(cfs_rq, se);
2960 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2961 s64 delta = slice - ran;
2962
2963 if (delta < 0) {
2964 if (rq->curr == p)
2965 resched_task(p);
2966 return;
2967 }
2968
2969 /*
2970 * Don't schedule slices shorter than 10000ns; that just
2971 * doesn't make sense. Rely on vruntime for fairness.
2972 */
Peter Zijlstra31656512008-07-18 18:01:23 +02002973 if (rq->curr != p)
Peter Zijlstra157124c2008-07-28 11:53:11 +02002974 delta = max_t(s64, 10000LL, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002975
Peter Zijlstra31656512008-07-18 18:01:23 +02002976 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002977 }
2978}
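/*
 * For example: if sched_slice() gives the current task a 6ms slice and it
 * has already run 2ms of it, the hrtick is programmed to fire in 4ms so
 * preemption happens right at slice expiry rather than at the next regular
 * tick.
 */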
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002979
2980/*
2981 * called from enqueue/dequeue and updates the hrtick when the
2982 * current task is from our class and nr_running is low enough
2983 * to matter.
2984 */
2985static void hrtick_update(struct rq *rq)
2986{
2987 struct task_struct *curr = rq->curr;
2988
Mike Galbraithb39e66e2011-11-22 15:20:07 +01002989 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02002990 return;
2991
2992 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2993 hrtick_start_fair(rq, curr);
2994}
Dhaval Giani55e12e52008-06-24 23:39:43 +05302995#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002996static inline void
2997hrtick_start_fair(struct rq *rq, struct task_struct *p)
2998{
2999}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003000
3001static inline void hrtick_update(struct rq *rq)
3002{
3003}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003004#endif
3005
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003006/*
3007 * The enqueue_task method is called before nr_running is
3008 * increased. Here we update the fair scheduling stats and
3009 * then put the task into the rbtree:
3010 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00003011static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003012enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003013{
3014 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003015 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003016
3017 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003018 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003019 break;
3020 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003021 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07003022
3023 /*
3024 * end evaluation on encountering a throttled cfs_rq
3025 *
3026 * note: in the case of encountering a throttled cfs_rq we will
3027 * post the final h_nr_running increment below.
3028 */
3029 if (cfs_rq_throttled(cfs_rq))
3030 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07003031 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07003032
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003033 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003034 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003035
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003036 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08003037 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07003038 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003039
Paul Turner85dac902011-07-21 09:43:33 -07003040 if (cfs_rq_throttled(cfs_rq))
3041 break;
3042
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003043 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003044 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003045 }
3046
Ben Segall18bf2802012-10-04 12:51:20 +02003047 if (!se) {
3048 update_rq_runnable_avg(rq, rq->nr_running);
Paul Turner85dac902011-07-21 09:43:33 -07003049 inc_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02003050 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003051 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003052}
3053
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003054static void set_next_buddy(struct sched_entity *se);
3055
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003056/*
3057 * The dequeue_task method is called before nr_running is
3058 * decreased. We remove the task from the rbtree and
3059 * update the fair scheduling stats:
3060 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003061static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003062{
3063 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003064 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003065 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003066
3067 for_each_sched_entity(se) {
3068 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003069 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07003070
3071 /*
3072 * end evaluation on encountering a throttled cfs_rq
3073 *
3074 * note: in the case of encountering a throttled cfs_rq we will
3075 * post the final h_nr_running decrement below.
3076 */
3077 if (cfs_rq_throttled(cfs_rq))
3078 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07003079 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003080
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003081 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003082 if (cfs_rq->load.weight) {
3083 /*
3084 * Bias pick_next to pick a task from this cfs_rq, as
3085 * p is sleeping when it is within its sched_slice.
3086 */
3087 if (task_sleep && parent_entity(se))
3088 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07003089
3090 /* avoid re-evaluating load for this entity */
3091 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003092 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003093 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003094 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003095 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003096
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003097 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08003098 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07003099 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003100
Paul Turner85dac902011-07-21 09:43:33 -07003101 if (cfs_rq_throttled(cfs_rq))
3102 break;
3103
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003104 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003105 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003106 }
3107
Ben Segall18bf2802012-10-04 12:51:20 +02003108 if (!se) {
Paul Turner85dac902011-07-21 09:43:33 -07003109 dec_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02003110 update_rq_runnable_avg(rq, 1);
3111 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003112 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003113}
3114
Gregory Haskinse7693a32008-01-25 21:08:09 +01003115#ifdef CONFIG_SMP
Peter Zijlstra029632f2011-10-25 10:00:11 +02003116/* Used instead of source_load when we know the type == 0 */
3117static unsigned long weighted_cpuload(const int cpu)
3118{
Alex Shib92486c2013-06-20 10:18:50 +08003119 return cpu_rq(cpu)->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003120}
3121
3122/*
3123 * Return a low guess at the load of a migration-source cpu weighted
3124 * according to the scheduling class and "nice" value.
3125 *
3126 * We want to under-estimate the load of migration sources, to
3127 * balance conservatively.
3128 */
3129static unsigned long source_load(int cpu, int type)
3130{
3131 struct rq *rq = cpu_rq(cpu);
3132 unsigned long total = weighted_cpuload(cpu);
3133
3134 if (type == 0 || !sched_feat(LB_BIAS))
3135 return total;
3136
3137 return min(rq->cpu_load[type-1], total);
3138}
3139
3140/*
3141 * Return a high guess at the load of a migration-target cpu weighted
3142 * according to the scheduling class and "nice" value.
3143 */
3144static unsigned long target_load(int cpu, int type)
3145{
3146 struct rq *rq = cpu_rq(cpu);
3147 unsigned long total = weighted_cpuload(cpu);
3148
3149 if (type == 0 || !sched_feat(LB_BIAS))
3150 return total;
3151
3152 return max(rq->cpu_load[type-1], total);
3153}
3154
3155static unsigned long power_of(int cpu)
3156{
3157 return cpu_rq(cpu)->cpu_power;
3158}
3159
3160static unsigned long cpu_avg_load_per_task(int cpu)
3161{
3162 struct rq *rq = cpu_rq(cpu);
3163 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Alex Shib92486c2013-06-20 10:18:50 +08003164 unsigned long load_avg = rq->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003165
3166 if (nr_running)
Alex Shib92486c2013-06-20 10:18:50 +08003167 return load_avg / nr_running;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003168
3169 return 0;
3170}
3171
Michael Wang62470412013-07-04 12:55:51 +08003172static void record_wakee(struct task_struct *p)
3173{
3174 /*
3175 * Rough decay (wiping) for cost saving; don't worry
3176 * about the boundary, a really active task won't care
3177 * about the loss.
3178 */
3179 if (jiffies > current->wakee_flip_decay_ts + HZ) {
3180 current->wakee_flips = 0;
3181 current->wakee_flip_decay_ts = jiffies;
3182 }
3183
3184 if (current->last_wakee != p) {
3185 current->last_wakee = p;
3186 current->wakee_flips++;
3187 }
3188}
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003189
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003190static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003191{
3192 struct sched_entity *se = &p->se;
3193 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003194 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003195
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003196#ifndef CONFIG_64BIT
3197 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003198
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003199 do {
3200 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3201 smp_rmb();
3202 min_vruntime = cfs_rq->min_vruntime;
3203 } while (min_vruntime != min_vruntime_copy);
3204#else
3205 min_vruntime = cfs_rq->min_vruntime;
3206#endif
3207
3208 se->vruntime -= min_vruntime;
Michael Wang62470412013-07-04 12:55:51 +08003209 record_wakee(p);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003210}
3211
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003212#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003213/*
3214 * effective_load() calculates the load change as seen from the root_task_group
3215 *
3216 * Adding load to a group doesn't make a group heavier, but can cause movement
3217 * of group shares between cpus. Assuming the shares were perfectly aligned one
3218 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003219 *
3220 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3221 * on this @cpu and results in a total addition (subtraction) of @wg to the
3222 * total group weight.
3223 *
3224 * Given a runqueue weight distribution (rw_i) we can compute a shares
3225 * distribution (s_i) using:
3226 *
3227 * s_i = rw_i / \Sum rw_j (1)
3228 *
3229 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3230 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3231 * shares distribution (s_i):
3232 *
3233 * rw_i = { 2, 4, 1, 0 }
3234 * s_i = { 2/7, 4/7, 1/7, 0 }
3235 *
3236 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3237 * task used to run on and the CPU the waker is running on), we need to
3238 * compute the effect of waking a task on either CPU and, in case of a sync
3239 * wakeup, compute the effect of the current task going to sleep.
3240 *
3241 * So for a change of @wl to the local @cpu with an overall group weight change
3242 * of @wg we can compute the new shares distribution (s'_i) using:
3243 *
3244 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3245 *
3246 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3247 * differences in waking a task to CPU 0. The additional task changes the
3248 * weight and shares distributions like:
3249 *
3250 * rw'_i = { 3, 4, 1, 0 }
3251 * s'_i = { 3/8, 4/8, 1/8, 0 }
3252 *
3253 * We can then compute the difference in effective weight by using:
3254 *
3255 * dw_i = S * (s'_i - s_i) (3)
3256 *
3257 * Where 'S' is the group weight as seen by its parent.
3258 *
3259 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3260 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3261 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003262 */
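/*
 * Continuing the example above with an assumed group weight of
 * tg->shares = 1024: the effective load change would be roughly
 * 1024 * 5/56 ~= +91 on CPU 0 and 1024 * -4/56 ~= -73 on CPU 1.
 */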
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003263static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003264{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003265 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003266
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003267 if (!tg->parent) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003268 return wl;
3269
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003270 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003271 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003272
Paul Turner977dda72011-01-14 17:57:50 -08003273 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003274
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003275 /*
3276 * W = @wg + \Sum rw_j
3277 */
3278 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003279
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003280 /*
3281 * w = rw_i + @wl
3282 */
3283 w = se->my_q->load.weight + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003284
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003285 /*
3286 * wl = S * s'_i; see (2)
3287 */
3288 if (W > 0 && w < W)
3289 wl = (w * tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08003290 else
3291 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003292
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003293 /*
3294 * Per the above, wl is the new se->load.weight value; since
3295 * those are clipped to [MIN_SHARES, ...) do so now. See
3296 * calc_cfs_shares().
3297 */
Paul Turner977dda72011-01-14 17:57:50 -08003298 if (wl < MIN_SHARES)
3299 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003300
3301 /*
3302 * wl = dw_i = S * (s'_i - s_i); see (3)
3303 */
Paul Turner977dda72011-01-14 17:57:50 -08003304 wl -= se->load.weight;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003305
3306 /*
3307 * Recursively apply this logic to all parent groups to compute
3308 * the final effective load change on the root group. Since
3309 * only the @tg group gets extra weight, all parent groups can
3310 * only redistribute existing shares. @wl is the shift in shares
3311 * resulting from this level per the above.
3312 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003313 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003314 }
3315
3316 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003317}
3318#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003319
Peter Zijlstra83378262008-06-27 13:41:37 +02003320static inline unsigned long effective_load(struct task_group *tg, int cpu,
3321 unsigned long wl, unsigned long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003322{
Peter Zijlstra83378262008-06-27 13:41:37 +02003323 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003324}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003325
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003326#endif
3327
Michael Wang62470412013-07-04 12:55:51 +08003328static int wake_wide(struct task_struct *p)
3329{
Peter Zijlstra7d9ffa82013-07-04 12:56:46 +08003330 int factor = this_cpu_read(sd_llc_size);
Michael Wang62470412013-07-04 12:55:51 +08003331
3332 /*
3333 * Yeah, it's the switching frequency: a high flip count could mean many
3334 * wakees or rapid switching. Using the factor here just helps to
3335 * automatically adjust the looseness, so a bigger node will lead to more pull.
3336 */
3337 if (p->wakee_flips > factor) {
3338 /*
3339 * the wakee is somewhat hot and needs a certain amount of cpu
3340 * resource, so if the waker is far hotter, prefer to leave
3341 * the wakee alone.
3342 */
3343 if (current->wakee_flips > (factor * p->wakee_flips))
3344 return 1;
3345 }
3346
3347 return 0;
3348}
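/*
 * For example, assuming an LLC spanning 8 cpus (factor == 8): a wakee with
 * wakee_flips == 10 passes the first test, and a waker with
 * wakee_flips == 100 (> 8 * 10) makes wake_wide() return 1, so the affine
 * wakeup below is skipped and the wakee is left where it was.
 */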
3349
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003350static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003351{
Paul Turnere37b6a72011-01-21 20:44:59 -08003352 s64 this_load, load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003353 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003354 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003355 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02003356 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003357 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003358
Michael Wang62470412013-07-04 12:55:51 +08003359 /*
3360 * If we wake multiple tasks, be careful not to bounce
3361 * ourselves around too much.
3362 */
3363 if (wake_wide(p))
3364 return 0;
3365
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003366 idx = sd->wake_idx;
3367 this_cpu = smp_processor_id();
3368 prev_cpu = task_cpu(p);
3369 load = source_load(prev_cpu, idx);
3370 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003371
3372 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003373 * If sync wakeup then subtract the (maximum possible)
3374 * effect of the currently running task from the load
3375 * of the current CPU:
3376 */
Peter Zijlstra83378262008-06-27 13:41:37 +02003377 if (sync) {
3378 tg = task_group(current);
3379 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003380
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003381 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02003382 load += effective_load(tg, prev_cpu, 0, -weight);
3383 }
3384
3385 tg = task_group(p);
3386 weight = p->se.load.weight;
3387
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003388 /*
3389 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003390 * due to the sync cause above having dropped this_load to 0, we'll
3391 * always have an imbalance, but there's really nothing you can do
3392 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003393 *
3394 * Otherwise check if either cpu is near enough in load to allow this
3395 * task to be woken on this_cpu.
3396 */
Paul Turnere37b6a72011-01-21 20:44:59 -08003397 if (this_load > 0) {
3398 s64 this_eff_load, prev_eff_load;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02003399
3400 this_eff_load = 100;
3401 this_eff_load *= power_of(prev_cpu);
3402 this_eff_load *= this_load +
3403 effective_load(tg, this_cpu, weight, weight);
3404
3405 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3406 prev_eff_load *= power_of(this_cpu);
3407 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3408
3409 balanced = this_eff_load <= prev_eff_load;
3410 } else
3411 balanced = true;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003412
3413 /*
3414 * If the currently running task will sleep within
3415 * a reasonable amount of time then attract this newly
3416 * woken task:
3417 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02003418 if (sync && balanced)
3419 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003420
Lucas De Marchi41acab82010-03-10 23:37:45 -03003421 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003422 tl_per_task = cpu_avg_load_per_task(this_cpu);
3423
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003424 if (balanced ||
3425 (this_load <= load &&
3426 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003427 /*
3428 * This domain has SD_WAKE_AFFINE and
3429 * p is cache cold in this domain, and
3430 * there is no bad imbalance.
3431 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003432 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003433 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003434
3435 return 1;
3436 }
3437 return 0;
3438}
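/*
 * For example, with equal cpu power on both cpus and an assumed
 * imbalance_pct of 125: prev_eff_load gets a 112 weighting versus 100 for
 * this_eff_load, so the wakeup is still considered balanced as long as the
 * waking cpu's effective load exceeds the previous cpu's by no more than
 * about 12%.
 */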
3439
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003440/*
3441 * find_idlest_group finds and returns the least busy CPU group within the
3442 * domain.
3443 */
3444static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02003445find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003446 int this_cpu, int load_idx)
Gregory Haskinse7693a32008-01-25 21:08:09 +01003447{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07003448 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003449 unsigned long min_load = ULONG_MAX, this_load = 0;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003450 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003451
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003452 do {
3453 unsigned long load, avg_load;
3454 int local_group;
3455 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003456
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003457 /* Skip over this group if it has no CPUs allowed */
3458 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003459 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003460 continue;
3461
3462 local_group = cpumask_test_cpu(this_cpu,
3463 sched_group_cpus(group));
3464
3465 /* Tally up the load of all CPUs in the group */
3466 avg_load = 0;
3467
3468 for_each_cpu(i, sched_group_cpus(group)) {
3469 /* Bias balancing toward cpus of our domain */
3470 if (local_group)
3471 load = source_load(i, load_idx);
3472 else
3473 load = target_load(i, load_idx);
3474
3475 avg_load += load;
3476 }
3477
3478 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02003479 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003480
3481 if (local_group) {
3482 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003483 } else if (avg_load < min_load) {
3484 min_load = avg_load;
3485 idlest = group;
3486 }
3487 } while (group = group->next, group != sd->groups);
3488
3489 if (!idlest || 100*this_load < imbalance*min_load)
3490 return NULL;
3491 return idlest;
3492}
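/*
 * For example, with an assumed imbalance_pct of 125 (imbalance == 112), the
 * idlest remote group is only returned when it is at least ~11% less loaded
 * than the local group; otherwise NULL keeps the task in the local group.
 */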
3493
3494/*
3495 * find_idlest_cpu - find the idlest cpu among the cpus in group.
3496 */
3497static int
3498find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3499{
3500 unsigned long load, min_load = ULONG_MAX;
3501 int idlest = -1;
3502 int i;
3503
3504 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003505 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003506 load = weighted_cpuload(i);
3507
3508 if (load < min_load || (load == min_load && i == this_cpu)) {
3509 min_load = load;
3510 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003511 }
3512 }
3513
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003514 return idlest;
3515}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003516
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003517/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003518 * Try and locate an idle CPU in the sched_domain.
3519 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003520static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003521{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003522 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07003523 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003524 int i = task_cpu(p);
3525
3526 if (idle_cpu(target))
3527 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003528
3529 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003530 * If the previous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003531 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003532 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3533 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003534
3535 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07003536 * Otherwise, iterate the domains and find an eligible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003537 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01003538 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08003539 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07003540 sg = sd->groups;
3541 do {
3542 if (!cpumask_intersects(sched_group_cpus(sg),
3543 tsk_cpus_allowed(p)))
3544 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02003545
Linus Torvalds37407ea2012-09-16 12:29:43 -07003546 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003547 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07003548 goto next;
3549 }
3550
3551 target = cpumask_first_and(sched_group_cpus(sg),
3552 tsk_cpus_allowed(p));
3553 goto done;
3554next:
3555 sg = sg->next;
3556 } while (sg != sd->groups);
3557 }
3558done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003559 return target;
3560}
3561
3562/*
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003563 * select_task_rq_fair: balance the current task (running on cpu) in domains
3564 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_FORK and
3565 * SD_BALANCE_EXEC.
3566 *
3567 * Balance, ie. select the least loaded group.
3568 *
3569 * Returns the target CPU number, or the same CPU if no balancing is needed.
3570 *
3571 * preempt must be disabled.
3572 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01003573static int
Peter Zijlstra7608dec2011-04-05 17:23:46 +02003574select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003575{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003576 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003577 int cpu = smp_processor_id();
3578 int prev_cpu = task_cpu(p);
3579 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003580 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003581 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003582
Peter Zijlstra29baa742012-04-23 12:11:21 +02003583 if (p->nr_cpus_allowed == 1)
Mike Galbraith76854c72011-11-22 15:18:24 +01003584 return prev_cpu;
3585
Peter Zijlstra0763a662009-09-14 19:37:39 +02003586 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003587 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003588 want_affine = 1;
3589 new_cpu = prev_cpu;
3590 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01003591
Peter Zijlstradce840a2011-04-07 14:09:50 +02003592 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003593 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f428882009-12-16 18:04:34 +01003594 if (!(tmp->flags & SD_LOAD_BALANCE))
3595 continue;
3596
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003597 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003598 * If both cpu and prev_cpu are part of this domain,
3599 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01003600 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003601 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3602 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3603 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08003604 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003605 }
3606
Alex Shif03542a2012-07-26 08:55:34 +08003607 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003608 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003609 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003610
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003611 if (affine_sd) {
Alex Shif03542a2012-07-26 08:55:34 +08003612 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
Peter Zijlstradce840a2011-04-07 14:09:50 +02003613 prev_cpu = cpu;
3614
3615 new_cpu = select_idle_sibling(p, prev_cpu);
3616 goto unlock;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003617 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02003618
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003619 while (sd) {
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003620 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003621 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003622 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003623
Peter Zijlstra0763a662009-09-14 19:37:39 +02003624 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003625 sd = sd->child;
3626 continue;
3627 }
3628
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003629 if (sd_flag & SD_BALANCE_WAKE)
3630 load_idx = sd->wake_idx;
3631
3632 group = find_idlest_group(sd, p, cpu, load_idx);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003633 if (!group) {
3634 sd = sd->child;
3635 continue;
3636 }
3637
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02003638 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003639 if (new_cpu == -1 || new_cpu == cpu) {
3640 /* Now try balancing at a lower domain level of cpu */
3641 sd = sd->child;
3642 continue;
3643 }
3644
3645 /* Now try balancing at a lower domain level of new_cpu */
3646 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003647 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003648 sd = NULL;
3649 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003650 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003651 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02003652 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003653 sd = tmp;
3654 }
3655 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01003656 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02003657unlock:
3658 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01003659
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003660 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003661}
Paul Turner0a74bef2012-10-04 13:18:30 +02003662
3663/*
3664 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3665 * cfs_rq_of(p) references at time of call are still valid and identify the
3666 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3667 * other assumptions, including the state of rq->lock, should be made.
3668 */
3669static void
3670migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3671{
Paul Turneraff3e492012-10-04 13:18:30 +02003672 struct sched_entity *se = &p->se;
3673 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3674
3675 /*
3676 * Load tracking: accumulate removed load so that it can be processed
3677 * when we next update owning cfs_rq under rq->lock. Tasks contribute
3678 * to blocked load iff they have a positive decay-count. It can never
3679 * be negative here since on-rq tasks have decay-count == 0.
3680 */
3681 if (se->avg.decay_count) {
3682 se->avg.decay_count = -__synchronize_entity_decay(se);
Alex Shi25099402013-06-20 10:18:55 +08003683 atomic_long_add(se->avg.load_avg_contrib,
3684 &cfs_rq->removed_load);
Paul Turneraff3e492012-10-04 13:18:30 +02003685 }
Paul Turner0a74bef2012-10-04 13:18:30 +02003686}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003687#endif /* CONFIG_SMP */
3688
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003689static unsigned long
3690wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003691{
3692 unsigned long gran = sysctl_sched_wakeup_granularity;
3693
3694 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003695 * Since it's curr that is running now, convert the gran from real-time
3696 * to virtual-time in curr's units.
Mike Galbraith13814d42010-03-11 17:17:04 +01003697 *
3698 * By using 'se' instead of 'curr' we penalize light tasks, so
3699 * they get preempted easier. That is, if 'se' < 'curr' then
3700 * the resulting gran will be larger, therefore penalizing the
3701 * lighter, if otoh 'se' > 'curr' then the resulting gran will
3702 * be smaller, again penalizing the lighter task.
3703 *
3704 * This is especially important for buddies when the leftmost
3705 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003706 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08003707 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003708}
3709
3710/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02003711 * Should 'se' preempt 'curr'.
3712 *
3713 *             |s1
3714 *        |s2
3715 *   |s3
3716 *    g
3717 *      |<--->|c
3718 *
3719 * w(c, s1) = -1
3720 * w(c, s2) = 0
3721 * w(c, s3) = 1
3722 *
3723 */
3724static int
3725wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3726{
3727 s64 gran, vdiff = curr->vruntime - se->vruntime;
3728
3729 if (vdiff <= 0)
3730 return -1;
3731
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003732 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02003733 if (vdiff > gran)
3734 return 1;
3735
3736 return 0;
3737}
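/*
 * For example, with a wakeup granularity of 1ms (in virtual time):
 * wakeup_preempt_entity() returns 1 for a wakee whose vruntime trails
 * curr's by 3ms, 0 for one trailing by only 0.5ms, and -1 for one that is
 * already ahead of curr.
 */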
3738
Peter Zijlstra02479092008-11-04 21:25:10 +01003739static void set_last_buddy(struct sched_entity *se)
3740{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003741 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3742 return;
3743
3744 for_each_sched_entity(se)
3745 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003746}
3747
3748static void set_next_buddy(struct sched_entity *se)
3749{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003750 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
3751 return;
3752
3753 for_each_sched_entity(se)
3754 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01003755}
3756
Rik van Rielac53db52011-02-01 09:51:03 -05003757static void set_skip_buddy(struct sched_entity *se)
3758{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07003759 for_each_sched_entity(se)
3760 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05003761}
3762
Peter Zijlstra464b7522008-10-24 11:06:15 +02003763/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003764 * Preempt the current task with a newly woken task if needed:
3765 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02003766static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003767{
3768 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02003769 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003770 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02003771 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003772 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01003773
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01003774 if (unlikely(se == pse))
3775 return;
3776
Paul Turner5238cdd2011-07-21 09:43:37 -07003777 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01003778 * This is possible from callers such as move_task(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07003779 * unconditionally check_preempt_curr() after an enqueue (which may have
3780 * led to a throttle). This both saves work and prevents false
3781 * next-buddy nomination below.
3782 */
3783 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
3784 return;
3785
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003786 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02003787 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003788 next_buddy_marked = 1;
3789 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02003790
Bharata B Raoaec0a512008-08-28 14:42:49 +05303791 /*
3792 * We can come here with TIF_NEED_RESCHED already set from new task
3793 * wake up path.
Paul Turner5238cdd2011-07-21 09:43:37 -07003794 *
3795 * Note: this also catches the edge-case of curr being in a throttled
3796 * group (e.g. via set_curr_task), since update_curr() (in the
3797 * enqueue of curr) will have resulted in resched being set. This
3798 * prevents us from potentially nominating it as a false LAST_BUDDY
3799 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05303800 */
3801 if (test_tsk_need_resched(curr))
3802 return;
3803
Darren Harta2f5c9a2011-02-22 13:04:33 -08003804 /* Idle tasks are by definition preempted by non-idle tasks. */
3805 if (unlikely(curr->policy == SCHED_IDLE) &&
3806 likely(p->policy != SCHED_IDLE))
3807 goto preempt;
3808
Ingo Molnar91c234b2007-10-15 17:00:18 +02003809 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08003810 * Batch and idle tasks do not preempt non-idle tasks (their preemption
3811 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02003812 */
Ingo Molnar8ed92e512012-10-14 14:28:50 +02003813 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02003814 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003815
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003816 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07003817 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003818 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003819 if (wakeup_preempt_entity(se, pse) == 1) {
3820 /*
3821 * Bias pick_next to pick the sched entity that is
3822 * triggering this preemption.
3823 */
3824 if (!next_buddy_marked)
3825 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003826 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003827 }
Jupyung Leea65ac742009-11-17 18:51:40 +09003828
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01003829 return;
3830
3831preempt:
3832 resched_task(curr);
3833 /*
3834 * Only set the backward buddy when the current task is still
3835 * on the rq. This can happen when a wakeup gets interleaved
3836 * with schedule on the ->pre_schedule() or idle_balance()
3837 * point, either of which can drop the rq lock.
3838 *
3839 * Also, during early boot the idle thread is in the fair class;
3840 * for obvious reasons it's a bad idea to schedule back to it.
3841 */
3842 if (unlikely(!se->on_rq || curr == rq->idle))
3843 return;
3844
3845 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
3846 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003847}
3848
Ingo Molnarfb8d4722007-08-09 11:16:48 +02003849static struct task_struct *pick_next_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003850{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003851 struct task_struct *p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003852 struct cfs_rq *cfs_rq = &rq->cfs;
3853 struct sched_entity *se;
3854
Tim Blechmann36ace272009-11-24 11:55:45 +01003855 if (!cfs_rq->nr_running)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003856 return NULL;
3857
3858 do {
Ingo Molnar9948f4b2007-08-09 11:16:48 +02003859 se = pick_next_entity(cfs_rq);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01003860 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003861 cfs_rq = group_cfs_rq(se);
3862 } while (cfs_rq);
3863
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003864 p = task_of(se);
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003865 if (hrtick_enabled(rq))
3866 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003867
3868 return p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003869}
3870
3871/*
3872 * Account for a descheduled task:
3873 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02003874static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003875{
3876 struct sched_entity *se = &prev->se;
3877 struct cfs_rq *cfs_rq;
3878
3879 for_each_sched_entity(se) {
3880 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02003881 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003882 }
3883}
3884
Rik van Rielac53db52011-02-01 09:51:03 -05003885/*
3886 * sched_yield() is very simple
3887 *
3888 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3889 */
3890static void yield_task_fair(struct rq *rq)
3891{
3892 struct task_struct *curr = rq->curr;
3893 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3894 struct sched_entity *se = &curr->se;
3895
3896 /*
3897 * Are we the only task in the tree?
3898 */
3899 if (unlikely(rq->nr_running == 1))
3900 return;
3901
3902 clear_buddies(cfs_rq, se);
3903
3904 if (curr->policy != SCHED_BATCH) {
3905 update_rq_clock(rq);
3906 /*
3907 * Update run-time statistics of the 'current'.
3908 */
3909 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01003910 /*
3911 * Tell update_rq_clock() that we've just updated,
3912 * so we don't do microscopic update in schedule()
3913 * and double the fastpath cost.
3914 */
3915 rq->skip_clock_update = 1;
Rik van Rielac53db52011-02-01 09:51:03 -05003916 }
3917
3918 set_skip_buddy(se);
3919}
3920
Mike Galbraithd95f4122011-02-01 09:50:51 -05003921static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3922{
3923 struct sched_entity *se = &p->se;
3924
Paul Turner5238cdd2011-07-21 09:43:37 -07003925 /* throttled hierarchies are not runnable */
3926 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05003927 return false;
3928
3929 /* Tell the scheduler that we'd really like pse to run next. */
3930 set_next_buddy(se);
3931
Mike Galbraithd95f4122011-02-01 09:50:51 -05003932 yield_task_fair(rq);
3933
3934 return true;
3935}
3936
Peter Williams681f3e62007-10-24 18:23:51 +02003937#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003938/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02003939 * Fair scheduling class load-balancing methods.
3940 *
3941 * BASICS
3942 *
3943 * The purpose of load-balancing is to achieve the same basic fairness the
3944 * per-cpu scheduler provides, namely provide a proportional amount of compute
3945 * time to each task. This is expressed in the following equation:
3946 *
3947 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
3948 *
3949 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
3950 * W_i,0 is defined as:
3951 *
3952 * W_i,0 = \Sum_j w_i,j (2)
3953 *
3954 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
3955 * is derived from the nice value as per prio_to_weight[].
3956 *
3957 * The weight average is an exponential decay average of the instantaneous
3958 * weight:
3959 *
3960 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
3961 *
3962 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
3963 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
3964 * can also include other factors [XXX].
3965 *
3966 * To achieve this balance we define a measure of imbalance which follows
3967 * directly from (1):
3968 *
3969 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
3970 *
3971 * We then move tasks around to minimize the imbalance. In the continuous
3972 * function space it is obvious this converges, in the discrete case we get
3973 * a few fun cases generally called infeasible weight scenarios.
3974 *
3975 * [XXX expand on:
3976 * - infeasible weights;
3977 * - local vs global optima in the discrete case. ]
3978 *
3979 *
3980 * SCHED DOMAINS
3981 *
3982 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
3983 * for all i,j solution, we create a tree of cpus that follows the hardware
3984 * topology where each level pairs two lower groups (or better). This results
3985 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
3986 * tree to only the first of the previous level and we decrease the frequency
3987 * of load-balance at each level inv. proportional to the number of cpus in
3988 * the groups.
3989 *
3990 * This yields:
3991 *
3992 *   \Sum_{i = 0}^{log_2 n} (1/2^i) * (n/2^i) * 2^i = O(n)            (5)
3993 *
3994 *     where, at tree level i:
3995 *       2^i   - size of each group
3996 *       n/2^i - number of cpus doing load-balance
3997 *       1/2^i - freq
3998 *     and the sum runs over all levels
3999 *
4000 * Coupled with a limit on how many tasks we can migrate every balance pass,
4001 * this makes (5) the runtime complexity of the balancer.
4002 *
4003 * An important property here is that each CPU is still (indirectly) connected
4004 * to every other cpu in at most O(log n) steps:
4005 *
4006 * The adjacency matrix of the resulting graph is given by:
4007 *
4008 *
4009 *   A_i,j = \Union_{k = 0}^{log_2 n} (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1)  (6)
4011 *
4012 * And you'll find that:
4013 *
4014 * A^(log_2 n)_i,j != 0 for all i,j (7)
4015 *
4016 * Showing there's indeed a path between every cpu in at most O(log n) steps.
4017 * The task movement gives a factor of O(m), giving a convergence complexity
4018 * of:
4019 *
4020 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
4021 *
4022 *
4023 * WORK CONSERVING
4024 *
4025 * In order to avoid CPUs going idle while there's still work to do, new idle
4026 * balancing is more aggressive and has the newly idle cpu iterate up the domain
4027 * tree itself instead of relying on other CPUs to bring it work.
4028 *
4029 * This adds some complexity to both (5) and (8) but it reduces the total idle
4030 * time.
4031 *
4032 * [XXX more?]
4033 *
4034 *
4035 * CGROUPS
4036 *
4037 * Cgroups make a horror show out of (2); instead of a simple sum we get:
4038 *
4039 *                                 s_k,i
4040 *   W_i,0 = \Sum_j \Prod_k w_k * -------                           (9)
4041 *                                  S_k
4042 *
4043 * Where
4044 *
4045 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
4046 *
4047 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
4048 *
4049 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
4050 * property.
4051 *
4052 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
4053 * rewrite all of this once again.]
4054 */
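/*
 * Illustrative sketch (not kernel code): a stand-alone, user-space
 * walk-through of equations (2) and (4) above for just two cpus. The 1024
 * figures assume the usual nice-0 task weight and SCHED_POWER_SCALE; the
 * helper names are invented for the example and exist nowhere else.
 */
#if 0
#include <stdio.h>

/* (2): the instantaneous weight of a cpu is the sum of its task weights */
static unsigned long cpu_weight(const unsigned long *task_w, int nr)
{
	unsigned long w = 0;
	int i;

	for (i = 0; i < nr; i++)
		w += task_w[i];
	return w;
}

/* (4): imbalance between cpus i and j; W/P ratios kept in 1024-based units */
static long imbalance(unsigned long w_i, unsigned long p_i,
		      unsigned long w_j, unsigned long p_j)
{
	unsigned long wp_i = w_i * 1024 / p_i;
	unsigned long wp_j = w_j * 1024 / p_j;
	unsigned long avg = (wp_i + wp_j) / 2;	/* avg(W/P) over both cpus */

	return (long)(wp_i > avg ? wp_i : avg) -
	       (long)(wp_j < avg ? wp_j : avg);
}

int main(void)
{
	/* two nice-0 tasks on cpu0, one on cpu1, equal compute capacity */
	unsigned long cpu0_tasks[] = { 1024, 1024 }, cpu1_tasks[] = { 1024 };
	unsigned long w0 = cpu_weight(cpu0_tasks, 2);
	unsigned long w1 = cpu_weight(cpu1_tasks, 1);

	/* prints "imb = 1024" */
	printf("imb = %ld\n", imbalance(w0, 1024, w1, 1024));
	return 0;
}
#endif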
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004055
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09004056static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4057
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004058#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01004059#define LBF_NEED_BREAK 0x02
Peter Zijlstra62633222013-08-19 12:41:09 +02004060#define LBF_DST_PINNED 0x04
4061#define LBF_SOME_PINNED 0x08
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004062
4063struct lb_env {
4064 struct sched_domain *sd;
4065
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004066 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05304067 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004068
4069 int dst_cpu;
4070 struct rq *dst_rq;
4071
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304072 struct cpumask *dst_grpmask;
4073 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004074 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004075 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08004076 /* The set of CPUs under consideration for load-balancing */
4077 struct cpumask *cpus;
4078
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004079 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004080
4081 unsigned int loop;
4082 unsigned int loop_break;
4083 unsigned int loop_max;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004084};
4085
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004086/*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004087 * move_task - move a task from one runqueue to another runqueue.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004088 * Both runqueues must be locked.
4089 */
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004090static void move_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004091{
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004092 deactivate_task(env->src_rq, p, 0);
4093 set_task_cpu(p, env->dst_cpu);
4094 activate_task(env->dst_rq, p, 0);
4095 check_preempt_curr(env->dst_rq, p, 0);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004096}
4097
4098/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02004099 * Is this task likely cache-hot:
4100 */
4101static int
4102task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4103{
4104 s64 delta;
4105
4106 if (p->sched_class != &fair_sched_class)
4107 return 0;
4108
4109 if (unlikely(p->policy == SCHED_IDLE))
4110 return 0;
4111
4112 /*
4113 * Buddy candidates are cache hot:
4114 */
4115 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4116 (&p->se == cfs_rq_of(&p->se)->next ||
4117 &p->se == cfs_rq_of(&p->se)->last))
4118 return 1;
4119
4120 if (sysctl_sched_migration_cost == -1)
4121 return 1;
4122 if (sysctl_sched_migration_cost == 0)
4123 return 0;
4124
4125 delta = now - p->se.exec_start;
4126
4127 return delta < (s64)sysctl_sched_migration_cost;
4128}
4129
Mel Gorman3a7053b2013-10-07 11:29:00 +01004130#ifdef CONFIG_NUMA_BALANCING
4131/* Returns true if the destination node has incurred more faults */
4132static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
4133{
4134 int src_nid, dst_nid;
4135
4136 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
4137 !(env->sd->flags & SD_NUMA)) {
4138 return false;
4139 }
4140
4141 src_nid = cpu_to_node(env->src_cpu);
4142 dst_nid = cpu_to_node(env->dst_cpu);
4143
4144 if (src_nid == dst_nid ||
4145 p->numa_migrate_seq >= sysctl_numa_balancing_settle_count)
4146 return false;
4147
4148 if (dst_nid == p->numa_preferred_nid ||
4149 p->numa_faults[dst_nid] > p->numa_faults[src_nid])
4150 return true;
4151
4152 return false;
4153}
Mel Gorman7a0f3082013-10-07 11:29:01 +01004154
4155
4156static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
4157{
4158 int src_nid, dst_nid;
4159
4160 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
4161 return false;
4162
4163 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
4164 return false;
4165
4166 src_nid = cpu_to_node(env->src_cpu);
4167 dst_nid = cpu_to_node(env->dst_cpu);
4168
4169 if (src_nid == dst_nid ||
4170 p->numa_migrate_seq >= sysctl_numa_balancing_settle_count)
4171 return false;
4172
4173 if (p->numa_faults[dst_nid] < p->numa_faults[src_nid])
4174 return true;
4175
4176 return false;
4177}
4178
Mel Gorman3a7053b2013-10-07 11:29:00 +01004179#else
4180static inline bool migrate_improves_locality(struct task_struct *p,
4181 struct lb_env *env)
4182{
4183 return false;
4184}
Mel Gorman7a0f3082013-10-07 11:29:01 +01004185
4186static inline bool migrate_degrades_locality(struct task_struct *p,
4187 struct lb_env *env)
4188{
4189 return false;
4190}
Mel Gorman3a7053b2013-10-07 11:29:00 +01004191#endif
4192
Peter Zijlstra029632f2011-10-25 10:00:11 +02004193/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004194 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
4195 */
4196static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004197int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004198{
4199 int tsk_cache_hot = 0;
4200 /*
4201 * We do not migrate tasks that:
Joonsoo Kimd3198082013-04-23 17:27:40 +09004202 * 1) are throttled (see throttled_lb_pair()), or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004203 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09004204 * 3) are currently running (obviously), or
4205 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004206 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09004207 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
4208 return 0;
4209
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004210 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004211 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304212
Lucas De Marchi41acab82010-03-10 23:37:45 -03004213 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304214
Peter Zijlstra62633222013-08-19 12:41:09 +02004215 env->flags |= LBF_SOME_PINNED;
4216
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304217 /*
4218 * Remember if this task can be migrated to any other cpu in
4219 * our sched_group. We may want to revisit it if we couldn't
4220 * meet load balance goals by pulling other tasks on src_cpu.
4221 *
4222 * Also avoid computing new_dst_cpu if we have already computed
4223 * one in current iteration.
4224 */
Peter Zijlstra62633222013-08-19 12:41:09 +02004225 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304226 return 0;
4227
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004228 /* Prevent to re-select dst_cpu via env's cpus */
4229 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
4230 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
Peter Zijlstra62633222013-08-19 12:41:09 +02004231 env->flags |= LBF_DST_PINNED;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004232 env->new_dst_cpu = cpu;
4233 break;
4234 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304235 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004236
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004237 return 0;
4238 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304239
4240 /* Record that we found at least one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004241 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004242
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004243 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03004244 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004245 return 0;
4246 }
4247
4248 /*
4249 * Aggressive migration if:
Mel Gorman3a7053b2013-10-07 11:29:00 +01004250 * 1) the destination NUMA node is preferred, or
4251 * 2) task is cache cold, or
4252 * 3) too many balance attempts have failed.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004253 */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004254 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
Mel Gorman7a0f3082013-10-07 11:29:01 +01004255 if (!tsk_cache_hot)
4256 tsk_cache_hot = migrate_degrades_locality(p, env);
Mel Gorman3a7053b2013-10-07 11:29:00 +01004257
4258 if (migrate_improves_locality(p, env)) {
4259#ifdef CONFIG_SCHEDSTATS
4260 if (tsk_cache_hot) {
4261 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
4262 schedstat_inc(p, se.statistics.nr_forced_migrations);
4263 }
4264#endif
4265 return 1;
4266 }
4267
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004268 if (!tsk_cache_hot ||
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004269 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004270
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004271 if (tsk_cache_hot) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004272 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03004273 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004274 }
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004275
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004276 return 1;
4277 }
4278
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004279 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4280 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004281}
4282
Peter Zijlstra897c3952009-12-17 17:45:42 +01004283/*
4284 * move_one_task tries to move exactly one task from busiest to this_rq, as
4285 * part of active balancing operations within "domain".
4286 * Returns 1 if successful and 0 otherwise.
4287 *
4288 * Called with both runqueues locked.
4289 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004290static int move_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01004291{
4292 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004293
Peter Zijlstra367456c2012-02-20 21:49:09 +01004294 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01004295 if (!can_migrate_task(p, env))
4296 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004297
Peter Zijlstra367456c2012-02-20 21:49:09 +01004298 move_task(p, env);
4299 /*
4300 * Right now, this is only the second place move_task()
4301 * is called, so we can safely collect move_task()
4302 * stats here rather than inside move_task().
4303 */
4304 schedstat_inc(env->sd, lb_gained[env->idle]);
4305 return 1;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004306 }
Peter Zijlstra897c3952009-12-17 17:45:42 +01004307 return 0;
4308}
4309
Peter Zijlstra367456c2012-02-20 21:49:09 +01004310static unsigned long task_h_load(struct task_struct *p);
4311
Peter Zijlstraeb953082012-04-17 13:38:40 +02004312static const unsigned int sched_nr_migrate_break = 32;
4313
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004314/*
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004315 * move_tasks tries to move up to imbalance weighted load from busiest to
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004316 * this_rq, as part of a balancing operation within domain "sd".
4317 * Returns 1 if successful and 0 otherwise.
4318 *
4319 * Called with both runqueues locked.
4320 */
4321static int move_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004322{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004323 struct list_head *tasks = &env->src_rq->cfs_tasks;
4324 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004325 unsigned long load;
4326 int pulled = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004327
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004328 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004329 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004330
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004331 while (!list_empty(tasks)) {
4332 p = list_first_entry(tasks, struct task_struct, se.group_node);
4333
Peter Zijlstra367456c2012-02-20 21:49:09 +01004334 env->loop++;
4335 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004336 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004337 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004338
4339 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01004340 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02004341 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004342 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01004343 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02004344 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004345
Joonsoo Kimd3198082013-04-23 17:27:40 +09004346 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01004347 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004348
Peter Zijlstra367456c2012-02-20 21:49:09 +01004349 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004350
Peter Zijlstraeb953082012-04-17 13:38:40 +02004351 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004352 goto next;
4353
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004354 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004355 goto next;
4356
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004357 move_task(p, env);
Peter Zijlstraee00e662009-12-17 17:25:20 +01004358 pulled++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004359 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004360
4361#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01004362 /*
4363 * NEWIDLE balancing is a source of latency, so preemptible
4364 * kernels will stop after the first task is pulled to minimize
4365 * the critical section.
4366 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004367 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004368 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004369#endif
4370
Peter Zijlstraee00e662009-12-17 17:25:20 +01004371 /*
4372 * We only want to steal up to the prescribed amount of
4373 * weighted load.
4374 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004375 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004376 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004377
Peter Zijlstra367456c2012-02-20 21:49:09 +01004378 continue;
4379next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004380 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004381 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004382
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004383 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004384 * Right now, this is one of only two places move_task() is called,
4385 * so we can safely collect move_task() stats here rather than
4386 * inside move_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004387 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004388 schedstat_add(env->sd, lb_gained[env->idle], pulled);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004389
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004390 return pulled;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004391}
4392
Peter Zijlstra230059de2009-12-17 17:47:12 +01004393#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004394/*
4395 * update tg->load_weight by folding this cpu's load_avg
4396 */
Paul Turner48a16752012-10-04 13:18:31 +02004397static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004398{
Paul Turner48a16752012-10-04 13:18:31 +02004399 struct sched_entity *se = tg->se[cpu];
4400 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004401
Paul Turner48a16752012-10-04 13:18:31 +02004402 /* throttled entities do not contribute to load */
4403 if (throttled_hierarchy(cfs_rq))
4404 return;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004405
Paul Turneraff3e492012-10-04 13:18:30 +02004406 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004407
Paul Turner82958362012-10-04 13:18:31 +02004408 if (se) {
4409 update_entity_load_avg(se, 1);
4410 /*
4411 * We pivot on our runnable average having decayed to zero for
4412 * list removal. This generally implies that all our children
4413 * have also been removed (modulo rounding error or bandwidth
4414 * control); however, such cases are rare and we can fix these
4415 * at enqueue.
4416 *
4417 * TODO: fix up out-of-order children on enqueue.
4418 */
4419 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4420 list_del_leaf_cfs_rq(cfs_rq);
4421 } else {
Paul Turner48a16752012-10-04 13:18:31 +02004422 struct rq *rq = rq_of(cfs_rq);
Paul Turner82958362012-10-04 13:18:31 +02004423 update_rq_runnable_avg(rq, rq->nr_running);
4424 }
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004425}
4426
Paul Turner48a16752012-10-04 13:18:31 +02004427static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004428{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004429 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02004430 struct cfs_rq *cfs_rq;
4431 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004432
Paul Turner48a16752012-10-04 13:18:31 +02004433 raw_spin_lock_irqsave(&rq->lock, flags);
4434 update_rq_clock(rq);
Peter Zijlstra9763b672011-07-13 13:09:25 +02004435 /*
4436 * Iterates the task_group tree in a bottom up fashion, see
4437 * list_add_leaf_cfs_rq() for details.
4438 */
Paul Turner64660c82011-07-21 09:43:36 -07004439 for_each_leaf_cfs_rq(rq, cfs_rq) {
Paul Turner48a16752012-10-04 13:18:31 +02004440 /*
4441 * Note: We may want to consider periodically releasing
4442 * rq->lock around these updates so that creating many task
4443 * groups does not result in continually extending hold time.
4444 */
4445 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
Paul Turner64660c82011-07-21 09:43:36 -07004446 }
Paul Turner48a16752012-10-04 13:18:31 +02004447
4448 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004449}
4450
Peter Zijlstra9763b672011-07-13 13:09:25 +02004451/*
Vladimir Davydov68520792013-07-15 17:49:19 +04004452 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
Peter Zijlstra9763b672011-07-13 13:09:25 +02004453 * This needs to be done in a top-down fashion because the load of a child
4454 * group is a fraction of its parent's load.
4455 */
Vladimir Davydov68520792013-07-15 17:49:19 +04004456static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
Peter Zijlstra9763b672011-07-13 13:09:25 +02004457{
Vladimir Davydov68520792013-07-15 17:49:19 +04004458 struct rq *rq = rq_of(cfs_rq);
4459 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004460 unsigned long now = jiffies;
Vladimir Davydov68520792013-07-15 17:49:19 +04004461 unsigned long load;
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004462
Vladimir Davydov68520792013-07-15 17:49:19 +04004463 if (cfs_rq->last_h_load_update == now)
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004464 return;
4465
Vladimir Davydov68520792013-07-15 17:49:19 +04004466 cfs_rq->h_load_next = NULL;
4467 for_each_sched_entity(se) {
4468 cfs_rq = cfs_rq_of(se);
4469 cfs_rq->h_load_next = se;
4470 if (cfs_rq->last_h_load_update == now)
4471 break;
4472 }
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004473
Vladimir Davydov68520792013-07-15 17:49:19 +04004474 if (!se) {
Vladimir Davydov7e3115e2013-09-14 19:39:46 +04004475 cfs_rq->h_load = cfs_rq->runnable_load_avg;
Vladimir Davydov68520792013-07-15 17:49:19 +04004476 cfs_rq->last_h_load_update = now;
4477 }
4478
4479 while ((se = cfs_rq->h_load_next) != NULL) {
4480 load = cfs_rq->h_load;
4481 load = div64_ul(load * se->avg.load_avg_contrib,
4482 cfs_rq->runnable_load_avg + 1);
4483 cfs_rq = group_cfs_rq(se);
4484 cfs_rq->h_load = load;
4485 cfs_rq->last_h_load_update = now;
4486 }
Peter Zijlstra9763b672011-07-13 13:09:25 +02004487}
4488
Peter Zijlstra367456c2012-02-20 21:49:09 +01004489static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004490{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004491 struct cfs_rq *cfs_rq = task_cfs_rq(p);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004492
Vladimir Davydov68520792013-07-15 17:49:19 +04004493 update_cfs_rq_h_load(cfs_rq);
Alex Shia003a252013-06-20 10:18:51 +08004494 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
4495 cfs_rq->runnable_load_avg + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004496}
4497#else
Paul Turner48a16752012-10-04 13:18:31 +02004498static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004499{
4500}
4501
Peter Zijlstra367456c2012-02-20 21:49:09 +01004502static unsigned long task_h_load(struct task_struct *p)
4503{
Alex Shia003a252013-06-20 10:18:51 +08004504 return p->se.avg.load_avg_contrib;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004505}
4506#endif
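/*
 * Illustrative sketch (not kernel code): the hierarchical h_load used above
 * is just a product of "my contribution / parent's runnable load" fractions
 * taken top-down. The struct and function below are invented for the
 * example; only the arithmetic mirrors update_cfs_rq_h_load() and
 * task_h_load().
 */
#if 0
struct toy_level {
	unsigned long contrib;		/* entity's load_avg_contrib */
	unsigned long parent_runnable;	/* parent cfs_rq's runnable_load_avg */
};

/* walk from the root runqueue down to the task itself */
static unsigned long toy_task_h_load(unsigned long root_load,
				     const struct toy_level *lvl, int depth)
{
	unsigned long load = root_load;
	int i;

	for (i = 0; i < depth; i++)
		load = load * lvl[i].contrib / (lvl[i].parent_runnable + 1);

	return load;
}

/*
 * Example: root rq load 2048, a group entity contributing 1024 of it, and a
 * task contributing 512 of that group's 1024 runnable load:
 *
 *   2048 * 1024/2049 * 512/1025 ~= 511
 *
 * i.e. the task is "worth" roughly a quarter of the root runqueue's load.
 */
#endif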
4507
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004508/********** Helpers for find_busiest_group ************************/
4509/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004510 * sg_lb_stats - stats of a sched_group required for load_balancing
4511 */
4512struct sg_lb_stats {
4513 unsigned long avg_load; /* Avg load across the CPUs of the group */
4514 unsigned long group_load; /* Total load over the CPUs of the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004515 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004516 unsigned long load_per_task;
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004517 unsigned long group_power;
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004518 unsigned int sum_nr_running; /* Nr tasks running in the group */
4519 unsigned int group_capacity;
4520 unsigned int idle_cpus;
4521 unsigned int group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004522 int group_imb; /* Is there an imbalance in the group ? */
Nikhil Raofab47622010-10-15 13:12:29 -07004523 int group_has_capacity; /* Is there extra capacity in the group? */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004524};
4525
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004526/*
4527 * sd_lb_stats - Structure to store the statistics of a sched_domain
4528 * during load balancing.
4529 */
4530struct sd_lb_stats {
4531 struct sched_group *busiest; /* Busiest group in this sd */
4532 struct sched_group *local; /* Local group in this sd */
4533 unsigned long total_load; /* Total load of all groups in sd */
4534 unsigned long total_pwr; /* Total power of all groups in sd */
4535 unsigned long avg_load; /* Average load across all groups in sd */
4536
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004537 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004538 struct sg_lb_stats local_stat; /* Statistics of the local group */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004539};
4540
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004541static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
4542{
4543 /*
4544 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
4545 * local_stat because update_sg_lb_stats() does a full clear/assignment.
4546 * We must however clear busiest_stat::avg_load because
4547 * update_sd_pick_busiest() reads this before assignment.
4548 */
4549 *sds = (struct sd_lb_stats){
4550 .busiest = NULL,
4551 .local = NULL,
4552 .total_load = 0UL,
4553 .total_pwr = 0UL,
4554 .busiest_stat = {
4555 .avg_load = 0UL,
4556 },
4557 };
4558}
4559
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004560/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004561 * get_sd_load_idx - Obtain the load index for a given sched domain.
4562 * @sd: The sched_domain whose load_idx is to be obtained.
4563 * @idle: The idle status of the CPU for whose sd the load_idx is obtained.
Yacine Belkadie69f6182013-07-12 20:45:47 +02004564 *
4565 * Return: The load index.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004566 */
4567static inline int get_sd_load_idx(struct sched_domain *sd,
4568 enum cpu_idle_type idle)
4569{
4570 int load_idx;
4571
4572 switch (idle) {
4573 case CPU_NOT_IDLE:
4574 load_idx = sd->busy_idx;
4575 break;
4576
4577 case CPU_NEWLY_IDLE:
4578 load_idx = sd->newidle_idx;
4579 break;
4580 default:
4581 load_idx = sd->idle_idx;
4582 break;
4583 }
4584
4585 return load_idx;
4586}
4587
Li Zefan15f803c2013-03-05 16:07:11 +08004588static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004589{
Nikhil Rao1399fa72011-05-18 10:09:39 -07004590 return SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004591}
4592
4593unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4594{
4595 return default_scale_freq_power(sd, cpu);
4596}
4597
Li Zefan15f803c2013-03-05 16:07:11 +08004598static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004599{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004600 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004601 unsigned long smt_gain = sd->smt_gain;
4602
4603 smt_gain /= weight;
4604
4605 return smt_gain;
4606}
4607
4608unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4609{
4610 return default_scale_smt_power(sd, cpu);
4611}
4612
Li Zefan15f803c2013-03-05 16:07:11 +08004613static unsigned long scale_rt_power(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004614{
4615 struct rq *rq = cpu_rq(cpu);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004616 u64 total, available, age_stamp, avg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004617
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004618 /*
4619 * Since we're reading these variables without serialization, make sure
4620 * we read them once before doing sanity checks on them.
4621 */
4622 age_stamp = ACCESS_ONCE(rq->age_stamp);
4623 avg = ACCESS_ONCE(rq->rt_avg);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004624
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004625 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004626
4627 if (unlikely(total < avg)) {
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004628 /* Ensures that power won't end up being negative */
4629 available = 0;
4630 } else {
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004631 available = total - avg;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004632 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004633
Nikhil Rao1399fa72011-05-18 10:09:39 -07004634 if (unlikely((s64)total < SCHED_POWER_SCALE))
4635 total = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004636
Nikhil Rao1399fa72011-05-18 10:09:39 -07004637 total >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004638
4639 return div_u64(available, total);
4640}
4641
4642static void update_cpu_power(struct sched_domain *sd, int cpu)
4643{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004644 unsigned long weight = sd->span_weight;
Nikhil Rao1399fa72011-05-18 10:09:39 -07004645 unsigned long power = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004646 struct sched_group *sdg = sd->groups;
4647
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004648 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4649 if (sched_feat(ARCH_POWER))
4650 power *= arch_scale_smt_power(sd, cpu);
4651 else
4652 power *= default_scale_smt_power(sd, cpu);
4653
Nikhil Rao1399fa72011-05-18 10:09:39 -07004654 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004655 }
4656
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004657 sdg->sgp->power_orig = power;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004658
4659 if (sched_feat(ARCH_POWER))
4660 power *= arch_scale_freq_power(sd, cpu);
4661 else
4662 power *= default_scale_freq_power(sd, cpu);
4663
Nikhil Rao1399fa72011-05-18 10:09:39 -07004664 power >>= SCHED_POWER_SHIFT;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004665
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004666 power *= scale_rt_power(cpu);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004667 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004668
4669 if (!power)
4670 power = 1;
4671
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004672 cpu_rq(cpu)->cpu_power = power;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004673 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004674}
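/*
 * Illustrative sketch (not kernel code): cpu_power is a chain of fixed-point
 * multiplications around SCHED_POWER_SCALE (1024). The sample factors are
 * assumptions for the example: an smt_gain of 1178 shared by two siblings
 * (589 each), no architectural frequency scaling, and roughly 10% of recent
 * time spent in RT/irq work (scale_rt_power() ~= 921).
 */
#if 0
#define TOY_SCALE	1024UL

static unsigned long toy_cpu_power(unsigned long smt_factor,
				   unsigned long freq_factor,
				   unsigned long rt_factor)
{
	unsigned long power = TOY_SCALE;

	power = power * smt_factor / TOY_SCALE;		/* SMT siblings share a core */
	power = power * freq_factor / TOY_SCALE;	/* arch frequency scaling */
	power = power * rt_factor / TOY_SCALE;		/* time left after RT/irq */

	return power ? power : 1;			/* never report zero power */
}

/* toy_cpu_power(589, 1024, 921) == 529: about half of a "full" cpu */
#endif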
4675
Peter Zijlstra029632f2011-10-25 10:00:11 +02004676void update_group_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004677{
4678 struct sched_domain *child = sd->child;
4679 struct sched_group *group, *sdg = sd->groups;
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004680 unsigned long power, power_orig;
Vincent Guittot4ec44122011-12-12 20:21:08 +01004681 unsigned long interval;
4682
4683 interval = msecs_to_jiffies(sd->balance_interval);
4684 interval = clamp(interval, 1UL, max_load_balance_interval);
4685 sdg->sgp->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004686
4687 if (!child) {
4688 update_cpu_power(sd, cpu);
4689 return;
4690 }
4691
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004692 power_orig = power = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004693
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004694 if (child->flags & SD_OVERLAP) {
4695 /*
4696 * SD_OVERLAP domains cannot assume that child groups
4697 * span the current group.
4698 */
4699
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004700 for_each_cpu(cpu, sched_group_cpus(sdg)) {
4701 struct sched_group *sg = cpu_rq(cpu)->sd->groups;
4702
4703 power_orig += sg->sgp->power_orig;
4704 power += sg->sgp->power;
4705 }
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004706 } else {
4707 /*
4708 * !SD_OVERLAP domains can assume that child groups
4709 * span the current group.
4710 */
4711
4712 group = child->groups;
4713 do {
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004714 power_orig += group->sgp->power_orig;
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004715 power += group->sgp->power;
4716 group = group->next;
4717 } while (group != child->groups);
4718 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004719
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004720 sdg->sgp->power_orig = power_orig;
4721 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004722}
4723
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004724/*
4725 * Try and fix up capacity for tiny siblings, this is needed when
4726 * things like SD_ASYM_PACKING need f_b_g to select another sibling
4727 * which on its own isn't powerful enough.
4728 *
4729 * See update_sd_pick_busiest() and check_asym_packing().
4730 */
4731static inline int
4732fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
4733{
4734 /*
Nikhil Rao1399fa72011-05-18 10:09:39 -07004735 * Only siblings can have significantly less than SCHED_POWER_SCALE
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004736 */
Peter Zijlstraa6c75f22011-04-07 14:09:52 +02004737 if (!(sd->flags & SD_SHARE_CPUPOWER))
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004738 return 0;
4739
4740 /*
4741 * If ~90% of the cpu_power is still there, we're good.
4742 */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004743 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004744 return 1;
4745
4746 return 0;
4747}
4748
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004749/*
4750 * Group imbalance indicates (and tries to solve) the problem where balancing
4751 * groups is inadequate due to tsk_cpus_allowed() constraints.
4752 *
4753 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
4754 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
4755 * Something like:
4756 *
4757 *    { 0 1 2 3 } { 4 5 6 7 }
4758 *            *     * * *
4759 *
4760 * If we were to balance group-wise we'd place two tasks in the first group and
4761 * two tasks in the second group. Clearly this is undesired as it will overload
4762 * cpu 3 and leave one of the cpus in the second group unused.
4763 *
4764 * The current solution to this issue is detecting the skew in the first group
Peter Zijlstra62633222013-08-19 12:41:09 +02004765 * by noticing the lower domain failed to reach balance and had difficulty
4766 * moving tasks due to affinity constraints.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004767 *
4768 * When this is so detected, this group becomes a candidate for busiest; see
4769 * update_sd_pick_busiest(). And calculate_imbalance() and
Peter Zijlstra62633222013-08-19 12:41:09 +02004770 * find_busiest_group() avoid some of the usual balance conditions to allow it
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004771 * to create an effective group imbalance.
4772 *
4773 * This is a somewhat tricky proposition since the next run might not find the
4774 * group imbalance and decide the groups need to be balanced again. A most
4775 * subtle and fragile situation.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004776 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004777
Peter Zijlstra62633222013-08-19 12:41:09 +02004778static inline int sg_imbalanced(struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004779{
Peter Zijlstra62633222013-08-19 12:41:09 +02004780 return group->sgp->imbalance;
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004781}
4782
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004783/*
4784 * Compute the group capacity.
4785 *
Peter Zijlstrac61037e2013-08-28 12:40:38 +02004786 * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
4787 * first dividing out the smt factor and computing the actual number of cores
4788 * and then limiting the power-based capacity with that.
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004789 */
4790static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
4791{
Peter Zijlstrac61037e2013-08-28 12:40:38 +02004792 unsigned int capacity, smt, cpus;
4793 unsigned int power, power_orig;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004794
Peter Zijlstrac61037e2013-08-28 12:40:38 +02004795 power = group->sgp->power;
4796 power_orig = group->sgp->power_orig;
4797 cpus = group->group_weight;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004798
Peter Zijlstrac61037e2013-08-28 12:40:38 +02004799 /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
4800 smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
4801 capacity = cpus / smt; /* cores */
4802
4803 capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004804 if (!capacity)
4805 capacity = fix_small_capacity(env->sd, group);
4806
4807 return capacity;
4808}
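/*
 * Worked example (assumed numbers): a group of 8 SMT siblings that really
 * form 4 cores, each sibling reporting a power_orig of 589 (an smt_gain of
 * 1178 split two ways). Rounding the summed power directly would yield a
 * phantom core:
 *
 *   DIV_ROUND_CLOSEST(8 * 589, 1024)         == 5   (one core too many)
 *
 * whereas the computation above first recovers the SMT factor:
 *
 *   smt      = DIV_ROUND_UP(1024 * 8, 4712)  == 2
 *   capacity = 8 / 2                         == 4   (the real core count)
 */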
4809
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004810/**
4811 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
4812 * @env: The load balancing environment.
4813 * @group: sched_group whose statistics are to be updated.
4814 * @load_idx: Load index of sched_domain of this_cpu for load calc.
4815 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004816 * @sgs: variable to hold the statistics for this group.
4817 */
4818static inline void update_sg_lb_stats(struct lb_env *env,
4819 struct sched_group *group, int load_idx,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09004820 int local_group, struct sg_lb_stats *sgs)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004821{
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02004822 unsigned long nr_running;
4823 unsigned long load;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004824 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004825
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004826 memset(sgs, 0, sizeof(*sgs));
4827
Michael Wangb94031302012-07-12 16:10:13 +08004828 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004829 struct rq *rq = cpu_rq(i);
4830
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004831 nr_running = rq->nr_running;
4832
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004833 /* Bias balancing toward cpus of our domain */
Peter Zijlstra62633222013-08-19 12:41:09 +02004834 if (local_group)
Peter Zijlstra04f733b2012-05-11 00:12:02 +02004835 load = target_load(i, load_idx);
Peter Zijlstra62633222013-08-19 12:41:09 +02004836 else
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004837 load = source_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004838
4839 sgs->group_load += load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02004840 sgs->sum_nr_running += nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004841 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004842 if (idle_cpu(i))
4843 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004844 }
4845
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004846 /* Adjust by relative CPU power of the group */
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004847 sgs->group_power = group->sgp->power;
4848 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004849
Suresh Siddhadd5feea2010-02-23 16:13:52 -08004850 if (sgs->sum_nr_running)
Peter Zijlstra38d0f772013-08-15 19:47:56 +02004851 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004852
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07004853 sgs->group_weight = group->group_weight;
Nikhil Raofab47622010-10-15 13:12:29 -07004854
Peter Zijlstrab37d9312013-08-28 11:50:34 +02004855 sgs->group_imb = sg_imbalanced(group);
4856 sgs->group_capacity = sg_capacity(env, group);
4857
Nikhil Raofab47622010-10-15 13:12:29 -07004858 if (sgs->group_capacity > sgs->sum_nr_running)
4859 sgs->group_has_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004860}
4861
4862/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10004863 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07004864 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004865 * @sds: sched_domain statistics
4866 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10004867 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10004868 *
4869 * Determine if @sg is a busier group than the previously selected
4870 * busiest group.
Yacine Belkadie69f6182013-07-12 20:45:47 +02004871 *
4872 * Return: %true if @sg is a busier group than the previously selected
4873 * busiest group. %false otherwise.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004874 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004875static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10004876 struct sd_lb_stats *sds,
4877 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004878 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004879{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004880 if (sgs->avg_load <= sds->busiest_stat.avg_load)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004881 return false;
4882
4883 if (sgs->sum_nr_running > sgs->group_capacity)
4884 return true;
4885
4886 if (sgs->group_imb)
4887 return true;
4888
4889 /*
4890 * ASYM_PACKING needs to move all the work to the lowest
4891 * numbered CPUs in the group, therefore mark all groups
4892 * higher than ourself as busy.
4893 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004894 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
4895 env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10004896 if (!sds->busiest)
4897 return true;
4898
4899 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
4900 return true;
4901 }
4902
4903 return false;
4904}
4905
4906/**
Hui Kang461819a2011-10-11 23:00:59 -04004907 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07004908 * @env: The load balancing environment.
4910 * @sds: variable to hold the statistics for this sched_domain.
4911 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004912static inline void update_sd_lb_stats(struct lb_env *env,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09004913 struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004914{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004915 struct sched_domain *child = env->sd->child;
4916 struct sched_group *sg = env->sd->groups;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004917 struct sg_lb_stats tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004918 int load_idx, prefer_sibling = 0;
4919
4920 if (child && child->flags & SD_PREFER_SIBLING)
4921 prefer_sibling = 1;
4922
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004923 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004924
4925 do {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004926 struct sg_lb_stats *sgs = &tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004927 int local_group;
4928
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004929 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004930 if (local_group) {
4931 sds->local = sg;
4932 sgs = &sds->local_stat;
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004933
4934 if (env->idle != CPU_NEWLY_IDLE ||
4935 time_after_eq(jiffies, sg->sgp->next_update))
4936 update_group_power(env->sd, env->dst_cpu);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004937 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004938
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004939 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004940
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004941 if (local_group)
4942 goto next_group;
4943
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004944 /*
4945 * In case the child domain prefers tasks to go to siblings
Michael Neuling532cb4c2010-06-08 14:57:02 +10004946 * first, lower the sg capacity to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07004947 * and move all the excess tasks away. We lower the capacity
4948 * of a group only if the local group has the capacity to fit
4949 * these excess tasks, i.e. nr_running < group_capacity. The
4950 * extra check prevents the case where you always pull from the
4951 * heaviest group when it is already under-utilized (possible
4952 * heaviest group when it is already under-utilized (possible
4953 * when a single large-weight task outweighs the rest of the tasks on the system).
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004954 if (prefer_sibling && sds->local &&
4955 sds->local_stat.group_has_capacity)
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004956 sgs->group_capacity = min(sgs->group_capacity, 1U);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004957
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004958 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10004959 sds->busiest = sg;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004960 sds->busiest_stat = *sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004961 }
4962
Peter Zijlstrab72ff132013-08-28 10:32:32 +02004963next_group:
4964 /* Now, start updating sd_lb_stats */
4965 sds->total_load += sgs->group_load;
4966 sds->total_pwr += sgs->group_power;
4967
Michael Neuling532cb4c2010-06-08 14:57:02 +10004968 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004969 } while (sg != env->sd->groups);
Michael Neuling532cb4c2010-06-08 14:57:02 +10004970}
4971
Michael Neuling532cb4c2010-06-08 14:57:02 +10004972/**
4973 * check_asym_packing - Check to see if the group is packed into the
4974 * sched domain.
4975 *
4976 * This is primarily intended to be used at the sibling level. Some
4977 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4978 * case of POWER7, it can move to lower SMT modes only when higher
4979 * threads are idle. When in lower SMT modes, the threads will
4980 * perform better since they share less core resources. Hence when we
4981 * have idle threads, we want them to be the higher ones.
4982 *
4983 * This packing function is run on idle threads. It checks to see if
4984 * the busiest CPU in this domain (core in the P7 case) has a higher
4985 * CPU number than the packing function is being run on. Here we are
4986 * assuming lower CPU number will be equivalent to lower a SMT thread
4987 * assuming a lower CPU number will be equivalent to a lower SMT thread
4988 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02004989 * Return: 1 when packing is required and a task should be moved to
Michael Neulingb6b12292010-06-10 12:06:21 +10004990 * this CPU. The amount of the imbalance is returned in env->imbalance.
4991 *
Randy Dunlapcd968912012-06-08 13:18:33 -07004992 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10004993 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10004994 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004995static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10004996{
4997 int busiest_cpu;
4998
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004999 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10005000 return 0;
5001
5002 if (!sds->busiest)
5003 return 0;
5004
5005 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005006 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005007 return 0;
5008
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005009 env->imbalance = DIV_ROUND_CLOSEST(
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005010 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
5011 SCHED_POWER_SCALE);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005012
Michael Neuling532cb4c2010-06-08 14:57:02 +10005013 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005014}
5015
5016/**
5017 * fix_small_imbalance - Calculate the minor imbalance that exists
5018 * amongst the groups of a sched_domain, during
5019 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07005020 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005021 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005022 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005023static inline
5024void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005025{
5026 unsigned long tmp, pwr_now = 0, pwr_move = 0;
5027 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005028 unsigned long scaled_busy_load_per_task;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005029 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005030
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005031 local = &sds->local_stat;
5032 busiest = &sds->busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005033
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005034 if (!local->sum_nr_running)
5035 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
5036 else if (busiest->load_per_task > local->load_per_task)
5037 imbn = 1;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005038
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005039 scaled_busy_load_per_task =
5040 (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005041 busiest->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005042
Vladimir Davydov3029ede2013-09-15 17:49:14 +04005043 if (busiest->avg_load + scaled_busy_load_per_task >=
5044 local->avg_load + (scaled_busy_load_per_task * imbn)) {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005045 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005046 return;
5047 }
5048
5049 /*
5050 * OK, we don't have enough imbalance to justify moving tasks;
5051 * however we may be able to increase total CPU power used by
5052 * moving them.
5053 */
5054
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005055 pwr_now += busiest->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005056 min(busiest->load_per_task, busiest->avg_load);
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005057 pwr_now += local->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005058 min(local->load_per_task, local->avg_load);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005059 pwr_now /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005060
5061 /* Amount of load we'd subtract */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005062 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005063 busiest->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005064 if (busiest->avg_load > tmp) {
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005065 pwr_move += busiest->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005066 min(busiest->load_per_task,
5067 busiest->avg_load - tmp);
5068 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005069
5070 /* Amount of load we'd add */
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005071 if (busiest->avg_load * busiest->group_power <
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005072 busiest->load_per_task * SCHED_POWER_SCALE) {
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005073 tmp = (busiest->avg_load * busiest->group_power) /
5074 local->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005075 } else {
5076 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005077 local->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005078 }
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005079 pwr_move += local->group_power *
5080 min(local->load_per_task, local->avg_load + tmp);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005081 pwr_move /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005082
5083 /* Move if we gain throughput */
5084 if (pwr_move > pwr_now)
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005085 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005086}
5087
5088/**
5089 * calculate_imbalance - Calculate the amount of imbalance present within the
5090 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005091 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005092 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005093 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005094static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005095{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005096 unsigned long max_pull, load_above_capacity = ~0UL;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005097 struct sg_lb_stats *local, *busiest;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005098
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005099 local = &sds->local_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005100 busiest = &sds->busiest_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005101
5102 if (busiest->group_imb) {
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005103 /*
5104 * In the group_imb case we cannot rely on group-wide averages
5105 * to ensure cpu-load equilibrium, look at wider averages. XXX
5106 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005107 busiest->load_per_task =
5108 min(busiest->load_per_task, sds->avg_load);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005109 }
5110
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005111 /*
5112 * In the presence of smp nice balancing, certain scenarios can have
5113 * max load less than avg load (as we skip the groups at or below
5114 * its cpu_power while calculating max_load).
5115 */
Vladimir Davydovb1885552013-09-15 17:49:13 +04005116 if (busiest->avg_load <= sds->avg_load ||
5117 local->avg_load >= sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005118 env->imbalance = 0;
5119 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005120 }
5121
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005122 if (!busiest->group_imb) {
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005123 /*
5124 * Don't want to pull so many tasks that a group would go idle.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005125 * Except of course for the group_imb case, since then we might
5126 * have to drop below capacity to reach cpu-load equilibrium.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005127 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005128 load_above_capacity =
5129 (busiest->sum_nr_running - busiest->group_capacity);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005130
Nikhil Rao1399fa72011-05-18 10:09:39 -07005131 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005132 load_above_capacity /= busiest->group_power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005133 }
5134
5135 /*
5136 * We're trying to get all the cpus to the average_load, so we don't
5137 * want to push ourselves above the average load, nor do we wish to
5138 * reduce the max loaded cpu below the average load. At the same time,
5139 * we also don't want to reduce the group load below the group capacity
5140 * (so that we can implement power-savings policies etc). Thus we look
5141 * for the minimum possible imbalance.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005142 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005143 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005144
5145 /* How much load to actually move to equalise the imbalance */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005146 env->imbalance = min(
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005147 max_pull * busiest->group_power,
5148 (sds->avg_load - local->avg_load) * local->group_power
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005149 ) / SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005150
5151 /*
5152 * if *imbalance is less than the average load per runnable task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005153 * there is no guarantee that any tasks will be moved, so we consider
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005154 * bumping its value to force at least one task to be
5155 * moved.
5156 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005157 if (env->imbalance < busiest->load_per_task)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005158 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005159}
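/*
 * Illustrative sketch (not kernel code): the core of calculate_imbalance()
 * with the group_imb handling, the load_above_capacity clamp and the
 * fix_small_imbalance() fallback stripped out. The sample numbers are
 * invented: busiest group at 1536, local group at 512, domain average 1024,
 * both groups with a power of 1024.
 */
#if 0
static unsigned long toy_imbalance(unsigned long busiest_load,
				   unsigned long busiest_power,
				   unsigned long local_load,
				   unsigned long local_power,
				   unsigned long avg_load)
{
	/* don't pull the busiest group below the domain average ... */
	unsigned long max_pull = busiest_load - avg_load;
	/* ... and don't push the local group above it */
	unsigned long room = avg_load - local_load;
	unsigned long a = max_pull * busiest_power;
	unsigned long b = room * local_power;

	return (a < b ? a : b) / 1024;	/* SCHED_POWER_SCALE */
}

/* toy_imbalance(1536, 1024, 512, 1024, 1024) == 512 units of weighted load */
#endif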
Nikhil Raofab47622010-10-15 13:12:29 -07005160
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005161/******* find_busiest_group() helpers end here *********************/
5162
5163/**
5164 * find_busiest_group - Returns the busiest group within the sched_domain
5165 * if there is an imbalance. If there isn't an imbalance, and
5166 * the user has opted for power-savings, it returns a group whose
5167 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
5168 * such a group exists.
5169 *
5170 * Also calculates the amount of weighted load which should be moved
5171 * to restore balance.
5172 *
Randy Dunlapcd968912012-06-08 13:18:33 -07005173 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005174 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02005175 * Return: - The busiest group if imbalance exists.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005176 * - If no imbalance and user has opted for power-savings balance,
5177 * return the least loaded group whose CPUs can be
5178 * put to idle by rebalancing its tasks onto our group.
5179 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005180static struct sched_group *find_busiest_group(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005181{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005182 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005183 struct sd_lb_stats sds;
5184
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005185 init_sd_lb_stats(&sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005186
5187 /*
5188	 * Compute the various statistics relevant for load balancing at
5189 * this level.
5190 */
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005191 update_sd_lb_stats(env, &sds);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005192 local = &sds.local_stat;
5193 busiest = &sds.busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005194
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005195 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
5196 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10005197 return sds.busiest;
5198
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005199 /* There is no busy sibling group to pull tasks from */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005200 if (!sds.busiest || busiest->sum_nr_running == 0)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005201 goto out_balanced;
5202
Nikhil Rao1399fa72011-05-18 10:09:39 -07005203 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
Ken Chenb0432d82011-04-07 17:23:22 -07005204
Peter Zijlstra866ab432011-02-21 18:56:47 +01005205 /*
5206 * If the busiest group is imbalanced the below checks don't
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005207 * work because they assume all things are equal, which typically
Peter Zijlstra866ab432011-02-21 18:56:47 +01005208 * isn't true due to cpus_allowed constraints and the like.
5209 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005210 if (busiest->group_imb)
Peter Zijlstra866ab432011-02-21 18:56:47 +01005211 goto force_balance;
5212
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005213 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005214 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
5215 !busiest->group_has_capacity)
Nikhil Raofab47622010-10-15 13:12:29 -07005216 goto force_balance;
5217
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005218 /*
5219 * If the local group is more busy than the selected busiest group
5220 * don't try and pull any tasks.
5221 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005222 if (local->avg_load >= busiest->avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005223 goto out_balanced;
5224
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005225 /*
5226 * Don't pull any tasks if this group is already above the domain
5227 * average load.
5228 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005229 if (local->avg_load >= sds.avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005230 goto out_balanced;
5231
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005232 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005233 /*
5234		 * This cpu is idle. If the busiest group doesn't have
5235		 * more tasks than the number of available cpus, and there
5236		 * is no imbalance between this and the busiest group with
5237		 * respect to idle cpus, it is balanced.
5238 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005239 if ((local->idle_cpus < busiest->idle_cpus) &&
5240 busiest->sum_nr_running <= busiest->group_weight)
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005241 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01005242 } else {
5243 /*
5244 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
5245 * imbalance_pct to be conservative.
5246 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005247 if (100 * busiest->avg_load <=
5248 env->sd->imbalance_pct * local->avg_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01005249 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005250 }
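	/*
	 * For example, if this domain's imbalance_pct were 125 and
	 * local->avg_load were 1000, a busy cpu would only start pulling
	 * once 100 * busiest->avg_load > 125 * 1000, i.e. once the busiest
	 * group's average load exceeds 1250, a 25% margin that avoids
	 * bouncing tasks around for marginal differences.
	 */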
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005251
Nikhil Raofab47622010-10-15 13:12:29 -07005252force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005253 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005254 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005255 return sds.busiest;
5256
5257out_balanced:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005258 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005259 return NULL;
5260}
5261
5262/*
5263 * find_busiest_queue - find the busiest runqueue among the cpus in group.
5264 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005265static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08005266 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005267{
5268 struct rq *busiest = NULL, *rq;
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005269 unsigned long busiest_load = 0, busiest_power = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005270 int i;
5271
Peter Zijlstra6906a402013-08-19 15:20:21 +02005272 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005273 unsigned long power = power_of(i);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005274 unsigned long capacity = DIV_ROUND_CLOSEST(power,
5275 SCHED_POWER_SCALE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005276 unsigned long wl;
5277
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005278 if (!capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005279 capacity = fix_small_capacity(env->sd, group);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005280
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005281 rq = cpu_rq(i);
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005282 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005283
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005284 /*
5285 * When comparing with imbalance, use weighted_cpuload()
5286 * which is not scaled with the cpu power.
5287 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005288 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005289 continue;
5290
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005291 /*
5292 * For the load comparisons with the other cpu's, consider
5293 * the weighted_cpuload() scaled with the cpu power, so that
5294 * the load can be moved away from the cpu that is potentially
5295 * running at a lower capacity.
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005296 *
5297		 * Thus we're looking for max(wl_i / power_i); crosswise
5298		 * multiplication to rid ourselves of the division works out
5299		 * to: wl_i * power_j > wl_j * power_i, where j is our
5300 * previous maximum.
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005301 */
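		/*
		 * For instance (made-up numbers): with the current maximum on
		 * cpu j (wl_j = 200, power_j = 512) and a candidate cpu i
		 * (wl_i = 300, power_i = 1024) we compare
		 *   wl_i * power_j = 300 * 512  = 153600
		 *   wl_j * power_i = 200 * 1024 = 204800
		 * so cpu j stays the busiest: its smaller raw load sits on a
		 * cpu with only half the compute power.
		 */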
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005302 if (wl * busiest_power > busiest_load * power) {
5303 busiest_load = wl;
5304 busiest_power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005305 busiest = rq;
5306 }
5307 }
5308
5309 return busiest;
5310}
5311
5312/*
5313 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
5314 * so long as it is large enough.
5315 */
5316#define MAX_PINNED_INTERVAL 512
5317
5318/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09005319DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005320
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005321static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005322{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005323 struct sched_domain *sd = env->sd;
5324
5325 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005326
5327 /*
5328 * ASYM_PACKING needs to force migrate tasks from busy but
5329 * higher numbered CPUs in order to pack all tasks in the
5330 * lowest numbered CPUs.
5331 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005332 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005333 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005334 }
5335
5336 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5337}
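/*
 * E.g. with sd->cache_nice_tries == 1, regular balancing must fail more
 * than three times in a row (nr_balance_failed > 3) before we resort to
 * active balancing, i.e. to migrating the currently running task.
 */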
5338
Tejun Heo969c7922010-05-06 18:49:21 +02005339static int active_load_balance_cpu_stop(void *data);
5340
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005341static int should_we_balance(struct lb_env *env)
5342{
5343 struct sched_group *sg = env->sd->groups;
5344 struct cpumask *sg_cpus, *sg_mask;
5345 int cpu, balance_cpu = -1;
5346
5347 /*
5348	 * In the newly idle case, we will allow all the cpus
5349 * to do the newly idle load balance.
5350 */
5351 if (env->idle == CPU_NEWLY_IDLE)
5352 return 1;
5353
5354 sg_cpus = sched_group_cpus(sg);
5355 sg_mask = sched_group_mask(sg);
5356 /* Try to find first idle cpu */
5357 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
5358 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
5359 continue;
5360
5361 balance_cpu = cpu;
5362 break;
5363 }
5364
5365 if (balance_cpu == -1)
5366 balance_cpu = group_balance_cpu(sg);
5367
5368 /*
5369	 * The first idle cpu, or else the first cpu (busiest) in this sched group,
5370	 * is eligible for doing load balancing at this and higher domains.
5371 */
Joonsoo Kimb0cff9d2013-09-10 15:54:49 +09005372 return balance_cpu == env->dst_cpu;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005373}
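/*
 * Example of the election above: in a sched group spanning cpus 0-3 where
 * only cpu 2 is idle, should_we_balance() returns 1 only for
 * env->dst_cpu == 2; cpus 0, 1 and 3 give up immediately, so a single cpu
 * performs the balance for the whole group.  If no cpu in the group is
 * idle, the cpu returned by group_balance_cpu() is elected instead.
 */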
5374
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005375/*
5376 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5377 * tasks if there is an imbalance.
5378 */
5379static int load_balance(int this_cpu, struct rq *this_rq,
5380 struct sched_domain *sd, enum cpu_idle_type idle,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005381 int *continue_balancing)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005382{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305383 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra62633222013-08-19 12:41:09 +02005384 struct sched_domain *sd_parent = sd->parent;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005385 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005386 struct rq *busiest;
5387 unsigned long flags;
Joonsoo Kime6252c32013-04-23 17:27:41 +09005388 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005389
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005390 struct lb_env env = {
5391 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005392 .dst_cpu = this_cpu,
5393 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305394 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005395 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02005396 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08005397 .cpus = cpus,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005398 };
5399
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005400 /*
5401 * For NEWLY_IDLE load_balancing, we don't need to consider
5402	 * other cpus in our group.
5403 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005404 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005405 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005406
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005407 cpumask_copy(cpus, cpu_active_mask);
5408
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005409 schedstat_inc(sd, lb_count[idle]);
5410
5411redo:
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005412 if (!should_we_balance(&env)) {
5413 *continue_balancing = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005414 goto out_balanced;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005415 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005416
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005417 group = find_busiest_group(&env);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005418 if (!group) {
5419 schedstat_inc(sd, lb_nobusyg[idle]);
5420 goto out_balanced;
5421 }
5422
Michael Wangb94031302012-07-12 16:10:13 +08005423 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005424 if (!busiest) {
5425 schedstat_inc(sd, lb_nobusyq[idle]);
5426 goto out_balanced;
5427 }
5428
Michael Wang78feefc2012-08-06 16:41:59 +08005429 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005430
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005431 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005432
5433 ld_moved = 0;
5434 if (busiest->nr_running > 1) {
5435 /*
5436 * Attempt to move tasks. If find_busiest_group has found
5437 * an imbalance but busiest->nr_running <= 1, the group is
5438 * still unbalanced. ld_moved simply stays zero, so it is
5439 * correctly treated as an imbalance.
5440 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005441 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02005442 env.src_cpu = busiest->cpu;
5443 env.src_rq = busiest;
5444 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005445
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005446more_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005447 local_irq_save(flags);
Michael Wang78feefc2012-08-06 16:41:59 +08005448 double_rq_lock(env.dst_rq, busiest);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305449
5450 /*
5451 * cur_ld_moved - load moved in current iteration
5452 * ld_moved - cumulative load moved across iterations
5453 */
5454 cur_ld_moved = move_tasks(&env);
5455 ld_moved += cur_ld_moved;
Michael Wang78feefc2012-08-06 16:41:59 +08005456 double_rq_unlock(env.dst_rq, busiest);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005457 local_irq_restore(flags);
5458
5459 /*
5460 * some other cpu did the load balance for us.
5461 */
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305462 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5463 resched_cpu(env.dst_cpu);
5464
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09005465 if (env.flags & LBF_NEED_BREAK) {
5466 env.flags &= ~LBF_NEED_BREAK;
5467 goto more_balance;
5468 }
5469
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305470 /*
5471 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5472 * us and move them to an alternate dst_cpu in our sched_group
5473 * where they can run. The upper limit on how many times we
5474 * iterate on same src_cpu is dependent on number of cpus in our
5475 * sched_group.
5476 *
5477 * This changes load balance semantics a bit on who can move
5478 * load to a given_cpu. In addition to the given_cpu itself
5479		 * (or an ilb_cpu acting on its behalf where given_cpu is
5480 * nohz-idle), we now have balance_cpu in a position to move
5481 * load to given_cpu. In rare situations, this may cause
5482 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
5483 * _independently_ and at _same_ time to move some load to
5484		 * given_cpu) causing excess load to be moved to given_cpu.
5485		 * This, however, should not happen much in practice and
5486 * moreover subsequent load balance cycles should correct the
5487 * excess load moved.
5488 */
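		/*
		 * Concretely: suppose we are balancing into cpu 0 on behalf
		 * of its group {0-3}, but the tasks on src_cpu are pinned to
		 * cpus 2-3 only.  can_migrate_task() then records one of
		 * those cpus in env.new_dst_cpu and sets LBF_DST_PINNED, and
		 * the retry below moves the load there instead of giving up.
		 */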
Peter Zijlstra62633222013-08-19 12:41:09 +02005489 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305490
Vladimir Davydov7aff2e32013-09-15 21:30:13 +04005491			/* Prevent re-selecting dst_cpu via env's cpus */
5492 cpumask_clear_cpu(env.dst_cpu, env.cpus);
5493
Michael Wang78feefc2012-08-06 16:41:59 +08005494 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305495 env.dst_cpu = env.new_dst_cpu;
Peter Zijlstra62633222013-08-19 12:41:09 +02005496 env.flags &= ~LBF_DST_PINNED;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305497 env.loop = 0;
5498 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005499
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305500 /*
5501 * Go back to "more_balance" rather than "redo" since we
5502 * need to continue with same src_cpu.
5503 */
5504 goto more_balance;
5505 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005506
Peter Zijlstra62633222013-08-19 12:41:09 +02005507 /*
5508 * We failed to reach balance because of affinity.
5509 */
5510 if (sd_parent) {
5511 int *group_imbalance = &sd_parent->groups->sgp->imbalance;
5512
5513 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
5514 *group_imbalance = 1;
5515 } else if (*group_imbalance)
5516 *group_imbalance = 0;
5517 }
5518
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005519 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005520 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005521 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305522 if (!cpumask_empty(cpus)) {
5523 env.loop = 0;
5524 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005525 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305526 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005527 goto out_balanced;
5528 }
5529 }
5530
5531 if (!ld_moved) {
5532 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07005533 /*
5534 * Increment the failure counter only on periodic balance.
5535 * We do not want newidle balance, which can be very
5536		 * frequent, to pollute the failure counter, causing
5537 * excessive cache_hot migrations and active balances.
5538 */
5539 if (idle != CPU_NEWLY_IDLE)
5540 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005541
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005542 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005543 raw_spin_lock_irqsave(&busiest->lock, flags);
5544
Tejun Heo969c7922010-05-06 18:49:21 +02005545			/* don't kick the active_load_balance_cpu_stop
5546			 * if the curr task on the busiest cpu can't be
5547			 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005548 */
5549 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02005550 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005551 raw_spin_unlock_irqrestore(&busiest->lock,
5552 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005553 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005554 goto out_one_pinned;
5555 }
5556
Tejun Heo969c7922010-05-06 18:49:21 +02005557 /*
5558 * ->active_balance synchronizes accesses to
5559 * ->active_balance_work. Once set, it's cleared
5560 * only after active load balance is finished.
5561 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005562 if (!busiest->active_balance) {
5563 busiest->active_balance = 1;
5564 busiest->push_cpu = this_cpu;
5565 active_balance = 1;
5566 }
5567 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005568
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005569 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02005570 stop_one_cpu_nowait(cpu_of(busiest),
5571 active_load_balance_cpu_stop, busiest,
5572 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005573 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005574
5575 /*
5576 * We've kicked active balancing, reset the failure
5577 * counter.
5578 */
5579 sd->nr_balance_failed = sd->cache_nice_tries+1;
5580 }
5581 } else
5582 sd->nr_balance_failed = 0;
5583
5584 if (likely(!active_balance)) {
5585 /* We were unbalanced, so reset the balancing interval */
5586 sd->balance_interval = sd->min_interval;
5587 } else {
5588 /*
5589 * If we've begun active balancing, start to back off. This
5590 * case may not be covered by the all_pinned logic if there
5591 * is only 1 task on the busy runqueue (because we don't call
5592 * move_tasks).
5593 */
5594 if (sd->balance_interval < sd->max_interval)
5595 sd->balance_interval *= 2;
5596 }
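	/*
	 * E.g. a domain whose balance_interval starts at 8ms backs off to
	 * 16, 32, 64... ms across successive active-balance episodes until
	 * sd->max_interval caps the doubling; a later pass that balances
	 * without kicking active balancing resets it to sd->min_interval.
	 */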
5597
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005598 goto out;
5599
5600out_balanced:
5601 schedstat_inc(sd, lb_balanced[idle]);
5602
5603 sd->nr_balance_failed = 0;
5604
5605out_one_pinned:
5606 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005607 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02005608 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005609 (sd->balance_interval < sd->max_interval))
5610 sd->balance_interval *= 2;
5611
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08005612 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005613out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005614 return ld_moved;
5615}
5616
5617/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005618 * idle_balance is called by schedule() if this_cpu is about to become
5619 * idle. Attempts to pull tasks from other CPUs.
5620 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005621void idle_balance(int this_cpu, struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005622{
5623 struct sched_domain *sd;
5624 int pulled_task = 0;
5625 unsigned long next_balance = jiffies + HZ;
Jason Low9bd721c2013-09-13 11:26:52 -07005626 u64 curr_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005627
Frederic Weisbecker78becc22013-04-12 01:51:02 +02005628 this_rq->idle_stamp = rq_clock(this_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005629
5630 if (this_rq->avg_idle < sysctl_sched_migration_cost)
5631 return;
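	/*
	 * With sysctl_sched_migration_cost at its default of 500000ns, the
	 * check above skips newidle balancing entirely whenever this cpu
	 * has been averaging less than 0.5ms of idle time: the balance pass
	 * would likely cost more than the idle period it is trying to fill.
	 */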
5632
Peter Zijlstraf492e122009-12-23 15:29:42 +01005633 /*
5634 * Drop the rq->lock, but keep IRQ/preempt disabled.
5635 */
5636 raw_spin_unlock(&this_rq->lock);
5637
Paul Turner48a16752012-10-04 13:18:31 +02005638 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02005639 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005640 for_each_domain(this_cpu, sd) {
5641 unsigned long interval;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005642 int continue_balancing = 1;
Jason Low9bd721c2013-09-13 11:26:52 -07005643 u64 t0, domain_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005644
5645 if (!(sd->flags & SD_LOAD_BALANCE))
5646 continue;
5647
Jason Low9bd721c2013-09-13 11:26:52 -07005648 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
5649 break;
5650
Peter Zijlstraf492e122009-12-23 15:29:42 +01005651 if (sd->flags & SD_BALANCE_NEWIDLE) {
Jason Low9bd721c2013-09-13 11:26:52 -07005652 t0 = sched_clock_cpu(this_cpu);
5653
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005654 /* If we've pulled tasks over stop searching: */
Peter Zijlstraf492e122009-12-23 15:29:42 +01005655 pulled_task = load_balance(this_cpu, this_rq,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005656 sd, CPU_NEWLY_IDLE,
5657 &continue_balancing);
Jason Low9bd721c2013-09-13 11:26:52 -07005658
5659 domain_cost = sched_clock_cpu(this_cpu) - t0;
5660 if (domain_cost > sd->max_newidle_lb_cost)
5661 sd->max_newidle_lb_cost = domain_cost;
5662
5663 curr_cost += domain_cost;
Peter Zijlstraf492e122009-12-23 15:29:42 +01005664 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005665
5666 interval = msecs_to_jiffies(sd->balance_interval);
5667 if (time_after(next_balance, sd->last_balance + interval))
5668 next_balance = sd->last_balance + interval;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005669 if (pulled_task) {
5670 this_rq->idle_stamp = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005671 break;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005672 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005673 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005674 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01005675
5676 raw_spin_lock(&this_rq->lock);
5677
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005678 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5679 /*
5680 * We are going idle. next_balance may be set based on
5681 * a busy processor. So reset next_balance.
5682 */
5683 this_rq->next_balance = next_balance;
5684 }
Jason Low9bd721c2013-09-13 11:26:52 -07005685
5686 if (curr_cost > this_rq->max_idle_balance_cost)
5687 this_rq->max_idle_balance_cost = curr_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005688}
5689
5690/*
Tejun Heo969c7922010-05-06 18:49:21 +02005691 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
5692 * running tasks off the busiest CPU onto idle CPUs. It requires at
5693 * least 1 task to be running on each physical CPU where possible, and
5694 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005695 */
Tejun Heo969c7922010-05-06 18:49:21 +02005696static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005697{
Tejun Heo969c7922010-05-06 18:49:21 +02005698 struct rq *busiest_rq = data;
5699 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005700 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02005701 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005702 struct sched_domain *sd;
Tejun Heo969c7922010-05-06 18:49:21 +02005703
5704 raw_spin_lock_irq(&busiest_rq->lock);
5705
5706 /* make sure the requested cpu hasn't gone down in the meantime */
5707 if (unlikely(busiest_cpu != smp_processor_id() ||
5708 !busiest_rq->active_balance))
5709 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005710
5711 /* Is there any task to move? */
5712 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02005713 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005714
5715 /*
5716	 * This condition is "impossible"; if it occurs
5717 * we need to fix it. Originally reported by
5718 * Bjorn Helgaas on a 128-cpu setup.
5719 */
5720 BUG_ON(busiest_rq == target_rq);
5721
5722 /* move a task from busiest_rq to target_rq */
5723 double_lock_balance(busiest_rq, target_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005724
5725 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02005726 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005727 for_each_domain(target_cpu, sd) {
5728 if ((sd->flags & SD_LOAD_BALANCE) &&
5729 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
5730 break;
5731 }
5732
5733 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005734 struct lb_env env = {
5735 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005736 .dst_cpu = target_cpu,
5737 .dst_rq = target_rq,
5738 .src_cpu = busiest_rq->cpu,
5739 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005740 .idle = CPU_IDLE,
5741 };
5742
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005743 schedstat_inc(sd, alb_count);
5744
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005745 if (move_one_task(&env))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005746 schedstat_inc(sd, alb_pushed);
5747 else
5748 schedstat_inc(sd, alb_failed);
5749 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005750 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005751 double_unlock_balance(busiest_rq, target_rq);
Tejun Heo969c7922010-05-06 18:49:21 +02005752out_unlock:
5753 busiest_rq->active_balance = 0;
5754 raw_spin_unlock_irq(&busiest_rq->lock);
5755 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005756}
5757
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005758#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005759/*
5760 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005761 * - When one of the busy CPUs notices that there may be an idle rebalancing
5762 *   needed, it will kick the idle load balancer, which then does idle
5763 * load balancing for all the idle CPUs.
5764 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005765static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005766 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005767 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005768 unsigned long next_balance; /* in jiffy units */
5769} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005770
Peter Zijlstra8e7fbcb2012-01-09 11:28:35 +01005771static inline int find_new_ilb(int call_cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005772{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005773 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005774
Suresh Siddha786d6dc72011-12-01 17:07:35 -08005775 if (ilb < nr_cpu_ids && idle_cpu(ilb))
5776 return ilb;
5777
5778 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005779}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005780
5781/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005782 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
5783 * nohz_load_balancer CPU (if there is one), otherwise we fall back to any idle
5784 * CPU (if there is one).
5785 */
5786static void nohz_balancer_kick(int cpu)
5787{
5788 int ilb_cpu;
5789
5790 nohz.next_balance++;
5791
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005792 ilb_cpu = find_new_ilb(cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005793
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005794 if (ilb_cpu >= nr_cpu_ids)
5795 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005796
Suresh Siddhacd490c52011-12-06 11:26:34 -08005797 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08005798 return;
5799 /*
5800 * Use smp_send_reschedule() instead of resched_cpu().
5801 * This way we generate a sched IPI on the target cpu which
5802 * is idle. And the softirq performing nohz idle load balance
5803 * will be run before returning from the IPI.
5804 */
5805 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005806 return;
5807}
5808
Alex Shic1cc0172012-09-10 15:10:58 +08005809static inline void nohz_balance_exit_idle(int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08005810{
5811 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
5812 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
5813 atomic_dec(&nohz.nr_cpus);
5814 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
5815 }
5816}
5817
Suresh Siddha69e1e812011-12-01 17:07:33 -08005818static inline void set_cpu_sd_state_busy(void)
5819{
5820 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08005821
Suresh Siddha69e1e812011-12-01 17:07:33 -08005822 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05005823 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005824
5825 if (!sd || !sd->nohz_idle)
5826 goto unlock;
5827 sd->nohz_idle = 0;
5828
5829 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08005830 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005831unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08005832 rcu_read_unlock();
5833}
5834
5835void set_cpu_sd_state_idle(void)
5836{
5837 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08005838
Suresh Siddha69e1e812011-12-01 17:07:33 -08005839 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05005840 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005841
5842 if (!sd || sd->nohz_idle)
5843 goto unlock;
5844 sd->nohz_idle = 1;
5845
5846 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08005847 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02005848unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08005849 rcu_read_unlock();
5850}
5851
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005852/*
Alex Shic1cc0172012-09-10 15:10:58 +08005853 * This routine will record that the cpu is going idle with tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08005854 * This info will be used in performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005855 */
Alex Shic1cc0172012-09-10 15:10:58 +08005856void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005857{
Suresh Siddha71325962012-01-19 18:28:57 -08005858 /*
5859 * If this cpu is going down, then nothing needs to be done.
5860 */
5861 if (!cpu_active(cpu))
5862 return;
5863
Alex Shic1cc0172012-09-10 15:10:58 +08005864 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
5865 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005866
Alex Shic1cc0172012-09-10 15:10:58 +08005867 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
5868 atomic_inc(&nohz.nr_cpus);
5869 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005870}
Suresh Siddha71325962012-01-19 18:28:57 -08005871
Paul Gortmaker0db06282013-06-19 14:53:51 -04005872static int sched_ilb_notifier(struct notifier_block *nfb,
Suresh Siddha71325962012-01-19 18:28:57 -08005873 unsigned long action, void *hcpu)
5874{
5875 switch (action & ~CPU_TASKS_FROZEN) {
5876 case CPU_DYING:
Alex Shic1cc0172012-09-10 15:10:58 +08005877 nohz_balance_exit_idle(smp_processor_id());
Suresh Siddha71325962012-01-19 18:28:57 -08005878 return NOTIFY_OK;
5879 default:
5880 return NOTIFY_DONE;
5881 }
5882}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005883#endif
5884
5885static DEFINE_SPINLOCK(balancing);
5886
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005887/*
5888 * Scale the max load_balance interval with the number of CPUs in the system.
5889 * This trades load-balance latency on larger machines for less cross talk.
5890 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005891void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005892{
5893 max_load_balance_interval = HZ*num_online_cpus()/10;
5894}
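/*
 * For example, with HZ == 1000 this caps the balance interval at
 * 1000 * 32 / 10 = 3200 jiffies (~3.2s) on a 32-cpu machine, and at
 * 400 jiffies (~0.4s) on a 4-cpu one.
 */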
5895
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005896/*
5897 * This function checks each scheduling domain to see if it is due to be balanced,
5898 * and initiates a balancing operation if so.
5899 *
Libinb9b08532013-04-01 19:14:01 +08005900 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005901 */
5902static void rebalance_domains(int cpu, enum cpu_idle_type idle)
5903{
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005904 int continue_balancing = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005905 struct rq *rq = cpu_rq(cpu);
5906 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02005907 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005908 /* Earliest time when we have to do rebalance again */
5909 unsigned long next_balance = jiffies + 60*HZ;
5910 int update_next_balance = 0;
Jason Lowf48627e2013-09-13 11:26:53 -07005911 int need_serialize, need_decay = 0;
5912 u64 max_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005913
Paul Turner48a16752012-10-04 13:18:31 +02005914 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08005915
Peter Zijlstradce840a2011-04-07 14:09:50 +02005916 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005917 for_each_domain(cpu, sd) {
Jason Lowf48627e2013-09-13 11:26:53 -07005918 /*
5919 * Decay the newidle max times here because this is a regular
5920 * visit to all the domains. Decay ~1% per second.
5921 */
5922 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
5923 sd->max_newidle_lb_cost =
5924 (sd->max_newidle_lb_cost * 253) / 256;
5925 sd->next_decay_max_lb_cost = jiffies + HZ;
5926 need_decay = 1;
5927 }
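		/*
		 * 253/256 is roughly a 1% decay per visit; with the next
		 * decay scheduled one HZ (about a second) later, a remembered
		 * max_newidle_lb_cost halves in about a minute
		 * ((253/256)^60 ~= 0.49) unless fresh, expensive newidle
		 * balances keep pushing it back up.
		 */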
5928 max_cost += sd->max_newidle_lb_cost;
5929
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005930 if (!(sd->flags & SD_LOAD_BALANCE))
5931 continue;
5932
Jason Lowf48627e2013-09-13 11:26:53 -07005933 /*
5934 * Stop the load balance at this level. There is another
5935 * CPU in our sched group which is doing load balancing more
5936 * actively.
5937 */
5938 if (!continue_balancing) {
5939 if (need_decay)
5940 continue;
5941 break;
5942 }
5943
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005944 interval = sd->balance_interval;
5945 if (idle != CPU_IDLE)
5946 interval *= sd->busy_factor;
5947
5948 /* scale ms to jiffies */
5949 interval = msecs_to_jiffies(interval);
Peter Zijlstra49c022e2011-04-05 10:14:25 +02005950 interval = clamp(interval, 1UL, max_load_balance_interval);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005951
5952 need_serialize = sd->flags & SD_SERIALIZE;
5953
5954 if (need_serialize) {
5955 if (!spin_trylock(&balancing))
5956 goto out;
5957 }
5958
5959 if (time_after_eq(jiffies, sd->last_balance + interval)) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005960 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005961 /*
Peter Zijlstra62633222013-08-19 12:41:09 +02005962 * The LBF_DST_PINNED logic could have changed
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09005963 * env->dst_cpu, so we can't know our idle
5964 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005965 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09005966 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005967 }
5968 sd->last_balance = jiffies;
5969 }
5970 if (need_serialize)
5971 spin_unlock(&balancing);
5972out:
5973 if (time_after(next_balance, sd->last_balance + interval)) {
5974 next_balance = sd->last_balance + interval;
5975 update_next_balance = 1;
5976 }
Jason Lowf48627e2013-09-13 11:26:53 -07005977 }
5978 if (need_decay) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005979 /*
Jason Lowf48627e2013-09-13 11:26:53 -07005980 * Ensure the rq-wide value also decays but keep it at a
5981 * reasonable floor to avoid funnies with rq->avg_idle.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005982 */
Jason Lowf48627e2013-09-13 11:26:53 -07005983 rq->max_idle_balance_cost =
5984 max((u64)sysctl_sched_migration_cost, max_cost);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005985 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005986 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005987
5988 /*
5989 * next_balance will be updated only when there is a need.
5990 * When the cpu is attached to null domain for ex, it will not be
5991 * updated.
5992 */
5993 if (likely(update_next_balance))
5994 rq->next_balance = next_balance;
5995}
5996
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005997#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07005998/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02005999 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006000 * rebalancing for all the cpus for whom scheduler ticks are stopped.
6001 */
6002static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
6003{
6004 struct rq *this_rq = cpu_rq(this_cpu);
6005 struct rq *rq;
6006 int balance_cpu;
6007
Suresh Siddha1c792db2011-12-01 17:07:32 -08006008 if (idle != CPU_IDLE ||
6009 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
6010 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006011
6012 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08006013 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006014 continue;
6015
6016 /*
6017 * If this cpu gets work to do, stop the load balancing
6018 * work being done for other cpus. Next load
6019 * balancing owner will pick it up.
6020 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08006021 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006022 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006023
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02006024 rq = cpu_rq(balance_cpu);
6025
6026 raw_spin_lock_irq(&rq->lock);
6027 update_rq_clock(rq);
6028 update_idle_cpu_load(rq);
6029 raw_spin_unlock_irq(&rq->lock);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006030
6031 rebalance_domains(balance_cpu, CPU_IDLE);
6032
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006033 if (time_after(this_rq->next_balance, rq->next_balance))
6034 this_rq->next_balance = rq->next_balance;
6035 }
6036 nohz.next_balance = this_rq->next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08006037end:
6038 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006039}
6040
6041/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006042 * Current heuristic for kicking the idle load balancer in the presence
6043 * of an idle cpu in the system:
6044 * - This rq has more than one task.
6045 * - At any scheduler domain level, this cpu's scheduler group has multiple
6046 *   busy cpus exceeding the group's power.
6047 * - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
6048 *   domain span are idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006049 */
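/*
 * Example: two busy siblings sharing a cache (a group with
 * SD_SHARE_PKG_RESOURCES and nr_busy_cpus == 2) while some other cpu in
 * the system sits tickless idle; the cpu taking the busy tick kicks the
 * idle load balancer so the idle cpu can pull one of the two tasks over.
 */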
6050static inline int nohz_kick_needed(struct rq *rq, int cpu)
6051{
6052 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006053 struct sched_domain *sd;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006054
Suresh Siddha1c792db2011-12-01 17:07:32 -08006055 if (unlikely(idle_cpu(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006056 return 0;
6057
Suresh Siddha1c792db2011-12-01 17:07:32 -08006058 /*
6059	 * We may have recently been in ticked or tickless idle mode. At the
6060	 * first busy tick after returning from idle, we will update the busy stats.
6061 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08006062 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08006063 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006064
6065 /*
6066 * None are in tickless mode and hence no need for NOHZ idle load
6067 * balancing.
6068 */
6069 if (likely(!atomic_read(&nohz.nr_cpus)))
6070 return 0;
Suresh Siddha1c792db2011-12-01 17:07:32 -08006071
6072 if (time_before(now, nohz.next_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006073 return 0;
6074
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006075 if (rq->nr_running >= 2)
6076 goto need_kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006077
Peter Zijlstra067491b2011-12-07 14:32:08 +01006078 rcu_read_lock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006079 for_each_domain(cpu, sd) {
6080 struct sched_group *sg = sd->groups;
6081 struct sched_group_power *sgp = sg->sgp;
6082 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006083
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006084 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
Peter Zijlstra067491b2011-12-07 14:32:08 +01006085 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006086
6087 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
6088 && (cpumask_first_and(nohz.idle_cpus_mask,
6089 sched_domain_span(sd)) < cpu))
Peter Zijlstra067491b2011-12-07 14:32:08 +01006090 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006091
6092 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
6093 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006094 }
Peter Zijlstra067491b2011-12-07 14:32:08 +01006095 rcu_read_unlock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006096 return 0;
Peter Zijlstra067491b2011-12-07 14:32:08 +01006097
6098need_kick_unlock:
6099 rcu_read_unlock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006100need_kick:
6101 return 1;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006102}
6103#else
6104static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
6105#endif
6106
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006107/*
6108 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006109 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006110 */
6111static void run_rebalance_domains(struct softirq_action *h)
6112{
6113 int this_cpu = smp_processor_id();
6114 struct rq *this_rq = cpu_rq(this_cpu);
Suresh Siddha6eb57e02011-10-03 15:09:01 -07006115 enum cpu_idle_type idle = this_rq->idle_balance ?
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006116 CPU_IDLE : CPU_NOT_IDLE;
6117
6118 rebalance_domains(this_cpu, idle);
6119
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006120 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006121 * If this cpu has a pending nohz_balance_kick, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006122 * balancing on behalf of the other idle cpus whose ticks are
6123 * stopped.
6124 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006125 nohz_idle_balance(this_cpu, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006126}
6127
6128static inline int on_null_domain(int cpu)
6129{
Paul E. McKenney90a65012010-02-28 08:32:18 -08006130 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006131}
6132
6133/*
6134 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006135 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006136void trigger_load_balance(struct rq *rq, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006137{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006138 /* Don't need to rebalance while attached to NULL domain */
6139 if (time_after_eq(jiffies, rq->next_balance) &&
6140 likely(!on_null_domain(cpu)))
6141 raise_softirq(SCHED_SOFTIRQ);
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006142#ifdef CONFIG_NO_HZ_COMMON
Suresh Siddha1c792db2011-12-01 17:07:32 -08006143 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006144 nohz_balancer_kick(cpu);
6145#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006146}
6147
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006148static void rq_online_fair(struct rq *rq)
6149{
6150 update_sysctl();
6151}
6152
6153static void rq_offline_fair(struct rq *rq)
6154{
6155 update_sysctl();
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07006156
6157 /* Ensure any throttled groups are reachable by pick_next_task */
6158 unthrottle_offline_cfs_rqs(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006159}
6160
Dhaval Giani55e12e52008-06-24 23:39:43 +05306161#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02006162
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006163/*
6164 * scheduler tick hitting a task of our scheduling class:
6165 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01006166static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006167{
6168 struct cfs_rq *cfs_rq;
6169 struct sched_entity *se = &curr->se;
6170
6171 for_each_sched_entity(se) {
6172 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01006173 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006174 }
Ben Segall18bf2802012-10-04 12:51:20 +02006175
Dave Kleikamp10e84b92013-07-31 13:53:35 -07006176 if (numabalancing_enabled)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02006177 task_tick_numa(rq, curr);
Linus Torvalds3d59eeb2012-12-16 14:33:25 -08006178
Ben Segall18bf2802012-10-04 12:51:20 +02006179 update_rq_runnable_avg(rq, 1);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006180}
6181
6182/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006183 * called on fork with the child task as argument from the parent's context
6184 * - child not yet on the tasklist
6185 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006186 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006187static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006188{
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09006189 struct cfs_rq *cfs_rq;
6190 struct sched_entity *se = &p->se, *curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02006191 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006192 struct rq *rq = this_rq();
6193 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006194
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006195 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006196
Peter Zijlstra861d0342010-08-19 13:31:43 +02006197 update_rq_clock(rq);
6198
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09006199 cfs_rq = task_cfs_rq(current);
6200 curr = cfs_rq->curr;
6201
Daisuke Nishimura6c9a27f2013-09-10 18:16:36 +09006202 /*
6203	 * Not only the cpu but also the task_group of the parent might have
6204	 * been changed after parent->se.{parent,cfs_rq} were copied to
6205	 * child->se.{parent,cfs_rq}. So call __set_task_cpu() to make the
6206	 * child's pointers point to valid ones.
6207 */
6208 rcu_read_lock();
6209 __set_task_cpu(p, this_cpu);
6210 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006211
Ting Yang7109c442007-08-28 12:53:24 +02006212 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006213
Mike Galbraithb5d9d732009-09-08 11:12:28 +02006214 if (curr)
6215 se->vruntime = curr->vruntime;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02006216 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006217
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006218 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02006219 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02006220 * Upon rescheduling, sched_class::put_prev_task() will place
6221 * 'current' within the tree based on its new key value.
6222 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006223 swap(curr->vruntime, se->vruntime);
Bharata B Raoaec0a512008-08-28 14:42:49 +05306224 resched_task(rq->curr);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006225 }
6226
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006227 se->vruntime -= cfs_rq->min_vruntime;
6228
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006229 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006230}
6231
Steven Rostedtcb469842008-01-25 21:08:22 +01006232/*
6233 * Priority of the task has changed. Check to see if we preempt
6234 * the current task.
6235 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006236static void
6237prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01006238{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006239 if (!p->se.on_rq)
6240 return;
6241
Steven Rostedtcb469842008-01-25 21:08:22 +01006242 /*
6243 * Reschedule if we are currently running on this runqueue and
6244 * our priority decreased, or if we are not currently running on
6245 * this runqueue and our priority is higher than the current's
6246 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006247 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01006248 if (p->prio > oldprio)
6249 resched_task(rq->curr);
6250 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02006251 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01006252}
6253
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006254static void switched_from_fair(struct rq *rq, struct task_struct *p)
6255{
6256 struct sched_entity *se = &p->se;
6257 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6258
6259 /*
6260	 * Ensure the task's vruntime is normalized, so that when it's
6261 * switched back to the fair class the enqueue_entity(.flags=0) will
6262 * do the right thing.
6263 *
6264 * If it was on_rq, then the dequeue_entity(.flags=0) will already
6265 * have normalized the vruntime, if it was !on_rq, then only when
6266 * the task is sleeping will it still have non-normalized vruntime.
6267 */
6268 if (!se->on_rq && p->state != TASK_RUNNING) {
6269 /*
6270 * Fix up our vruntime so that the current sleep doesn't
6271 * cause 'unlimited' sleep bonus.
6272 */
6273 place_entity(cfs_rq, se, 0);
6274 se->vruntime -= cfs_rq->min_vruntime;
6275 }
Paul Turner9ee474f2012-10-04 13:18:30 +02006276
Alex Shi141965c2013-06-26 13:05:39 +08006277#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02006278 /*
6279 * Remove our load from contribution when we leave sched_fair
6280 * and ensure we don't carry in an old decay_count if we
6281 * switch back.
6282 */
Kirill Tkhai87e3c8a2013-07-21 04:32:07 +04006283 if (se->avg.decay_count) {
6284 __synchronize_entity_decay(se);
6285 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turner9ee474f2012-10-04 13:18:30 +02006286 }
6287#endif
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006288}
6289
Steven Rostedtcb469842008-01-25 21:08:22 +01006290/*
6291 * We switched to the sched_fair class.
6292 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006293static void switched_to_fair(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01006294{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006295 if (!p->se.on_rq)
6296 return;
6297
Steven Rostedtcb469842008-01-25 21:08:22 +01006298 /*
6299 * We were most likely switched from sched_rt, so
6300 * kick off the schedule if running, otherwise just see
6301 * if we can still preempt the current task.
6302 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006303 if (rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01006304 resched_task(rq->curr);
6305 else
Peter Zijlstra15afe092008-09-20 23:38:02 +02006306 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01006307}
6308
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02006309/* Account for a task changing its policy or group.
6310 *
6311 * This routine is mostly called to set cfs_rq->curr field when a task
6312 * migrates between groups/classes.
6313 */
6314static void set_curr_task_fair(struct rq *rq)
6315{
6316 struct sched_entity *se = &rq->curr->se;
6317
Paul Turnerec12cb72011-07-21 09:43:30 -07006318 for_each_sched_entity(se) {
6319 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6320
6321 set_next_entity(cfs_rq, se);
6322 /* ensure bandwidth has been allocated on our new cfs_rq */
6323 account_cfs_rq_runtime(cfs_rq, 0);
6324 }
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02006325}
6326
Peter Zijlstra029632f2011-10-25 10:00:11 +02006327void init_cfs_rq(struct cfs_rq *cfs_rq)
6328{
6329 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006330 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
6331#ifndef CONFIG_64BIT
6332 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
6333#endif
Alex Shi141965c2013-06-26 13:05:39 +08006334#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02006335 atomic64_set(&cfs_rq->decay_counter, 1);
Alex Shi25099402013-06-20 10:18:55 +08006336 atomic_long_set(&cfs_rq->removed_load, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02006337#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02006338}
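/*
 * Note on the min_vruntime seed above: (u64)(-(1LL << 20)) is
 * 0xFFFFFFFFFFF00000, about one millisecond's worth of nanoseconds
 * short of the 64-bit wrap-around point, so any mistakes in the wrap
 * handling of vruntime comparisons surface soon after a cfs_rq starts
 * running rather than only after years of uptime.
 */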
6339
Peter Zijlstra810b3812008-02-29 15:21:01 -05006340#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006341static void task_move_group_fair(struct task_struct *p, int on_rq)
Peter Zijlstra810b3812008-02-29 15:21:01 -05006342{
Paul Turneraff3e492012-10-04 13:18:30 +02006343 struct cfs_rq *cfs_rq;
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006344 /*
6345 * If the task was not on the rq at the time of this cgroup movement
6346	 * it must have been asleep; sleeping tasks keep their ->vruntime
6347 * absolute on their old rq until wakeup (needed for the fair sleeper
6348 * bonus in place_entity()).
6349 *
6350 * If it was on the rq, we've just 'preempted' it, which does convert
6351 * ->vruntime to a relative base.
6352 *
6353 * Make sure both cases convert their relative position when migrating
6354 * to another cgroup's rq. This does somewhat interfere with the
6355 * fair sleeper stuff for the first placement, but who cares.
6356 */
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09006357 /*
6358 * When !on_rq, vruntime of the task has usually NOT been normalized.
6359 * But there are some cases where it has already been normalized:
6360 *
6361 * - Moving a forked child which is waiting for being woken up by
6362 * wake_up_new_task().
Daisuke Nishimura62af3782011-12-15 14:37:41 +09006363 * - Moving a task which has been woken up by try_to_wake_up() and
6364 * waiting for actually being woken up by sched_ttwu_pending().
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09006365 *
6366 * To prevent boost or penalty in the new cfs_rq caused by delta
6367 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
6368 */
Daisuke Nishimura62af3782011-12-15 14:37:41 +09006369 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09006370 on_rq = 1;
6371
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006372 if (!on_rq)
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006373 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
6374 set_task_rq(p, task_cpu(p));
Paul Turneraff3e492012-10-04 13:18:30 +02006375 if (!on_rq) {
6376 cfs_rq = cfs_rq_of(&p->se);
6377 p->se.vruntime += cfs_rq->min_vruntime;
6378#ifdef CONFIG_SMP
6379 /*
6380 * migrate_task_rq_fair() will have removed our previous
6381		 * contribution, but we must synchronize our decay_count with the
6382		 * new cfs_rq's decay_counter so that future decay stays consistent.
6383 */
6384 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
6385 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
6386#endif
6387 }
Peter Zijlstra810b3812008-02-29 15:21:01 -05006388}
Peter Zijlstra029632f2011-10-25 10:00:11 +02006389
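/*
 * Tear down a task group's CFS state: release its bandwidth control data
 * and free the per-CPU cfs_rq and sched_entity arrays.
 */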
6390void free_fair_sched_group(struct task_group *tg)
6391{
6392 int i;
6393
6394 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
6395
6396 for_each_possible_cpu(i) {
6397 if (tg->cfs_rq)
6398 kfree(tg->cfs_rq[i]);
6399 if (tg->se)
6400 kfree(tg->se[i]);
6401 }
6402
6403 kfree(tg->cfs_rq);
6404 kfree(tg->se);
6405}
6406
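/*
 * Allocate a task group's per-CPU cfs_rq and sched_entity objects, give it
 * the default (nice-0) share weight, and hook each entity underneath the
 * matching entity of @parent. Returns 1 on success, 0 on allocation failure.
 */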
6407int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6408{
6409 struct cfs_rq *cfs_rq;
6410 struct sched_entity *se;
6411 int i;
6412
6413 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
6414 if (!tg->cfs_rq)
6415 goto err;
6416 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
6417 if (!tg->se)
6418 goto err;
6419
6420 tg->shares = NICE_0_LOAD;
6421
6422 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
6423
6424 for_each_possible_cpu(i) {
6425 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
6426 GFP_KERNEL, cpu_to_node(i));
6427 if (!cfs_rq)
6428 goto err;
6429
6430 se = kzalloc_node(sizeof(struct sched_entity),
6431 GFP_KERNEL, cpu_to_node(i));
6432 if (!se)
6433 goto err_free_rq;
6434
6435 init_cfs_rq(cfs_rq);
6436 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
6437 }
6438
6439 return 1;
6440
6441err_free_rq:
6442 kfree(cfs_rq);
6443err:
6444 return 0;
6445}
6446
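/*
 * Take a dying task group's cfs_rq off this CPU's leaf cfs_rq list so that
 * the for_each_leaf_cfs_rq() walkers no longer see it.
 */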
6447void unregister_fair_sched_group(struct task_group *tg, int cpu)
6448{
6449 struct rq *rq = cpu_rq(cpu);
6450 unsigned long flags;
6451
6452 /*
6453	 * Only empty task groups can be destroyed, so we can speculatively
6454 * check on_list without danger of it being re-added.
6455 */
6456 if (!tg->cfs_rq[cpu]->on_list)
6457 return;
6458
6459 raw_spin_lock_irqsave(&rq->lock, flags);
6460 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
6461 raw_spin_unlock_irqrestore(&rq->lock, flags);
6462}
6463
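/*
 * Link a group cfs_rq and its representative sched_entity into the
 * hierarchy on one CPU. The entity runs in @parent's group cfs_rq (or the
 * root cfs_rq when there is no parent) and starts with zero weight until
 * the group's shares are recomputed. @se is NULL for the root task group,
 * which needs no representative entity.
 */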
6464void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
6465 struct sched_entity *se, int cpu,
6466 struct sched_entity *parent)
6467{
6468 struct rq *rq = cpu_rq(cpu);
6469
6470 cfs_rq->tg = tg;
6471 cfs_rq->rq = rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006472 init_cfs_rq_runtime(cfs_rq);
6473
6474 tg->cfs_rq[cpu] = cfs_rq;
6475 tg->se[cpu] = se;
6476
6477 /* se could be NULL for root_task_group */
6478 if (!se)
6479 return;
6480
6481 if (!parent)
6482 se->cfs_rq = &rq->cfs;
6483 else
6484 se->cfs_rq = parent->my_q;
6485
6486 se->my_q = cfs_rq;
6487 update_load_set(&se->load, 0);
6488 se->parent = parent;
6489}
6490
6491static DEFINE_MUTEX(shares_mutex);
6492
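/*
 * Change a task group's weight. The value is clamped to the
 * [MIN_SHARES, MAX_SHARES] range and the new weight is propagated up each
 * CPU's entity hierarchy under that CPU's rq->lock.
 */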
6493int sched_group_set_shares(struct task_group *tg, unsigned long shares)
6494{
6495 int i;
6496 unsigned long flags;
6497
6498 /*
6499 * We can't change the weight of the root cgroup.
6500 */
6501 if (!tg->se[0])
6502 return -EINVAL;
6503
6504 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
6505
6506 mutex_lock(&shares_mutex);
6507 if (tg->shares == shares)
6508 goto done;
6509
6510 tg->shares = shares;
6511 for_each_possible_cpu(i) {
6512 struct rq *rq = cpu_rq(i);
6513 struct sched_entity *se;
6514
6515 se = tg->se[i];
6516 /* Propagate contribution to hierarchy */
6517 raw_spin_lock_irqsave(&rq->lock, flags);
Frederic Weisbecker71b1da42013-04-12 01:50:59 +02006518
6519 /* Possible calls to update_curr() need rq clock */
6520 update_rq_clock(rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08006521 for_each_sched_entity(se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02006522 update_cfs_shares(group_cfs_rq(se));
6523 raw_spin_unlock_irqrestore(&rq->lock, flags);
6524 }
6525
6526done:
6527 mutex_unlock(&shares_mutex);
6528 return 0;
6529}
6530#else /* CONFIG_FAIR_GROUP_SCHED */
6531
6532void free_fair_sched_group(struct task_group *tg) { }
6533
6534int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
6535{
6536 return 1;
6537}
6538
6539void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
6540
6541#endif /* CONFIG_FAIR_GROUP_SCHED */
6542
Peter Zijlstra810b3812008-02-29 15:21:01 -05006543
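/*
 * sched_rr_get_interval() support: CFS has no fixed timeslice, so report
 * the slice this task would currently receive from sched_slice() as an
 * approximation.
 */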
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07006544static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00006545{
6546 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00006547 unsigned int rr_interval = 0;
6548
6549 /*
6550 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
6551 * idle runqueue:
6552 */
Peter Williams0d721ce2009-09-21 01:31:53 +00006553 if (rq->cfs.load.weight)
Zhu Yanhaia59f4e02013-01-08 12:56:52 +08006554 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
Peter Williams0d721ce2009-09-21 01:31:53 +00006555
6556 return rr_interval;
6557}
6558
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006559/*
6560 * All the scheduling class methods:
6561 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006562const struct sched_class fair_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02006563 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006564 .enqueue_task = enqueue_task_fair,
6565 .dequeue_task = dequeue_task_fair,
6566 .yield_task = yield_task_fair,
Mike Galbraithd95f4122011-02-01 09:50:51 -05006567 .yield_to_task = yield_to_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006568
Ingo Molnar2e09bf52007-10-15 17:00:05 +02006569 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006570
6571 .pick_next_task = pick_next_task_fair,
6572 .put_prev_task = put_prev_task_fair,
6573
Peter Williams681f3e62007-10-24 18:23:51 +02006574#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08006575 .select_task_rq = select_task_rq_fair,
Paul Turner0a74bef2012-10-04 13:18:30 +02006576 .migrate_task_rq = migrate_task_rq_fair,
Alex Shi141965c2013-06-26 13:05:39 +08006577
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006578 .rq_online = rq_online_fair,
6579 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006580
6581 .task_waking = task_waking_fair,
Peter Williams681f3e62007-10-24 18:23:51 +02006582#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006583
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02006584 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006585 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006586 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01006587
6588 .prio_changed = prio_changed_fair,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006589 .switched_from = switched_from_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01006590 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05006591
Peter Williams0d721ce2009-09-21 01:31:53 +00006592 .get_rr_interval = get_rr_interval_fair,
6593
Peter Zijlstra810b3812008-02-29 15:21:01 -05006594#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02006595 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05006596#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006597};
6598
6599#ifdef CONFIG_SCHED_DEBUG
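/* Walk every leaf cfs_rq on @cpu and dump it into the scheduler debug output. */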
Peter Zijlstra029632f2011-10-25 10:00:11 +02006600void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006601{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006602 struct cfs_rq *cfs_rq;
6603
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01006604 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02006605 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02006606 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01006607 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006608}
6609#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02006610
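/*
 * Early setup for the fair class: register the SCHED_SOFTIRQ handler that
 * drives periodic load balancing and, under NO_HZ_COMMON, initialize the
 * nohz idle-balance state and its CPU notifier.
 */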
6611__init void init_sched_fair_class(void)
6612{
6613#ifdef CONFIG_SMP
6614 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
6615
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006616#ifdef CONFIG_NO_HZ_COMMON
Diwakar Tundlam554ceca2012-03-07 14:44:26 -08006617 nohz.next_balance = jiffies;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006618 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
Suresh Siddha71325962012-01-19 18:28:57 -08006619 cpu_notifier(sched_ilb_notifier, 0);
Peter Zijlstra029632f2011-10-25 10:00:11 +02006620#endif
6621#endif /* SMP */
6622
6623}