/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

64/*
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010065 * Minimal preemption granularity for CPU-bound tasks:
Takuya Yoshikawa864616e2010-10-14 16:09:13 +090066 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010067 */
Ingo Molnar0bf377b2010-09-12 08:14:52 +020068unsigned int sysctl_sched_min_granularity = 750000ULL;
69unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010070
/*
 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
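
/*
 * Example (illustrative, not from the original source): with the default
 * 5ms slice and a group quota of 20ms per period, a cfs_rq refills its
 * local pool in up to four 5ms chunks; a request made with only 3ms of
 * quota left in the global pool receives just that remaining 3ms.
 */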
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}
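
/*
 * Example (illustrative): with the default SCHED_TUNABLESCALING_LOG and
 * 8 online CPUs, factor = 1 + ilog2(8) = 4, so the effective
 * sysctl_sched_latency becomes 4 * 6ms = 24ms and
 * sysctl_sched_min_granularity becomes 4 * 0.75ms = 3ms. The CPU count
 * is clamped to 8 above, so LOG scaling never produces a factor above 4.
 */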

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}

#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
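
/*
 * SRR() adds half of the final LSB before shifting, i.e. it rounds to
 * nearest instead of truncating. For example (illustrative),
 * SRR(0x180, 8) == 0x2 whereas a plain (0x180 >> 8) would yield 0x1.
 */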

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
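
/*
 * Worked example (illustrative, using scaled-down weights): a nice-0 task
 * (weight 1024) on a cfs_rq whose total load weight is 2048 gets delta
 * scaled by 1024/2048. With delta_exec = 1000000ns, inv_weight is
 * 2^32 / 2048, so tmp = 1000000 * 1024 * inv_weight >> 32 ~= 500000ns.
 */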

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
				       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
		/* We should have no load, but we need to update last_decay. */
		update_cfs_rq_blocked_load(cfs_rq, 0);
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * A preemption test can only be made between sibling entities that
	 * are in the same cfs_rq, i.e. that have a common parent. Walk up
	 * the hierarchy of both tasks until we find ancestors that are
	 * siblings with a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

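/*
 * Example (illustrative): with groups A/B and A/C, a task in B and a task
 * in C are first walked up to the group entities of B and C; those share
 * cfs_rq A, so the preemption decision is made between B and C.
 */
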
#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

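/*
 * Note: the helpers above compare vruntimes through a signed 64-bit
 * difference, so the comparison stays correct even after the unsigned
 * vruntime counters wrap around.
 */
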
static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
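
/*
 * Example (illustrative, default tunables on 1 CPU): latency = 6ms and
 * sched_nr_latency = 8, so with up to 8 runnable tasks the period is 6ms;
 * with 12 runnable tasks it is stretched to 12 * 0.75ms = 9ms so that no
 * slice drops below sysctl_sched_min_granularity.
 */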

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}

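/*
 * Example (illustrative): two runnable nice-0 tasks share a 6ms period,
 * so each gets a 3ms wall-time slice; if one of them were reniced so its
 * weight doubled, it would receive 4ms and the other 2ms.
 */
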
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP
static unsigned long task_h_load(struct task_struct *p);

static inline void __update_task_entity_contrib(struct sched_entity *se);

/*
 * Give a new task initial runnable-average values so it appears fully
 * loaded during its infancy.
 */
void init_task_runnable_average(struct task_struct *p)
{
	u32 slice;

	p->se.avg.decay_count = 0;
	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
	p->se.avg.runnable_avg_sum = slice;
	p->se.avg.runnable_avg_period = slice;
	__update_task_entity_contrib(&p->se);
}
#else
void init_task_runnable_average(struct task_struct *p)
{
}
#endif

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;
unsigned int sysctl_numa_balancing_scan_period_reset = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}
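
/*
 * Example (illustrative): with the default 256MB scan size and a 1GB RSS,
 * task_nr_scan_windows() returns 4, i.e. four scan windows are needed to
 * cover the task's resident memory.
 */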

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}
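
/*
 * Example (illustrative, default tunables): windows = 2560/256 = 10, so
 * floor = 100ms. A task with 1GB of RSS has 4 scan windows, giving
 * scan = 1000ms/4 = 250ms, so its minimum scan period is 250ms.
 */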

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned int smin = task_scan_min(p);
	unsigned int smax;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
	return max(smin, smax);
}

/*
 * Once a preferred node is selected the scheduler balancer will prefer moving
 * a task to that node for sysctl_numa_balancing_settle_count number of PTE
 * scans. This will give the process the chance to accumulate more faults on
 * the preferred node but still allow the scheduler to move the task again if
 * the node's CPUs are overloaded.
 */
unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;

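/*
 * p->numa_faults[] stores two counters per node, a shared and a private
 * one, laid out as index = 2 * nid + priv; task_faults() below sums both
 * counters for a node.
 */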
static inline int task_faults_idx(int nid, int priv)
{
	return 2 * nid + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(nid, 0)] +
		p->numa_faults[task_faults_idx(nid, 1)];
}

static unsigned long weighted_cpuload(const int cpu);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long power_of(int cpu);
static long effective_load(struct task_group *tg, int cpu, long wl, long wg);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
	unsigned long nr_running;
	unsigned long load;

	/* Total compute capacity of CPUs on a node */
	unsigned long power;

	/* Approximate capacity in terms of runnable tasks on a node */
	unsigned long capacity;
	int has_capacity;
};

/*
 * XXX borrowed from update_sg_lb_stats
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
	int cpu;

	memset(ns, 0, sizeof(*ns));
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		struct rq *rq = cpu_rq(cpu);

		ns->nr_running += rq->nr_running;
		ns->load += weighted_cpuload(cpu);
		ns->power += power_of(cpu);
	}

	ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
	ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
	ns->has_capacity = (ns->nr_running < ns->capacity);
}
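
/*
 * Example (illustrative): a node with 4 CPUs at full SCHED_POWER_SCALE
 * ends up with capacity = 4; if only 3 tasks are running there,
 * has_capacity is true and the node can accept another task.
 */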

struct task_numa_env {
	struct task_struct *p;

	int src_cpu, src_nid;
	int dst_cpu, dst_nid;

	struct numa_stats src_stats, dst_stats;

	int imbalance_pct, idx;

	struct task_struct *best_task;
	long best_imp;
	int best_cpu;
};

static void task_numa_assign(struct task_numa_env *env,
			     struct task_struct *p, long imp)
{
	if (env->best_task)
		put_task_struct(env->best_task);
	if (p)
		get_task_struct(p);

	env->best_task = p;
	env->best_imp = imp;
	env->best_cpu = env->dst_cpu;
}

/*
 * This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task was migrated to the target dst_cpu, taking
 * into account that it might be best to exchange it with the task currently
 * running on the dst_cpu.
 */
static void task_numa_compare(struct task_numa_env *env, long imp)
{
	struct rq *src_rq = cpu_rq(env->src_cpu);
	struct rq *dst_rq = cpu_rq(env->dst_cpu);
	struct task_struct *cur;
	long dst_load, src_load;
	long load;

	rcu_read_lock();
	cur = ACCESS_ONCE(dst_rq->curr);
	if (cur->pid == 0) /* idle */
		cur = NULL;

	/*
	 * "imp" is the fault differential for the source task between the
	 * source and destination node. Calculate the total differential for
	 * the source task and potential destination task. The more negative
	 * the value is, the more remote accesses would be expected to be
	 * incurred if the tasks were swapped.
	 */
	if (cur) {
		/* Skip this swap candidate if cannot move to the source cpu */
		if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
			goto unlock;

		imp += task_faults(cur, env->src_nid) -
		       task_faults(cur, env->dst_nid);
	}

	if (imp < env->best_imp)
		goto unlock;

	if (!cur) {
		/* Is there capacity at our destination? */
		if (env->src_stats.has_capacity &&
		    !env->dst_stats.has_capacity)
			goto unlock;

		goto balance;
	}

	/* Balance doesn't matter much if we're running a task per cpu */
	if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
		goto assign;

	/*
	 * In the overloaded case, try and keep the load balanced.
	 */
balance:
	dst_load = env->dst_stats.load;
	src_load = env->src_stats.load;

	/* XXX missing power terms */
	load = task_h_load(env->p);
	dst_load += load;
	src_load -= load;

	if (cur) {
		load = task_h_load(cur);
		dst_load -= load;
		src_load += load;
	}

	/* make src_load the smaller */
	if (dst_load < src_load)
		swap(dst_load, src_load);

	if (src_load * env->imbalance_pct < dst_load * 100)
		goto unlock;

assign:
	task_numa_assign(env, cur, imp);
unlock:
	rcu_read_unlock();
}
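
/*
 * Example (illustrative): with imbalance_pct = 112, after the hypothetical
 * move the busier side may carry at most 12% more load than the other;
 * otherwise the swap candidate is rejected above.
 */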

static void task_numa_find_cpu(struct task_numa_env *env, long imp)
{
	int cpu;

	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
		/* Skip this CPU if the source task cannot migrate */
		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
			continue;

		env->dst_cpu = cpu;
		task_numa_compare(env, imp);
	}
}

static int task_numa_migrate(struct task_struct *p)
{
	struct task_numa_env env = {
		.p = p,

		.src_cpu = task_cpu(p),
		.src_nid = cpu_to_node(task_cpu(p)),

		.imbalance_pct = 112,

		.best_task = NULL,
		.best_imp = 0,
		.best_cpu = -1
	};
	struct sched_domain *sd;
	unsigned long faults;
	int nid, ret;
	long imp;

	/*
	 * Pick the lowest SD_NUMA domain, as that would have the smallest
	 * imbalance and would be the first to start moving tasks about.
	 *
	 * And we want to avoid any moving of tasks about, as that would
	 * create random movement of tasks -- countering the numa conditions
	 * we're trying to satisfy here.
	 */
	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
	env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
	rcu_read_unlock();

	faults = task_faults(p, env.src_nid);
	update_numa_stats(&env.src_stats, env.src_nid);
	env.dst_nid = p->numa_preferred_nid;
	imp = task_faults(env.p, env.dst_nid) - faults;
	update_numa_stats(&env.dst_stats, env.dst_nid);

	/* If the preferred nid has capacity, try to use it. */
	if (env.dst_stats.has_capacity)
		task_numa_find_cpu(&env, imp);

	/* No space available on the preferred nid. Look elsewhere. */
	if (env.best_cpu == -1) {
		for_each_online_node(nid) {
			if (nid == env.src_nid || nid == p->numa_preferred_nid)
				continue;

			/* Only consider nodes that recorded more faults */
			imp = task_faults(env.p, nid) - faults;
			if (imp < 0)
				continue;

			env.dst_nid = nid;
			update_numa_stats(&env.dst_stats, env.dst_nid);
			task_numa_find_cpu(&env, imp);
		}
	}

	/* No better CPU than the current one was found. */
	if (env.best_cpu == -1)
		return -EAGAIN;

	if (env.best_task == NULL) {
		int ret = migrate_task_to(p, env.best_cpu);
		return ret;
	}

	ret = migrate_swap(p, env.best_task);
	put_task_struct(env.best_task);
	return ret;
}

/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
	/* Success if task is already running on preferred CPU */
	p->numa_migrate_retry = 0;
	if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid) {
		/*
		 * If migration is temporarily disabled due to a task migration
		 * then re-enable it now as the task is running on its
		 * preferred node and memory should migrate locally
		 */
		if (!p->numa_migrate_seq)
			p->numa_migrate_seq++;
		return;
	}

	/* This task has no NUMA fault statistics yet */
	if (unlikely(p->numa_preferred_nid == -1))
		return;

	/* Otherwise, try to migrate to a CPU on the preferred node */
	if (task_numa_migrate(p) != 0)
		p->numa_migrate_retry = jiffies + HZ*5;
}

static void task_numa_placement(struct task_struct *p)
{
	int seq, nid, max_nid = -1;
	unsigned long max_faults = 0;

	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
	if (p->numa_scan_seq == seq)
		return;
	p->numa_scan_seq = seq;
	p->numa_migrate_seq++;
	p->numa_scan_period_max = task_scan_max(p);

	/* Find the node with the highest number of faults */
	for_each_online_node(nid) {
		unsigned long faults = 0;
		int priv, i;

		for (priv = 0; priv < 2; priv++) {
			i = task_faults_idx(nid, priv);

			/* Decay existing window, copy faults since last scan */
			p->numa_faults[i] >>= 1;
			p->numa_faults[i] += p->numa_faults_buffer[i];
			p->numa_faults_buffer[i] = 0;

			faults += p->numa_faults[i];
		}

		if (faults > max_faults) {
			max_faults = faults;
			max_nid = nid;
		}
	}

	/* Preferred node as the node with the most faults */
	if (max_faults && max_nid != p->numa_preferred_nid) {
		/* Update the preferred nid and migrate task if possible */
		p->numa_preferred_nid = max_nid;
		p->numa_migrate_seq = 1;
		numa_migrate_preferred(p);
	}
}
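
/*
 * Example (illustrative): a node whose decayed fault count is 64 and which
 * saw 16 new hinting faults in the last scan ends the update above with
 * 64/2 + 16 = 48, so older faults lose half their weight every scan window
 * while recent faults dominate node selection.
 */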

/*
 * Got a PROT_NONE fault for a page on @node.
 */
void task_numa_fault(int last_cpupid, int node, int pages, bool migrated)
{
	struct task_struct *p = current;
	int priv;

	if (!numabalancing_enabled)
		return;

	/* for example, ksmd faulting in a user's mm */
	if (!p->mm)
		return;

	/*
	 * First accesses are treated as private, otherwise consider accesses
	 * to be private if the accessing pid has not changed
	 */
	if (!cpupid_pid_unset(last_cpupid))
		priv = ((p->pid & LAST__PID_MASK) == cpupid_to_pid(last_cpupid));
	else
		priv = 1;

	/* Allocate buffer to track faults on a per-node basis */
	if (unlikely(!p->numa_faults)) {
		int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;

		/* numa_faults and numa_faults_buffer share the allocation */
		p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
		if (!p->numa_faults)
			return;

		BUG_ON(p->numa_faults_buffer);
		p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids);
	}

	/*
	 * If pages are properly placed (did not migrate) then scan slower.
	 * This is reset periodically in case of phase changes
	 */
	if (!migrated) {
		/* Initialise if necessary */
		if (!p->numa_scan_period_max)
			p->numa_scan_period_max = task_scan_max(p);

		p->numa_scan_period = min(p->numa_scan_period_max,
			p->numa_scan_period + 10);
	}

	task_numa_placement(p);

	/* Retry task to preferred node migration if it previously failed */
	if (p->numa_migrate_retry && time_after(jiffies, p->numa_migrate_retry))
		numa_migrate_preferred(p);

	p->numa_faults_buffer[task_faults_idx(node, priv)] += pages;
}

static void reset_ptenuma_scan(struct task_struct *p)
{
	ACCESS_ONCE(p->mm->numa_scan_seq)++;
	p->mm->numa_scan_offset = 0;
}

/*
 * The expensive part of numa migration is done from task_work context.
 * Triggered from task_tick_numa().
 */
void task_numa_work(struct callback_head *work)
{
	unsigned long migrate, next_scan, now = jiffies;
	struct task_struct *p = current;
	struct mm_struct *mm = p->mm;
	struct vm_area_struct *vma;
	unsigned long start, end;
	unsigned long nr_pte_updates = 0;
	long pages;

	WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));

	work->next = work; /* protect against double add */
	/*
	 * Who cares about NUMA placement when they're dying.
	 *
	 * NOTE: make sure not to dereference p->mm before this check,
	 * exit_task_work() happens _after_ exit_mm() so we could be called
	 * without p->mm even though we still had it when we enqueued this
	 * work.
	 */
	if (p->flags & PF_EXITING)
		return;

	if (!mm->numa_next_reset || !mm->numa_next_scan) {
		mm->numa_next_scan = now +
			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
		mm->numa_next_reset = now +
			msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
	}

	/*
	 * Reset the scan period if enough time has gone by. Objective is that
	 * scanning will be reduced if pages are properly placed. As tasks
	 * can enter different phases this needs to be re-examined. Lacking
	 * proper tracking of reference behaviour, this blunt hammer is used.
	 */
	migrate = mm->numa_next_reset;
	if (time_after(now, migrate)) {
		p->numa_scan_period = task_scan_min(p);
		next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
		xchg(&mm->numa_next_reset, next_scan);
	}

	/*
	 * Enforce maximal scan/migration frequency..
	 */
	migrate = mm->numa_next_scan;
	if (time_before(now, migrate))
		return;

	if (p->numa_scan_period == 0) {
		p->numa_scan_period_max = task_scan_max(p);
		p->numa_scan_period = task_scan_min(p);
	}

	next_scan = now + msecs_to_jiffies(p->numa_scan_period);
	if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
		return;

	/*
	 * Delay this task enough that another task of this mm will likely win
	 * the next time around.
	 */
	p->node_stamp += 2 * TICK_NSEC;

	start = mm->numa_scan_offset;
	pages = sysctl_numa_balancing_scan_size;
	pages <<= 20 - PAGE_SHIFT; /* MB in pages */
	if (!pages)
		return;
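
	/*
	 * Example (illustrative): with 4KB pages and the default 256MB scan
	 * size, pages = 256 << 8 = 65536 PTEs are examined per scan pass.
	 */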

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, start);
	if (!vma) {
		reset_ptenuma_scan(p);
		start = 0;
		vma = mm->mmap;
	}
	for (; vma; vma = vma->vm_next) {
		if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
			continue;

		/*
		 * Shared library pages mapped by multiple processes are not
		 * migrated as it is expected they are cache replicated. Avoid
		 * hinting faults in read-only file-backed mappings or the vdso
		 * as migrating the pages will be of marginal benefit.
		 */
		if (!vma->vm_mm ||
		    (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
			continue;

		do {
			start = max(start, vma->vm_start);
			end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
			end = min(end, vma->vm_end);
			nr_pte_updates += change_prot_numa(vma, start, end);

			/*
			 * Scan sysctl_numa_balancing_scan_size but ensure that
			 * at least one PTE is updated so that unused virtual
			 * address space is quickly skipped.
			 */
			if (nr_pte_updates)
				pages -= (end - start) >> PAGE_SHIFT;

			start = end;
			if (pages <= 0)
				goto out;
		} while (end != vma->vm_end);
	}

out:
	/*
	 * If the whole process was scanned without updates then no NUMA
	 * hinting faults are being recorded and scan rate should be lower.
	 */
	if (mm->numa_scan_offset == 0 && !nr_pte_updates) {
		p->numa_scan_period = min(p->numa_scan_period_max,
			p->numa_scan_period << 1);

		next_scan = now + msecs_to_jiffies(p->numa_scan_period);
		mm->numa_next_scan = next_scan;
	}

1405 /*
Peter Zijlstrac69307d2013-10-07 11:28:41 +01001406 * It is possible to reach the end of the VMA list but the last few
1407	 * VMAs are not guaranteed to be migratable. If they are not, we
1408 * would find the !migratable VMA on the next scan but not reset the
1409 * scanner to the start so check it now.
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001410 */
1411 if (vma)
Mel Gorman9f406042012-11-14 18:34:32 +00001412 mm->numa_scan_offset = start;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001413 else
1414 reset_ptenuma_scan(p);
1415 up_read(&mm->mmap_sem);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001416}
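/*
 * Worked example (illustrative note, not part of the original source):
 * with a scan size of 256MB and 4KB pages (PAGE_SHIFT == 12), each pass
 * above tries to update 256 << (20 - 12) == 65536 PTEs, walking VMAs
 * from mm->numa_scan_offset in HPAGE_SIZE-aligned chunks.  Because
 * "pages" is only decremented once nr_pte_updates shows real changes,
 * empty virtual address space is skipped at little cost.
 */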
1417
1418/*
1419 * Drive the periodic memory faults.
1420 */
1421void task_tick_numa(struct rq *rq, struct task_struct *curr)
1422{
1423 struct callback_head *work = &curr->numa_work;
1424 u64 period, now;
1425
1426 /*
1427 * We don't care about NUMA placement if we don't have memory.
1428 */
1429 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1430 return;
1431
1432 /*
1433 * Using runtime rather than walltime has the dual advantage that
1434 * we (mostly) drive the selection from busy threads and that the
1435 * task needs to have done some actual work before we bother with
1436 * NUMA placement.
1437 */
1438 now = curr->se.sum_exec_runtime;
1439 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1440
1441 if (now - curr->node_stamp > period) {
Peter Zijlstra4b96a292012-10-25 14:16:47 +02001442 if (!curr->node_stamp)
Mel Gorman598f0ec2013-10-07 11:28:55 +01001443 curr->numa_scan_period = task_scan_min(curr);
Peter Zijlstra19a78d12013-10-07 11:28:51 +01001444 curr->node_stamp += period;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001445
1446 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1447 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1448 task_work_add(curr, work, true);
1449 }
1450 }
1451}
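/*
 * Worked example (illustrative note, not part of the original source):
 * with curr->numa_scan_period == 1000 (ms) the comparison above requires
 * 1000 * NSEC_PER_MSEC == 1,000,000,000 ns of sum_exec_runtime beyond
 * node_stamp, i.e. roughly one second of CPU time, not wall-clock time,
 * before task_numa_work() is queued again.
 */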
1452#else
1453static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1454{
1455}
1456#endif /* CONFIG_NUMA_BALANCING */
1457
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001458static void
1459account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1460{
1461 update_load_add(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001462 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02001463 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001464#ifdef CONFIG_SMP
1465 if (entity_is_task(se))
Peter Zijlstraeb953082012-04-17 13:38:40 +02001466 list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001467#endif
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001468 cfs_rq->nr_running++;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001469}
1470
1471static void
1472account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1473{
1474 update_load_sub(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001475 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02001476 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001477 if (entity_is_task(se))
Bharata B Raob87f1722008-09-25 09:53:54 +05301478 list_del_init(&se->group_node);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001479 cfs_rq->nr_running--;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001480}
1481
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001482#ifdef CONFIG_FAIR_GROUP_SCHED
1483# ifdef CONFIG_SMP
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001484static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1485{
1486 long tg_weight;
1487
1488 /*
1489 * Use this CPU's actual weight instead of the last load_contribution
1490 * to gain a more accurate current total weight. See
1491 * update_cfs_rq_load_contribution().
1492 */
Alex Shibf5b9862013-06-20 10:18:54 +08001493 tg_weight = atomic_long_read(&tg->load_avg);
Paul Turner82958362012-10-04 13:18:31 +02001494 tg_weight -= cfs_rq->tg_load_contrib;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001495 tg_weight += cfs_rq->load.weight;
1496
1497 return tg_weight;
1498}
1499
Paul Turner6d5ab292011-01-21 20:45:01 -08001500static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001501{
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001502 long tg_weight, load, shares;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001503
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001504 tg_weight = calc_tg_weight(tg, cfs_rq);
Paul Turner6d5ab292011-01-21 20:45:01 -08001505 load = cfs_rq->load.weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001506
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001507 shares = (tg->shares * load);
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001508 if (tg_weight)
1509 shares /= tg_weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001510
1511 if (shares < MIN_SHARES)
1512 shares = MIN_SHARES;
1513 if (shares > tg->shares)
1514 shares = tg->shares;
1515
1516 return shares;
1517}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001518# else /* CONFIG_SMP */
Paul Turner6d5ab292011-01-21 20:45:01 -08001519static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001520{
1521 return tg->shares;
1522}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001523# endif /* CONFIG_SMP */
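/*
 * Worked example (illustrative note, not part of the original source):
 * a group with tg->shares == 1024 whose load is spread over two CPUs,
 * each local cfs_rq holding load.weight == 1024, sees tg_weight ~= 2048,
 * so each per-CPU group entity gets roughly 1024 * 1024 / 2048 == 512,
 * clamped to the [MIN_SHARES, tg->shares] range.
 */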
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001524static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1525 unsigned long weight)
1526{
Paul Turner19e5eeb2010-12-15 19:10:18 -08001527 if (se->on_rq) {
1528 /* commit outstanding execution time */
1529 if (cfs_rq->curr == se)
1530 update_curr(cfs_rq);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001531 account_entity_dequeue(cfs_rq, se);
Paul Turner19e5eeb2010-12-15 19:10:18 -08001532 }
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001533
1534 update_load_set(&se->load, weight);
1535
1536 if (se->on_rq)
1537 account_entity_enqueue(cfs_rq, se);
1538}
1539
Paul Turner82958362012-10-04 13:18:31 +02001540static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1541
Paul Turner6d5ab292011-01-21 20:45:01 -08001542static void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001543{
1544 struct task_group *tg;
1545 struct sched_entity *se;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001546 long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001547
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001548 tg = cfs_rq->tg;
1549 se = tg->se[cpu_of(rq_of(cfs_rq))];
Paul Turner64660c82011-07-21 09:43:36 -07001550 if (!se || throttled_hierarchy(cfs_rq))
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001551 return;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001552#ifndef CONFIG_SMP
1553 if (likely(se->load.weight == tg->shares))
1554 return;
1555#endif
Paul Turner6d5ab292011-01-21 20:45:01 -08001556 shares = calc_cfs_shares(cfs_rq, tg);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001557
1558 reweight_entity(cfs_rq_of(se), se, shares);
1559}
1560#else /* CONFIG_FAIR_GROUP_SCHED */
Paul Turner6d5ab292011-01-21 20:45:01 -08001561static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001562{
1563}
1564#endif /* CONFIG_FAIR_GROUP_SCHED */
1565
Alex Shi141965c2013-06-26 13:05:39 +08001566#ifdef CONFIG_SMP
Paul Turner9d85f212012-10-04 13:18:29 +02001567/*
Paul Turner5b51f2f2012-10-04 13:18:32 +02001568 * We choose a half-life close to 1 scheduling period.
1569 * Note: The tables below are dependent on this value.
1570 */
1571#define LOAD_AVG_PERIOD 32
1572#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1573#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
1574
1575/* Precomputed fixed inverse multiplies for multiplication by y^n */
1576static const u32 runnable_avg_yN_inv[] = {
1577 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1578 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1579 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1580 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1581 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1582 0x85aac367, 0x82cd8698,
1583};
1584
1585/*
1586 * Precomputed \Sum 1024*y^k { 1<=k<=n }. These are floor(true_value) to prevent
1587 * over-estimates when re-combining.
1588 */
1589static const u32 runnable_avg_yN_sum[] = {
1590 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1591 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1592 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1593};
1594
1595/*
Paul Turner9d85f212012-10-04 13:18:29 +02001596 * Approximate:
1597 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
1598 */
1599static __always_inline u64 decay_load(u64 val, u64 n)
1600{
Paul Turner5b51f2f2012-10-04 13:18:32 +02001601 unsigned int local_n;
1602
1603 if (!n)
1604 return val;
1605 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1606 return 0;
1607
1608 /* after bounds checking we can collapse to 32-bit */
1609 local_n = n;
1610
1611 /*
1612 * As y^PERIOD = 1/2, we can combine
1613	 *   y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
1614	 * with a look-up table which covers y^n (n < PERIOD)
1615	 *
1616	 * to achieve constant time decay_load().
1617 */
1618 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
1619 val >>= local_n / LOAD_AVG_PERIOD;
1620 local_n %= LOAD_AVG_PERIOD;
Paul Turner9d85f212012-10-04 13:18:29 +02001621 }
1622
Paul Turner5b51f2f2012-10-04 13:18:32 +02001623 val *= runnable_avg_yN_inv[local_n];
1624 /* We don't use SRR here since we always want to round down. */
1625 return val >> 32;
1626}
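/*
 * Worked example (illustrative note, not part of the original source):
 * decay_load(1024, 32) first shifts by one full period (1024 >> 1 == 512)
 * and then multiplies by runnable_avg_yN_inv[0] == 0xffffffff, giving
 * ~511; load contributed one period (~32ms) ago is therefore worth about
 * half of load contributed "now", matching y^32 == 0.5.
 */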
1627
1628/*
1629 * For updates fully spanning n periods, the contribution to runnable
1630 * average will be: \Sum 1024*y^n
1631 *
1632 * We can compute this reasonably efficiently by combining:
1633 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
1634 */
1635static u32 __compute_runnable_contrib(u64 n)
1636{
1637 u32 contrib = 0;
1638
1639 if (likely(n <= LOAD_AVG_PERIOD))
1640 return runnable_avg_yN_sum[n];
1641 else if (unlikely(n >= LOAD_AVG_MAX_N))
1642 return LOAD_AVG_MAX;
1643
1644	/* Compute \Sum 1024*y^n combining precomputed values for y^i and \Sum y^j */
1645 do {
1646 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
1647 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
1648
1649 n -= LOAD_AVG_PERIOD;
1650 } while (n > LOAD_AVG_PERIOD);
1651
1652 contrib = decay_load(contrib, n);
1653 return contrib + runnable_avg_yN_sum[n];
Paul Turner9d85f212012-10-04 13:18:29 +02001654}
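/*
 * Worked example (illustrative note, not part of the original source):
 * for n == 40 the loop above runs once, leaving contrib ==
 * runnable_avg_yN_sum[32] and n == 8, so the result is
 * decay_load(runnable_avg_yN_sum[32], 8) + runnable_avg_yN_sum[8]:
 * the oldest 32 periods aged by a further 8 periods, plus the 8 most
 * recent full periods.
 */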
1655
1656/*
1657 * We can represent the historical contribution to runnable average as the
1658 * coefficients of a geometric series. To do this we sub-divide our runnable
1659 * history into segments of approximately 1ms (1024us); label the segment that
1660 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
1661 *
1662 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
1663 * p0 p1 p2
1664 * (now) (~1ms ago) (~2ms ago)
1665 *
1666 * Let u_i denote the fraction of p_i that the entity was runnable.
1667 *
1668 * We then designate the fractions u_i as our co-efficients, yielding the
1669 * following representation of historical load:
1670 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
1671 *
1672 * We choose y based on the width of a reasonable scheduling period, fixing:
1673 * y^32 = 0.5
1674 *
1675 * This means that the contribution to load ~32ms ago (u_32) will be weighted
1676 * approximately half as much as the contribution to load within the last ms
1677 * (u_0).
1678 *
1679 * When a period "rolls over" and we have new u_0`, multiplying the previous
1680 * sum again by y is sufficient to update:
1681 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
1682 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
1683 */
1684static __always_inline int __update_entity_runnable_avg(u64 now,
1685 struct sched_avg *sa,
1686 int runnable)
1687{
Paul Turner5b51f2f2012-10-04 13:18:32 +02001688 u64 delta, periods;
1689 u32 runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02001690 int delta_w, decayed = 0;
1691
1692 delta = now - sa->last_runnable_update;
1693 /*
1694 * This should only happen when time goes backwards, which it
1695 * unfortunately does during sched clock init when we swap over to TSC.
1696 */
1697 if ((s64)delta < 0) {
1698 sa->last_runnable_update = now;
1699 return 0;
1700 }
1701
1702 /*
1703 * Use 1024ns as the unit of measurement since it's a reasonable
1704 * approximation of 1us and fast to compute.
1705 */
1706 delta >>= 10;
1707 if (!delta)
1708 return 0;
1709 sa->last_runnable_update = now;
1710
1711 /* delta_w is the amount already accumulated against our next period */
1712 delta_w = sa->runnable_avg_period % 1024;
1713 if (delta + delta_w >= 1024) {
1714 /* period roll-over */
1715 decayed = 1;
1716
1717 /*
1718 * Now that we know we're crossing a period boundary, figure
1719 * out how much from delta we need to complete the current
1720 * period and accrue it.
1721 */
1722 delta_w = 1024 - delta_w;
Paul Turner5b51f2f2012-10-04 13:18:32 +02001723 if (runnable)
1724 sa->runnable_avg_sum += delta_w;
1725 sa->runnable_avg_period += delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02001726
Paul Turner5b51f2f2012-10-04 13:18:32 +02001727 delta -= delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02001728
Paul Turner5b51f2f2012-10-04 13:18:32 +02001729 /* Figure out how many additional periods this update spans */
1730 periods = delta / 1024;
1731 delta %= 1024;
1732
1733 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
1734 periods + 1);
1735 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
1736 periods + 1);
1737
1738 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
1739 runnable_contrib = __compute_runnable_contrib(periods);
1740 if (runnable)
1741 sa->runnable_avg_sum += runnable_contrib;
1742 sa->runnable_avg_period += runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02001743 }
1744
1745 /* Remainder of delta accrued against u_0` */
1746 if (runnable)
1747 sa->runnable_avg_sum += delta;
1748 sa->runnable_avg_period += delta;
1749
1750 return decayed;
1751}
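/*
 * Worked example (illustrative note, not part of the original source):
 * suppose an entity was last updated ~2.8ms ago (delta ~= 2816 in 1024ns
 * units) and the open window already holds delta_w == 512.  The first
 * 512 units complete that window, both sums are then decayed by
 * periods + 1 == 3, the two intervening full windows add
 * __compute_runnable_contrib(2), and the remaining 256 units accrue
 * against the new current window.
 */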
1752
Paul Turner9ee474f2012-10-04 13:18:30 +02001753/* Synchronize an entity's decay with its parenting cfs_rq. */
Paul Turneraff3e492012-10-04 13:18:30 +02001754static inline u64 __synchronize_entity_decay(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02001755{
1756 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1757 u64 decays = atomic64_read(&cfs_rq->decay_counter);
1758
1759 decays -= se->avg.decay_count;
1760 if (!decays)
Paul Turneraff3e492012-10-04 13:18:30 +02001761 return 0;
Paul Turner9ee474f2012-10-04 13:18:30 +02001762
1763 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
1764 se->avg.decay_count = 0;
Paul Turneraff3e492012-10-04 13:18:30 +02001765
1766 return decays;
Paul Turner9ee474f2012-10-04 13:18:30 +02001767}
1768
Paul Turnerc566e8e2012-10-04 13:18:30 +02001769#ifdef CONFIG_FAIR_GROUP_SCHED
1770static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1771 int force_update)
1772{
1773 struct task_group *tg = cfs_rq->tg;
Alex Shibf5b9862013-06-20 10:18:54 +08001774 long tg_contrib;
Paul Turnerc566e8e2012-10-04 13:18:30 +02001775
1776 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
1777 tg_contrib -= cfs_rq->tg_load_contrib;
1778
Alex Shibf5b9862013-06-20 10:18:54 +08001779 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
1780 atomic_long_add(tg_contrib, &tg->load_avg);
Paul Turnerc566e8e2012-10-04 13:18:30 +02001781 cfs_rq->tg_load_contrib += tg_contrib;
1782 }
1783}
Paul Turner8165e142012-10-04 13:18:31 +02001784
Paul Turnerbb17f652012-10-04 13:18:31 +02001785/*
1786 * Aggregate cfs_rq runnable averages into an equivalent task_group
1787 * representation for computing load contributions.
1788 */
1789static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1790 struct cfs_rq *cfs_rq)
1791{
1792 struct task_group *tg = cfs_rq->tg;
1793 long contrib;
1794
1795 /* The fraction of a cpu used by this cfs_rq */
1796 contrib = div_u64(sa->runnable_avg_sum << NICE_0_SHIFT,
1797 sa->runnable_avg_period + 1);
1798 contrib -= cfs_rq->tg_runnable_contrib;
1799
1800 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
1801 atomic_add(contrib, &tg->runnable_avg);
1802 cfs_rq->tg_runnable_contrib += contrib;
1803 }
1804}
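/*
 * Worked example (illustrative note, not part of the original source):
 * a cfs_rq that was runnable for about half of the tracked history has
 * runnable_avg_sum ~= runnable_avg_period / 2, so the fraction computed
 * above is roughly (1 << NICE_0_SHIFT) / 2 == 512.  Only deltas larger
 * than 1/64th of the previous contribution are folded back into
 * tg->runnable_avg, keeping updates of the shared atomic infrequent.
 */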
1805
Paul Turner8165e142012-10-04 13:18:31 +02001806static inline void __update_group_entity_contrib(struct sched_entity *se)
1807{
1808 struct cfs_rq *cfs_rq = group_cfs_rq(se);
1809 struct task_group *tg = cfs_rq->tg;
Paul Turnerbb17f652012-10-04 13:18:31 +02001810 int runnable_avg;
1811
Paul Turner8165e142012-10-04 13:18:31 +02001812 u64 contrib;
1813
1814 contrib = cfs_rq->tg_load_contrib * tg->shares;
Alex Shibf5b9862013-06-20 10:18:54 +08001815 se->avg.load_avg_contrib = div_u64(contrib,
1816 atomic_long_read(&tg->load_avg) + 1);
Paul Turnerbb17f652012-10-04 13:18:31 +02001817
1818 /*
1819 * For group entities we need to compute a correction term in the case
1820 * that they are consuming <1 cpu so that we would contribute the same
1821 * load as a task of equal weight.
1822 *
1823 * Explicitly co-ordinating this measurement would be expensive, but
1824	 * fortunately the sum of each cpu's contribution forms a usable
1825 * lower-bound on the true value.
1826 *
1827 * Consider the aggregate of 2 contributions. Either they are disjoint
1828	 * (and the sum represents the true value) or they overlap and we are
1829 * understating by the aggregate of their overlap.
1830 *
1831 * Extending this to N cpus, for a given overlap, the maximum amount we
1832	 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
1833 * cpus that overlap for this interval and w_i is the interval width.
1834 *
1835	 * On a small machine the first term is well-bounded, which bounds the
1836	 * total error since w_i is a subset of the period.  Whereas on a
1837	 * larger machine, while this first term can be larger, any w_i of
1838	 * consequential size is guaranteed to see n_i*w_i quickly converge to
1839	 * our upper bound of 1-cpu.
1840 */
1841 runnable_avg = atomic_read(&tg->runnable_avg);
1842 if (runnable_avg < NICE_0_LOAD) {
1843 se->avg.load_avg_contrib *= runnable_avg;
1844 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
1845 }
Paul Turner8165e142012-10-04 13:18:31 +02001846}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001847#else
1848static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
1849 int force_update) {}
Paul Turnerbb17f652012-10-04 13:18:31 +02001850static inline void __update_tg_runnable_avg(struct sched_avg *sa,
1851 struct cfs_rq *cfs_rq) {}
Paul Turner8165e142012-10-04 13:18:31 +02001852static inline void __update_group_entity_contrib(struct sched_entity *se) {}
Paul Turnerc566e8e2012-10-04 13:18:30 +02001853#endif
1854
Paul Turner8165e142012-10-04 13:18:31 +02001855static inline void __update_task_entity_contrib(struct sched_entity *se)
1856{
1857 u32 contrib;
1858
1859 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
1860 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
1861 contrib /= (se->avg.runnable_avg_period + 1);
1862 se->avg.load_avg_contrib = scale_load(contrib);
1863}
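/*
 * Minimal user-space sketch of the formula above (illustrative only, not
 * part of the original source; the values are assumptions chosen for the
 * example): a nice-0 task (weight 1024) that was runnable for roughly
 * half of the tracked history ends up with load_avg_contrib ~= 512.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t weight = 1024;			/* nice-0 weight, already scale_load_down()'d */
	uint32_t runnable_avg_sum = 23871;	/* ~half of the period sum */
	uint32_t runnable_avg_period = 47742;	/* ~LOAD_AVG_MAX */
	uint32_t contrib;

	contrib = (uint32_t)((uint64_t)runnable_avg_sum * weight /
			     (runnable_avg_period + 1));
	printf("load_avg_contrib ~= %u\n", contrib);	/* prints 511 */

	return 0;
}
#endif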
1864
Paul Turner2dac7542012-10-04 13:18:30 +02001865/* Compute the current contribution to load_avg by se, return any delta */
1866static long __update_entity_load_avg_contrib(struct sched_entity *se)
1867{
1868 long old_contrib = se->avg.load_avg_contrib;
1869
Paul Turner8165e142012-10-04 13:18:31 +02001870 if (entity_is_task(se)) {
1871 __update_task_entity_contrib(se);
1872 } else {
Paul Turnerbb17f652012-10-04 13:18:31 +02001873 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
Paul Turner8165e142012-10-04 13:18:31 +02001874 __update_group_entity_contrib(se);
1875 }
Paul Turner2dac7542012-10-04 13:18:30 +02001876
1877 return se->avg.load_avg_contrib - old_contrib;
1878}
1879
Paul Turner9ee474f2012-10-04 13:18:30 +02001880static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
1881 long load_contrib)
1882{
1883 if (likely(load_contrib < cfs_rq->blocked_load_avg))
1884 cfs_rq->blocked_load_avg -= load_contrib;
1885 else
1886 cfs_rq->blocked_load_avg = 0;
1887}
1888
Paul Turnerf1b17282012-10-04 13:18:31 +02001889static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
1890
Paul Turner9d85f212012-10-04 13:18:29 +02001891/* Update a sched_entity's runnable average */
Paul Turner9ee474f2012-10-04 13:18:30 +02001892static inline void update_entity_load_avg(struct sched_entity *se,
1893 int update_cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02001894{
Paul Turner2dac7542012-10-04 13:18:30 +02001895 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1896 long contrib_delta;
Paul Turnerf1b17282012-10-04 13:18:31 +02001897 u64 now;
Paul Turner2dac7542012-10-04 13:18:30 +02001898
Paul Turnerf1b17282012-10-04 13:18:31 +02001899 /*
1900	 * For a group entity we need to use the clock of the cfs_rq it owns, in
1901	 * case it is the parent of a throttled hierarchy.
1902 */
1903 if (entity_is_task(se))
1904 now = cfs_rq_clock_task(cfs_rq);
1905 else
1906 now = cfs_rq_clock_task(group_cfs_rq(se));
1907
1908 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
Paul Turner2dac7542012-10-04 13:18:30 +02001909 return;
1910
1911 contrib_delta = __update_entity_load_avg_contrib(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02001912
1913 if (!update_cfs_rq)
1914 return;
1915
Paul Turner2dac7542012-10-04 13:18:30 +02001916 if (se->on_rq)
1917 cfs_rq->runnable_load_avg += contrib_delta;
Paul Turner9ee474f2012-10-04 13:18:30 +02001918 else
1919 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
1920}
1921
1922/*
1923 * Decay the load contributed by all blocked children and account this so that
1924 * their contribution may be appropriately discounted when they wake up.
1925 */
Paul Turneraff3e492012-10-04 13:18:30 +02001926static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001927{
Paul Turnerf1b17282012-10-04 13:18:31 +02001928 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001929 u64 decays;
1930
1931 decays = now - cfs_rq->last_decay;
Paul Turneraff3e492012-10-04 13:18:30 +02001932 if (!decays && !force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02001933 return;
1934
Alex Shi25099402013-06-20 10:18:55 +08001935 if (atomic_long_read(&cfs_rq->removed_load)) {
1936 unsigned long removed_load;
1937 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
Paul Turneraff3e492012-10-04 13:18:30 +02001938 subtract_blocked_load_contrib(cfs_rq, removed_load);
1939 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001940
Paul Turneraff3e492012-10-04 13:18:30 +02001941 if (decays) {
1942 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
1943 decays);
1944 atomic64_add(decays, &cfs_rq->decay_counter);
1945 cfs_rq->last_decay = now;
1946 }
Paul Turnerc566e8e2012-10-04 13:18:30 +02001947
1948 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
Paul Turner9d85f212012-10-04 13:18:29 +02001949}
Ben Segall18bf2802012-10-04 12:51:20 +02001950
1951static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
1952{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001953 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
Paul Turnerbb17f652012-10-04 13:18:31 +02001954 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
Ben Segall18bf2802012-10-04 12:51:20 +02001955}
Paul Turner2dac7542012-10-04 13:18:30 +02001956
1957/* Add the load generated by se into cfs_rq's child load-average */
1958static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02001959 struct sched_entity *se,
1960 int wakeup)
Paul Turner2dac7542012-10-04 13:18:30 +02001961{
Paul Turneraff3e492012-10-04 13:18:30 +02001962 /*
1963	 * We track migrations using entity decay_count <= 0; on a wake-up
1964 * migration we use a negative decay count to track the remote decays
1965 * accumulated while sleeping.
Alex Shia75cdaa2013-06-20 10:18:47 +08001966 *
1967	 * Newly forked tasks are enqueued with se->avg.decay_count == 0; they
1968 * are seen by enqueue_entity_load_avg() as a migration with an already
1969 * constructed load_avg_contrib.
Paul Turneraff3e492012-10-04 13:18:30 +02001970 */
1971 if (unlikely(se->avg.decay_count <= 0)) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02001972 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
Paul Turneraff3e492012-10-04 13:18:30 +02001973 if (se->avg.decay_count) {
1974 /*
1975 * In a wake-up migration we have to approximate the
1976 * time sleeping. This is because we can't synchronize
1977 * clock_task between the two cpus, and it is not
1978 * guaranteed to be read-safe. Instead, we can
1979 * approximate this using our carried decays, which are
1980 * explicitly atomically readable.
1981 */
1982 se->avg.last_runnable_update -= (-se->avg.decay_count)
1983 << 20;
1984 update_entity_load_avg(se, 0);
1985 /* Indicate that we're now synchronized and on-rq */
1986 se->avg.decay_count = 0;
1987 }
Paul Turner9ee474f2012-10-04 13:18:30 +02001988 wakeup = 0;
1989 } else {
Alex Shi282cf492013-06-20 10:18:48 +08001990 /*
1991 * Task re-woke on same cpu (or else migrate_task_rq_fair()
1992 * would have made count negative); we must be careful to avoid
1993 * double-accounting blocked time after synchronizing decays.
1994 */
1995 se->avg.last_runnable_update += __synchronize_entity_decay(se)
1996 << 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02001997 }
1998
Paul Turneraff3e492012-10-04 13:18:30 +02001999 /* migrated tasks did not contribute to our blocked load */
2000 if (wakeup) {
Paul Turner9ee474f2012-10-04 13:18:30 +02002001 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turneraff3e492012-10-04 13:18:30 +02002002 update_entity_load_avg(se, 0);
2003 }
Paul Turner9ee474f2012-10-04 13:18:30 +02002004
Paul Turner2dac7542012-10-04 13:18:30 +02002005 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02002006 /* we force update consideration on load-balancer moves */
2007 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
Paul Turner2dac7542012-10-04 13:18:30 +02002008}
2009
Paul Turner9ee474f2012-10-04 13:18:30 +02002010/*
2011 * Remove se's load from this cfs_rq child load-average, if the entity is
2012 * transitioning to a blocked state we track its projected decay using
2013 * blocked_load_avg.
2014 */
Paul Turner2dac7542012-10-04 13:18:30 +02002015static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002016 struct sched_entity *se,
2017 int sleep)
Paul Turner2dac7542012-10-04 13:18:30 +02002018{
Paul Turner9ee474f2012-10-04 13:18:30 +02002019 update_entity_load_avg(se, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002020 /* we force update consideration on load-balancer moves */
2021 update_cfs_rq_blocked_load(cfs_rq, !sleep);
Paul Turner9ee474f2012-10-04 13:18:30 +02002022
Paul Turner2dac7542012-10-04 13:18:30 +02002023 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
Paul Turner9ee474f2012-10-04 13:18:30 +02002024 if (sleep) {
2025 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2026 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2027 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
Paul Turner2dac7542012-10-04 13:18:30 +02002028}
Vincent Guittot642dbc32013-04-18 18:34:26 +02002029
2030/*
2031 * Update the rq's load with the elapsed running time before entering
2032 * idle. If the last scheduled task is not a CFS task, idle_enter will
2033 * be the only way to update the runnable statistic.
2034 */
2035void idle_enter_fair(struct rq *this_rq)
2036{
2037 update_rq_runnable_avg(this_rq, 1);
2038}
2039
2040/*
2041 * Update the rq's load with the elapsed idle time before a task is
2042 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
2043 * be the only way to update the runnable statistic.
2044 */
2045void idle_exit_fair(struct rq *this_rq)
2046{
2047 update_rq_runnable_avg(this_rq, 0);
2048}
2049
Paul Turner9d85f212012-10-04 13:18:29 +02002050#else
Paul Turner9ee474f2012-10-04 13:18:30 +02002051static inline void update_entity_load_avg(struct sched_entity *se,
2052 int update_cfs_rq) {}
Ben Segall18bf2802012-10-04 12:51:20 +02002053static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Paul Turner2dac7542012-10-04 13:18:30 +02002054static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002055 struct sched_entity *se,
2056 int wakeup) {}
Paul Turner2dac7542012-10-04 13:18:30 +02002057static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002058 struct sched_entity *se,
2059 int sleep) {}
Paul Turneraff3e492012-10-04 13:18:30 +02002060static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2061 int force_update) {}
Paul Turner9d85f212012-10-04 13:18:29 +02002062#endif
2063
Ingo Molnar2396af62007-08-09 11:16:48 +02002064static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002065{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002066#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02002067 struct task_struct *tsk = NULL;
2068
2069 if (entity_is_task(se))
2070 tsk = task_of(se);
2071
Lucas De Marchi41acab82010-03-10 23:37:45 -03002072 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002073 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002074
2075 if ((s64)delta < 0)
2076 delta = 0;
2077
Lucas De Marchi41acab82010-03-10 23:37:45 -03002078 if (unlikely(delta > se->statistics.sleep_max))
2079 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002080
Peter Zijlstra8c79a042012-01-30 14:51:37 +01002081 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03002082 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01002083
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002084 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02002085 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002086 trace_sched_stat_sleep(tsk, delta);
2087 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002088 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03002089 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002090 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002091
2092 if ((s64)delta < 0)
2093 delta = 0;
2094
Lucas De Marchi41acab82010-03-10 23:37:45 -03002095 if (unlikely(delta > se->statistics.block_max))
2096 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002097
Peter Zijlstra8c79a042012-01-30 14:51:37 +01002098 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03002099 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02002100
Peter Zijlstrae4143142009-07-23 20:13:26 +02002101 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07002102 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002103 se->statistics.iowait_sum += delta;
2104 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002105 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07002106 }
2107
Andrew Vaginb781a602011-11-28 12:03:35 +03002108 trace_sched_stat_blocked(tsk, delta);
2109
Peter Zijlstrae4143142009-07-23 20:13:26 +02002110 /*
2111 * Blocking time is in units of nanosecs, so shift by
2112 * 20 to get a milliseconds-range estimation of the
2113 * amount of time that the task spent sleeping:
2114 */
2115 if (unlikely(prof_on == SLEEP_PROFILING)) {
2116 profile_hits(SLEEP_PROFILING,
2117 (void *)get_wchan(tsk),
2118 delta >> 20);
2119 }
2120 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02002121 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002122 }
2123#endif
2124}
2125
Peter Zijlstraddc97292007-10-15 17:00:10 +02002126static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2127{
2128#ifdef CONFIG_SCHED_DEBUG
2129 s64 d = se->vruntime - cfs_rq->min_vruntime;
2130
2131 if (d < 0)
2132 d = -d;
2133
2134 if (d > 3*sysctl_sched_latency)
2135 schedstat_inc(cfs_rq, nr_spread_over);
2136#endif
2137}
2138
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002139static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002140place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2141{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02002142 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02002143
Peter Zijlstra2cb86002007-11-09 22:39:37 +01002144 /*
2145 * The 'current' period is already promised to the current tasks,
2146 * however the extra weight of the new task will slow them down a
2147	 * little; place the new task so that it fits in the slot that
2148 * stays open at the end.
2149 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02002150 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02002151 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002152
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002153 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01002154 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002155 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02002156
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002157 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002158 * Halve their sleep time's effect, to allow
2159 * for a gentler effect of sleepers:
2160 */
2161 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2162 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02002163
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002164 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002165 }
2166
Mike Galbraithb5d9d732009-09-08 11:12:28 +02002167 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05302168 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002169}
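/*
 * Worked example (illustrative note, not part of the original source):
 * with the unscaled 6ms default latency and GENTLE_FAIR_SLEEPERS, a
 * waking task is placed at most 3ms of vruntime behind min_vruntime,
 * while a newly forked task (START_DEBIT) starts one vslice ahead of it;
 * the final max_vruntime() ensures a task that only slept briefly never
 * moves backwards in virtual time.
 */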
2170
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002171static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2172
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002173static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002174enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002175{
2176 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002177 * Update the normalized vruntime before updating min_vruntime
Kamalesh Babulal0fc576d2013-06-27 11:24:18 +05302178 * through calling update_curr().
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002179 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002180 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002181 se->vruntime += cfs_rq->min_vruntime;
2182
2183 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002184 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002185 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002186 update_curr(cfs_rq);
Paul Turnerf269ae02012-10-04 13:18:31 +02002187 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002188 account_entity_enqueue(cfs_rq, se);
2189 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002190
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002191 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002192 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02002193 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02002194 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002195
Ingo Molnard2417e52007-08-09 11:16:47 +02002196 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02002197 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002198 if (se != cfs_rq->curr)
2199 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002200 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08002201
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002202 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08002203 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002204 check_enqueue_throttle(cfs_rq);
2205 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002206}
2207
Rik van Riel2c13c9192011-02-01 09:48:37 -05002208static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01002209{
Rik van Riel2c13c9192011-02-01 09:48:37 -05002210 for_each_sched_entity(se) {
2211 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2212 if (cfs_rq->last == se)
2213 cfs_rq->last = NULL;
2214 else
2215 break;
2216 }
2217}
Peter Zijlstra2002c692008-11-11 11:52:33 +01002218
Rik van Riel2c13c9192011-02-01 09:48:37 -05002219static void __clear_buddies_next(struct sched_entity *se)
2220{
2221 for_each_sched_entity(se) {
2222 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2223 if (cfs_rq->next == se)
2224 cfs_rq->next = NULL;
2225 else
2226 break;
2227 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01002228}
2229
Rik van Rielac53db52011-02-01 09:51:03 -05002230static void __clear_buddies_skip(struct sched_entity *se)
2231{
2232 for_each_sched_entity(se) {
2233 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2234 if (cfs_rq->skip == se)
2235 cfs_rq->skip = NULL;
2236 else
2237 break;
2238 }
2239}
2240
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01002241static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2242{
Rik van Riel2c13c9192011-02-01 09:48:37 -05002243 if (cfs_rq->last == se)
2244 __clear_buddies_last(se);
2245
2246 if (cfs_rq->next == se)
2247 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05002248
2249 if (cfs_rq->skip == se)
2250 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01002251}
2252
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002253static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07002254
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002255static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002256dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002257{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002258 /*
2259 * Update run-time statistics of the 'current'.
2260 */
2261 update_curr(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002262 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002263
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02002264 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002265 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02002266#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002267 if (entity_is_task(se)) {
2268 struct task_struct *tsk = task_of(se);
2269
2270 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002271 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002272 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002273 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002274 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02002275#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02002276 }
2277
Peter Zijlstra2002c692008-11-11 11:52:33 +01002278 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01002279
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002280 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002281 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002282 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002283 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002284
2285 /*
2286 * Normalize the entity after updating the min_vruntime because the
2287 * update can refer to the ->curr item and we need to reflect this
2288 * movement in our normalized position.
2289 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002290 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002291 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07002292
Paul Turnerd8b49862011-07-21 09:43:41 -07002293 /* return excess runtime on last dequeue */
2294 return_cfs_rq_runtime(cfs_rq);
2295
Peter Zijlstra1e876232011-05-17 16:21:10 -07002296 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002297 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002298}
2299
2300/*
2301 * Preempt the current task with a newly woken task if needed:
2302 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02002303static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002304check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002305{
Peter Zijlstra11697832007-09-05 14:32:49 +02002306 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002307 struct sched_entity *se;
2308 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02002309
Peter Zijlstra6d0f0ebd2007-10-15 17:00:05 +02002310 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02002311 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002312 if (delta_exec > ideal_runtime) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002313 resched_task(rq_of(cfs_rq)->curr);
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002314 /*
2315 * The current task ran long enough, ensure it doesn't get
2316 * re-elected due to buddy favours.
2317 */
2318 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002319 return;
2320 }
2321
2322 /*
2323 * Ensure that a task that missed wakeup preemption by a
2324 * narrow margin doesn't have to wait for a full slice.
2325 * This also mitigates buddy induced latencies under load.
2326 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002327 if (delta_exec < sysctl_sched_min_granularity)
2328 return;
2329
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002330 se = __pick_first_entity(cfs_rq);
2331 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02002332
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002333 if (delta < 0)
2334 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01002335
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002336 if (delta > ideal_runtime)
2337 resched_task(rq_of(cfs_rq)->curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002338}
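/*
 * Worked example (illustrative note, not part of the original source):
 * three runnable nice-0 tasks under the unscaled 6ms default latency get
 * a slice of ~2ms each.  The running task is rescheduled once it has run
 * 2ms past prev_sum_exec_runtime, never before
 * sysctl_sched_min_granularity, and also when its vruntime has pulled
 * more than a slice ahead of the leftmost waiter.
 */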
2339
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002340static void
Ingo Molnar8494f412007-08-09 11:16:48 +02002341set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002342{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002343 /* 'current' is not kept within the tree. */
2344 if (se->on_rq) {
2345 /*
2346		 * Any task has to be enqueued before it gets to execute on
2347 * a CPU. So account for the time it spent waiting on the
2348 * runqueue.
2349 */
2350 update_stats_wait_end(cfs_rq, se);
2351 __dequeue_entity(cfs_rq, se);
2352 }
2353
Ingo Molnar79303e92007-08-09 11:16:47 +02002354 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02002355 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02002356#ifdef CONFIG_SCHEDSTATS
2357 /*
2358 * Track our maximum slice length, if the CPU's load is at
2359	 * least twice that of our own weight (i.e. don't track it
2360 * when there are only lesser-weight tasks around):
2361 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02002362 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002363 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02002364 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2365 }
2366#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02002367 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002368}
2369
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02002370static int
2371wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2372
Rik van Rielac53db52011-02-01 09:51:03 -05002373/*
2374 * Pick the next process, keeping these things in mind, in this order:
2375 * 1) keep things fair between processes/task groups
2376 * 2) pick the "next" process, since someone really wants that to run
2377 * 3) pick the "last" process, for cache locality
2378 * 4) do not run the "skip" process, if something else is available
2379 */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01002380static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002381{
Rik van Rielac53db52011-02-01 09:51:03 -05002382 struct sched_entity *se = __pick_first_entity(cfs_rq);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002383 struct sched_entity *left = se;
Peter Zijlstraf4b67552008-11-04 21:25:07 +01002384
Rik van Rielac53db52011-02-01 09:51:03 -05002385 /*
2386	 * Avoid running the skip buddy if running something else can
2387 * be done without getting too unfair.
2388 */
2389 if (cfs_rq->skip == se) {
2390 struct sched_entity *second = __pick_next_entity(se);
2391 if (second && wakeup_preempt_entity(second, left) < 1)
2392 se = second;
2393 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002394
Mike Galbraithf685cea2009-10-23 23:09:22 +02002395 /*
2396 * Prefer last buddy, try to return the CPU to a preempted task.
2397 */
2398 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2399 se = cfs_rq->last;
2400
Rik van Rielac53db52011-02-01 09:51:03 -05002401 /*
2402 * Someone really wants this to run. If it's not unfair, run it.
2403 */
2404 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2405 se = cfs_rq->next;
2406
Mike Galbraithf685cea2009-10-23 23:09:22 +02002407 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01002408
2409 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002410}
2411
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002412static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2413
Ingo Molnarab6cde22007-08-09 11:16:48 +02002414static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002415{
2416 /*
2417 * If still on the runqueue then deactivate_task()
2418 * was not called and update_curr() has to be done:
2419 */
2420 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002421 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002422
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002423 /* throttle cfs_rqs exceeding runtime */
2424 check_cfs_rq_runtime(cfs_rq);
2425
Peter Zijlstraddc97292007-10-15 17:00:10 +02002426 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002427 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02002428 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002429 /* Put 'current' back into the tree. */
2430 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02002431 /* in !on_rq case, update occurred at dequeue */
Paul Turner9ee474f2012-10-04 13:18:30 +02002432 update_entity_load_avg(prev, 1);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002433 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02002434 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002435}
2436
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002437static void
2438entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002439{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002440 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002441 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002442 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002443 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002444
Paul Turner43365bd2010-12-15 19:10:17 -08002445 /*
Paul Turner9d85f212012-10-04 13:18:29 +02002446 * Ensure that runnable average is periodically updated.
2447 */
Paul Turner9ee474f2012-10-04 13:18:30 +02002448 update_entity_load_avg(curr, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002449 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstrabf0bd942013-07-26 23:48:42 +02002450 update_cfs_shares(cfs_rq);
Paul Turner9d85f212012-10-04 13:18:29 +02002451
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002452#ifdef CONFIG_SCHED_HRTICK
2453 /*
2454 * queued ticks are scheduled to match the slice, so don't bother
2455 * validating it and just reschedule.
2456 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07002457 if (queued) {
2458 resched_task(rq_of(cfs_rq)->curr);
2459 return;
2460 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002461 /*
2462 * don't let the period tick interfere with the hrtick preemption
2463 */
2464 if (!sched_feat(DOUBLE_TICK) &&
2465 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2466 return;
2467#endif
2468
Yong Zhang2c2efae2011-07-29 16:20:33 +08002469 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002470 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002471}
2472
Paul Turnerab84d312011-07-21 09:43:28 -07002473
2474/**************************************************
2475 * CFS bandwidth control machinery
2476 */
2477
2478#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02002479
2480#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01002481static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02002482
2483static inline bool cfs_bandwidth_used(void)
2484{
Ingo Molnarc5905af2012-02-24 08:31:31 +01002485 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002486}
2487
2488void account_cfs_bandwidth_used(int enabled, int was_enabled)
2489{
2490 /* only need to count groups transitioning between enabled/!enabled */
2491 if (enabled && !was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002492 static_key_slow_inc(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002493 else if (!enabled && was_enabled)
Ingo Molnarc5905af2012-02-24 08:31:31 +01002494 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002495}
2496#else /* HAVE_JUMP_LABEL */
2497static bool cfs_bandwidth_used(void)
2498{
2499 return true;
2500}
2501
2502void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
2503#endif /* HAVE_JUMP_LABEL */
2504
Paul Turnerab84d312011-07-21 09:43:28 -07002505/*
2506 * default period for cfs group bandwidth.
2507 * default: 0.1s, units: nanoseconds
2508 */
2509static inline u64 default_cfs_period(void)
2510{
2511 return 100000000ULL;
2512}
Paul Turnerec12cb72011-07-21 09:43:30 -07002513
2514static inline u64 sched_cfs_bandwidth_slice(void)
2515{
2516 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2517}
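/*
 * Worked example (illustrative note, not part of the original source;
 * 5ms is only the usual sysctl default for the slice): a group with a
 * quota of 25ms per 100ms period hands runtime to per-CPU cfs_rqs in
 * sched_cfs_bandwidth_slice() chunks, so at most five full 5ms slices
 * can be drawn from the global pool before the group runs dry and is
 * throttled until the next period refill.
 */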
2518
Paul Turnera9cf55b2011-07-21 09:43:32 -07002519/*
2520 * Replenish runtime according to assigned quota and update expiration time.
2521 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2522 * additional synchronization around rq->lock.
2523 *
2524 * requires cfs_b->lock
2525 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02002526void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07002527{
2528 u64 now;
2529
2530 if (cfs_b->quota == RUNTIME_INF)
2531 return;
2532
2533 now = sched_clock_cpu(smp_processor_id());
2534 cfs_b->runtime = cfs_b->quota;
2535 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2536}
2537
Peter Zijlstra029632f2011-10-25 10:00:11 +02002538static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2539{
2540 return &tg->cfs_bandwidth;
2541}
2542
Paul Turnerf1b17282012-10-04 13:18:31 +02002543/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2544static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2545{
2546 if (unlikely(cfs_rq->throttle_count))
2547 return cfs_rq->throttled_clock_task;
2548
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002549 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02002550}
2551
Paul Turner85dac902011-07-21 09:43:33 -07002552/* returns 0 on failure to allocate runtime */
2553static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07002554{
2555 struct task_group *tg = cfs_rq->tg;
2556 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002557 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002558
2559 /* note: this is a positive sum as runtime_remaining <= 0 */
2560 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2561
2562 raw_spin_lock(&cfs_b->lock);
2563 if (cfs_b->quota == RUNTIME_INF)
2564 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07002565 else {
Paul Turnera9cf55b2011-07-21 09:43:32 -07002566 /*
2567 * If the bandwidth pool has become inactive, then at least one
2568 * period must have elapsed since the last consumption.
2569 * Refresh the global state and ensure bandwidth timer becomes
2570 * active.
2571 */
2572 if (!cfs_b->timer_active) {
2573 __refill_cfs_bandwidth_runtime(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07002574 __start_cfs_bandwidth(cfs_b);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002575 }
Paul Turner58088ad2011-07-21 09:43:31 -07002576
2577 if (cfs_b->runtime > 0) {
2578 amount = min(cfs_b->runtime, min_amount);
2579 cfs_b->runtime -= amount;
2580 cfs_b->idle = 0;
2581 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002582 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07002583 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002584 raw_spin_unlock(&cfs_b->lock);
2585
2586 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002587 /*
2588 * we may have advanced our local expiration to account for allowed
2589 * spread between our sched_clock and the one on which runtime was
2590 * issued.
2591 */
2592 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2593 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07002594
2595 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002596}
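/*
 * Worked example (illustrative note, not part of the original source):
 * a cfs_rq that overran by 2ms has runtime_remaining == -2ms worth of
 * nanoseconds, so min_amount == slice + 2ms.  If the global pool still
 * holds that much, runtime_remaining comes back up to a full slice and
 * the function returns 1; otherwise whatever is left is granted, and a
 * balance that stays non-positive leads to the hierarchy being throttled.
 */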
2597
2598/*
2599 * Note: This depends on the synchronization provided by sched_clock and the
2600 * fact that rq->clock snapshots this value.
2601 */
2602static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2603{
2604 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002605
2606 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002607 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07002608 return;
2609
2610 if (cfs_rq->runtime_remaining < 0)
2611 return;
2612
2613 /*
2614 * If the local deadline has passed we have to consider the
2615 * possibility that our sched_clock is 'fast' and the global deadline
2616 * has not truly expired.
2617 *
2618	 * Fortunately we can determine whether this is the case by checking
2619 * whether the global deadline has advanced.
2620 */
2621
2622 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2623 /* extend local deadline, drift is bounded above by 2 ticks */
2624 cfs_rq->runtime_expires += TICK_NSEC;
2625 } else {
2626 /* global deadline is ahead, expiration has passed */
2627 cfs_rq->runtime_remaining = 0;
2628 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002629}
2630
2631static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2632 unsigned long delta_exec)
2633{
Paul Turnera9cf55b2011-07-21 09:43:32 -07002634 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07002635 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002636 expire_cfs_rq_runtime(cfs_rq);
2637
2638 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07002639 return;
2640
Paul Turner85dac902011-07-21 09:43:33 -07002641 /*
2642 * if we're unable to extend our runtime we resched so that the active
2643 * hierarchy can be throttled
2644 */
2645 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
2646 resched_task(rq_of(cfs_rq)->curr);
Paul Turnerec12cb72011-07-21 09:43:30 -07002647}
2648
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002649static __always_inline
2650void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07002651{
Paul Turner56f570e2011-11-07 20:26:33 -08002652 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07002653 return;
2654
2655 __account_cfs_rq_runtime(cfs_rq, delta_exec);
2656}
2657
Paul Turner85dac902011-07-21 09:43:33 -07002658static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2659{
Paul Turner56f570e2011-11-07 20:26:33 -08002660 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07002661}
2662
Paul Turner64660c82011-07-21 09:43:36 -07002663/* check whether cfs_rq, or any parent, is throttled */
2664static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2665{
Paul Turner56f570e2011-11-07 20:26:33 -08002666 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07002667}
2668
2669/*
2670 * Ensure that neither of the group entities corresponding to src_cpu or
2671 * dest_cpu are members of a throttled hierarchy when performing group
2672 * load-balance operations.
2673 */
2674static inline int throttled_lb_pair(struct task_group *tg,
2675 int src_cpu, int dest_cpu)
2676{
2677 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
2678
2679 src_cfs_rq = tg->cfs_rq[src_cpu];
2680 dest_cfs_rq = tg->cfs_rq[dest_cpu];
2681
2682 return throttled_hierarchy(src_cfs_rq) ||
2683 throttled_hierarchy(dest_cfs_rq);
2684}
2685
2686/* updated child weight may affect parent so we have to do this bottom up */
2687static int tg_unthrottle_up(struct task_group *tg, void *data)
2688{
2689 struct rq *rq = data;
2690 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2691
2692 cfs_rq->throttle_count--;
2693#ifdef CONFIG_SMP
2694 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02002695 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002696 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02002697 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07002698 }
2699#endif
2700
2701 return 0;
2702}
2703
2704static int tg_throttle_down(struct task_group *tg, void *data)
2705{
2706 struct rq *rq = data;
2707 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
2708
Paul Turner82958362012-10-04 13:18:31 +02002709 /* group is entering throttled state, stop time */
2710 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002711 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07002712 cfs_rq->throttle_count++;
2713
2714 return 0;
2715}
2716
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002717static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07002718{
2719 struct rq *rq = rq_of(cfs_rq);
2720 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2721 struct sched_entity *se;
2722 long task_delta, dequeue = 1;
2723
2724 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
2725
Paul Turnerf1b17282012-10-04 13:18:31 +02002726 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07002727 rcu_read_lock();
2728 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
2729 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07002730
2731 task_delta = cfs_rq->h_nr_running;
2732 for_each_sched_entity(se) {
2733 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
2734 /* throttled entity or throttle-on-deactivate */
2735 if (!se->on_rq)
2736 break;
2737
2738 if (dequeue)
2739 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
2740 qcfs_rq->h_nr_running -= task_delta;
2741
2742 if (qcfs_rq->load.weight)
2743 dequeue = 0;
2744 }
2745
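	/*
	 * se is NULL only if we walked the whole hierarchy without finding
	 * an entity that was already off the runqueue; in that case the
	 * rq-level nr_running must be adjusted as well.
	 */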
2746 if (!se)
2747 rq->nr_running -= task_delta;
2748
2749 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002750 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07002751 raw_spin_lock(&cfs_b->lock);
2752 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
2753 raw_spin_unlock(&cfs_b->lock);
2754}
2755
Peter Zijlstra029632f2011-10-25 10:00:11 +02002756void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07002757{
2758 struct rq *rq = rq_of(cfs_rq);
2759 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2760 struct sched_entity *se;
2761 int enqueue = 1;
2762 long task_delta;
2763
Michael Wang22b958d2013-06-04 14:23:39 +08002764 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07002765
2766 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02002767
2768 update_rq_clock(rq);
2769
Paul Turner671fd9d2011-07-21 09:43:34 -07002770 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002771 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07002772 list_del_rcu(&cfs_rq->throttled_list);
2773 raw_spin_unlock(&cfs_b->lock);
2774
Paul Turner64660c82011-07-21 09:43:36 -07002775 /* update hierarchical throttle state */
2776 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
2777
Paul Turner671fd9d2011-07-21 09:43:34 -07002778 if (!cfs_rq->load.weight)
2779 return;
2780
2781 task_delta = cfs_rq->h_nr_running;
2782 for_each_sched_entity(se) {
2783 if (se->on_rq)
2784 enqueue = 0;
2785
2786 cfs_rq = cfs_rq_of(se);
2787 if (enqueue)
2788 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
2789 cfs_rq->h_nr_running += task_delta;
2790
2791 if (cfs_rq_throttled(cfs_rq))
2792 break;
2793 }
2794
2795 if (!se)
2796 rq->nr_running += task_delta;
2797
2798 /* determine whether we need to wake up potentially idle cpu */
2799 if (rq->curr == rq->idle && rq->cfs.nr_running)
2800 resched_task(rq->curr);
2801}
2802
2803static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
2804 u64 remaining, u64 expires)
2805{
2806 struct cfs_rq *cfs_rq;
2807 u64 runtime = remaining;
2808
2809 rcu_read_lock();
2810 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
2811 throttled_list) {
2812 struct rq *rq = rq_of(cfs_rq);
2813
2814 raw_spin_lock(&rq->lock);
2815 if (!cfs_rq_throttled(cfs_rq))
2816 goto next;
2817
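		/*
		 * Grant just enough runtime to bring runtime_remaining up to 1
		 * (it is <= 0 while throttled) so the cfs_rq can be unthrottled
		 * below, capped by what is left to distribute.
		 */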
2818 runtime = -cfs_rq->runtime_remaining + 1;
2819 if (runtime > remaining)
2820 runtime = remaining;
2821 remaining -= runtime;
2822
2823 cfs_rq->runtime_remaining += runtime;
2824 cfs_rq->runtime_expires = expires;
2825
2826 /* we check whether we're throttled above */
2827 if (cfs_rq->runtime_remaining > 0)
2828 unthrottle_cfs_rq(cfs_rq);
2829
2830next:
2831 raw_spin_unlock(&rq->lock);
2832
2833 if (!remaining)
2834 break;
2835 }
2836 rcu_read_unlock();
2837
2838 return remaining;
2839}
2840
Paul Turner58088ad2011-07-21 09:43:31 -07002841/*
2842 * Responsible for refilling a task_group's bandwidth and unthrottling its
2843 * cfs_rqs as appropriate. If there has been no activity within the last
2844 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
2845 * used to track this state.
2846 */
2847static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
2848{
Paul Turner671fd9d2011-07-21 09:43:34 -07002849 u64 runtime, runtime_expires;
2850 int idle = 1, throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07002851
2852 raw_spin_lock(&cfs_b->lock);
2853 /* no need to continue the timer with no bandwidth constraint */
2854 if (cfs_b->quota == RUNTIME_INF)
2855 goto out_unlock;
2856
Paul Turner671fd9d2011-07-21 09:43:34 -07002857 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2858 /* idle depends on !throttled (for the case of a large deficit) */
2859 idle = cfs_b->idle && !throttled;
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002860 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07002861
Paul Turnera9cf55b2011-07-21 09:43:32 -07002862 /* if we're going inactive then everything else can be deferred */
2863 if (idle)
2864 goto out_unlock;
2865
2866 __refill_cfs_bandwidth_runtime(cfs_b);
2867
Paul Turner671fd9d2011-07-21 09:43:34 -07002868 if (!throttled) {
2869 /* mark as potentially idle for the upcoming period */
2870 cfs_b->idle = 1;
2871 goto out_unlock;
2872 }
Paul Turner58088ad2011-07-21 09:43:31 -07002873
Nikhil Raoe8da1b12011-07-21 09:43:40 -07002874 /* account preceding periods in which throttling occurred */
2875 cfs_b->nr_throttled += overrun;
2876
Paul Turner671fd9d2011-07-21 09:43:34 -07002877 /*
2878 * There are throttled entities so we must first use the new bandwidth
2879 * to unthrottle them before making it generally available. This
2880 * ensures that all existing debts will be paid before a new cfs_rq is
2881 * allowed to run.
2882 */
2883 runtime = cfs_b->runtime;
2884 runtime_expires = cfs_b->runtime_expires;
2885 cfs_b->runtime = 0;
2886
2887 /*
2888 * This check is repeated as we are holding onto the new bandwidth
2889 * while we unthrottle. This can potentially race with an unthrottled
2890 * group trying to acquire new bandwidth from the global pool.
2891 */
2892 while (throttled && runtime > 0) {
2893 raw_spin_unlock(&cfs_b->lock);
2894 /* we can't nest cfs_b->lock while distributing bandwidth */
2895 runtime = distribute_cfs_runtime(cfs_b, runtime,
2896 runtime_expires);
2897 raw_spin_lock(&cfs_b->lock);
2898
2899 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
2900 }
2901
2902 /* return (any) remaining runtime */
2903 cfs_b->runtime = runtime;
2904 /*
2905 * While we are ensured activity in the period following an
2906 * unthrottle, this also covers the case in which the new bandwidth is
2907 * insufficient to cover the existing bandwidth deficit. (Forcing the
2908 * timer to remain active while there are any throttled entities.)
2909 */
2910 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07002911out_unlock:
2912 if (idle)
2913 cfs_b->timer_active = 0;
2914 raw_spin_unlock(&cfs_b->lock);
2915
2916 return idle;
2917}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002918
Paul Turnerd8b49862011-07-21 09:43:41 -07002919/* a cfs_rq won't donate quota below this amount */
2920static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
2921/* minimum remaining period time to redistribute slack quota */
2922static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
2923/* how long we wait to gather additional slack before distributing */
2924static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
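/*
 * Example: a cfs_rq going idle with 3ms of local runtime keeps
 * min_cfs_rq_runtime (1ms) and returns 2ms to the global pool, from where
 * the slack timer can redistribute it to throttled cfs_rqs roughly
 * cfs_bandwidth_slack_period later, unless a period refresh is imminent.
 */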
2925
2926/* are we near the end of the current quota period? */
2927static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
2928{
2929 struct hrtimer *refresh_timer = &cfs_b->period_timer;
2930 u64 remaining;
2931
2932 /* if the call-back is running a quota refresh is already occurring */
2933 if (hrtimer_callback_running(refresh_timer))
2934 return 1;
2935
2936 /* is a quota refresh about to occur? */
2937 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
2938 if (remaining < min_expire)
2939 return 1;
2940
2941 return 0;
2942}
2943
2944static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
2945{
2946 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
2947
2948 /* if there's a quota refresh soon don't bother with slack */
2949 if (runtime_refresh_within(cfs_b, min_left))
2950 return;
2951
2952 start_bandwidth_timer(&cfs_b->slack_timer,
2953 ns_to_ktime(cfs_bandwidth_slack_period));
2954}
2955
2956/* we know any runtime found here is valid as update_curr() precedes return */
2957static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2958{
2959 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2960 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
2961
2962 if (slack_runtime <= 0)
2963 return;
2964
2965 raw_spin_lock(&cfs_b->lock);
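	/*
	 * Only return runtime that was issued against the current global
	 * period (matching expiry); anything older has effectively expired.
	 */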
2966 if (cfs_b->quota != RUNTIME_INF &&
2967 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
2968 cfs_b->runtime += slack_runtime;
2969
2970 /* we are under rq->lock, defer unthrottling using a timer */
2971 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
2972 !list_empty(&cfs_b->throttled_cfs_rq))
2973 start_cfs_slack_bandwidth(cfs_b);
2974 }
2975 raw_spin_unlock(&cfs_b->lock);
2976
2977 /* even if it's not valid for return we don't want to try again */
2978 cfs_rq->runtime_remaining -= slack_runtime;
2979}
2980
2981static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2982{
Paul Turner56f570e2011-11-07 20:26:33 -08002983 if (!cfs_bandwidth_used())
2984 return;
2985
Paul Turnerfccfdc62011-11-07 20:26:34 -08002986 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07002987 return;
2988
2989 __return_cfs_rq_runtime(cfs_rq);
2990}
2991
2992/*
2993 * This is done with a timer (instead of inline with bandwidth return) since
2994 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
2995 */
2996static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
2997{
2998 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
2999 u64 expires;
3000
3001 /* confirm we're still not at a refresh boundary */
3002 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
3003 return;
3004
3005 raw_spin_lock(&cfs_b->lock);
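	/* claim the whole global pool if it holds more than one slice */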
3006 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
3007 runtime = cfs_b->runtime;
3008 cfs_b->runtime = 0;
3009 }
3010 expires = cfs_b->runtime_expires;
3011 raw_spin_unlock(&cfs_b->lock);
3012
3013 if (!runtime)
3014 return;
3015
3016 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3017
3018 raw_spin_lock(&cfs_b->lock);
3019 if (expires == cfs_b->runtime_expires)
3020 cfs_b->runtime = runtime;
3021 raw_spin_unlock(&cfs_b->lock);
3022}
3023
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003024/*
3025 * When a group wakes up we want to make sure that its quota is not already
3026 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3027 * runtime as update_curr() throttling cannot trigger until it's on-rq.
3028 */
3029static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3030{
Paul Turner56f570e2011-11-07 20:26:33 -08003031 if (!cfs_bandwidth_used())
3032 return;
3033
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003034 /* an active group must be handled by the update_curr()->put() path */
3035 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3036 return;
3037
3038 /* ensure the group is not already throttled */
3039 if (cfs_rq_throttled(cfs_rq))
3040 return;
3041
3042 /* update runtime allocation */
3043 account_cfs_rq_runtime(cfs_rq, 0);
3044 if (cfs_rq->runtime_remaining <= 0)
3045 throttle_cfs_rq(cfs_rq);
3046}
3047
3048/* conditionally throttle active cfs_rq's from put_prev_entity() */
3049static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3050{
Paul Turner56f570e2011-11-07 20:26:33 -08003051 if (!cfs_bandwidth_used())
3052 return;
3053
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003054 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3055 return;
3056
3057 /*
3058 * it's possible for a throttled entity to be forced into a running
3059 * state (e.g. set_curr_task), in this case we're finished.
3060 */
3061 if (cfs_rq_throttled(cfs_rq))
3062 return;
3063
3064 throttle_cfs_rq(cfs_rq);
3065}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003066
Peter Zijlstra029632f2011-10-25 10:00:11 +02003067static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3068{
3069 struct cfs_bandwidth *cfs_b =
3070 container_of(timer, struct cfs_bandwidth, slack_timer);
3071 do_sched_cfs_slack_timer(cfs_b);
3072
3073 return HRTIMER_NORESTART;
3074}
3075
3076static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3077{
3078 struct cfs_bandwidth *cfs_b =
3079 container_of(timer, struct cfs_bandwidth, period_timer);
3080 ktime_t now;
3081 int overrun;
3082 int idle = 0;
3083
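	/*
	 * hrtimer_forward() advances the expiry in whole periods and returns
	 * the number of periods that have elapsed; once it returns 0 the
	 * expiry lies in the future and the loop can stop.
	 */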
3084 for (;;) {
3085 now = hrtimer_cb_get_time(timer);
3086 overrun = hrtimer_forward(timer, now, cfs_b->period);
3087
3088 if (!overrun)
3089 break;
3090
3091 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3092 }
3093
3094 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3095}
3096
3097void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3098{
3099 raw_spin_lock_init(&cfs_b->lock);
3100 cfs_b->runtime = 0;
3101 cfs_b->quota = RUNTIME_INF;
3102 cfs_b->period = ns_to_ktime(default_cfs_period());
3103
3104 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3105 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3106 cfs_b->period_timer.function = sched_cfs_period_timer;
3107 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3108 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3109}
3110
3111static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3112{
3113 cfs_rq->runtime_enabled = 0;
3114 INIT_LIST_HEAD(&cfs_rq->throttled_list);
3115}
3116
3117/* requires cfs_b->lock, may release to reprogram timer */
3118void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3119{
3120 /*
3121 * The timer may be active because we're trying to set a new bandwidth
3122 * period or because we're racing with the tear-down path
3123 * (timer_active==0 becomes visible before the hrtimer call-back
3124 * terminates). In either case we ensure that it's re-programmed
3125 */
3126 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
3127 raw_spin_unlock(&cfs_b->lock);
3128 /* ensure cfs_b->lock is available while we wait */
3129 hrtimer_cancel(&cfs_b->period_timer);
3130
3131 raw_spin_lock(&cfs_b->lock);
3132 /* if someone else restarted the timer then we're done */
3133 if (cfs_b->timer_active)
3134 return;
3135 }
3136
3137 cfs_b->timer_active = 1;
3138 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3139}
3140
3141static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3142{
3143 hrtimer_cancel(&cfs_b->period_timer);
3144 hrtimer_cancel(&cfs_b->slack_timer);
3145}
3146
Arnd Bergmann38dc3342013-01-25 14:14:22 +00003147static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003148{
3149 struct cfs_rq *cfs_rq;
3150
3151 for_each_leaf_cfs_rq(rq, cfs_rq) {
3152 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3153
3154 if (!cfs_rq->runtime_enabled)
3155 continue;
3156
3157 /*
3158 * clock_task is not advancing so we just need to make sure
3159 * there's some valid quota amount
3160 */
3161 cfs_rq->runtime_remaining = cfs_b->quota;
3162 if (cfs_rq_throttled(cfs_rq))
3163 unthrottle_cfs_rq(cfs_rq);
3164 }
3165}
3166
3167#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02003168static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3169{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003170 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02003171}
3172
3173static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
3174 unsigned long delta_exec) {}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003175static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3176static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003177static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07003178
3179static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3180{
3181 return 0;
3182}
Paul Turner64660c82011-07-21 09:43:36 -07003183
3184static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3185{
3186 return 0;
3187}
3188
3189static inline int throttled_lb_pair(struct task_group *tg,
3190 int src_cpu, int dest_cpu)
3191{
3192 return 0;
3193}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003194
3195void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3196
3197#ifdef CONFIG_FAIR_GROUP_SCHED
3198static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07003199#endif
3200
Peter Zijlstra029632f2011-10-25 10:00:11 +02003201static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3202{
3203 return NULL;
3204}
3205static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07003206static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003207
3208#endif /* CONFIG_CFS_BANDWIDTH */
3209
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003210/**************************************************
3211 * CFS operations on tasks:
3212 */
3213
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003214#ifdef CONFIG_SCHED_HRTICK
3215static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
3216{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003217 struct sched_entity *se = &p->se;
3218 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3219
3220 WARN_ON(task_rq(p) != rq);
3221
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003222 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003223 u64 slice = sched_slice(cfs_rq, se);
3224 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
3225 s64 delta = slice - ran;
3226
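		/*
		 * delta is what remains of this task's slice; if it is already
		 * used up, resched right away, otherwise program the hrtick to
		 * fire when the slice ends.
		 */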
3227 if (delta < 0) {
3228 if (rq->curr == p)
3229 resched_task(p);
3230 return;
3231 }
3232
3233 /*
3234 * Don't schedule slices shorter than 10000ns, that just
3235 * doesn't make sense. Rely on vruntime for fairness.
3236 */
Peter Zijlstra31656512008-07-18 18:01:23 +02003237 if (rq->curr != p)
Peter Zijlstra157124c2008-07-28 11:53:11 +02003238 delta = max_t(s64, 10000LL, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003239
Peter Zijlstra31656512008-07-18 18:01:23 +02003240 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003241 }
3242}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003243
3244/*
3245 * called from enqueue/dequeue and updates the hrtick when the
3246 * current task is from our class and nr_running is low enough
3247 * to matter.
3248 */
3249static void hrtick_update(struct rq *rq)
3250{
3251 struct task_struct *curr = rq->curr;
3252
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003253 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003254 return;
3255
3256 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3257 hrtick_start_fair(rq, curr);
3258}
Dhaval Giani55e12e52008-06-24 23:39:43 +05303259#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003260static inline void
3261hrtick_start_fair(struct rq *rq, struct task_struct *p)
3262{
3263}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003264
3265static inline void hrtick_update(struct rq *rq)
3266{
3267}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003268#endif
3269
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003270/*
3271 * The enqueue_task method is called before nr_running is
3272 * increased. Here we update the fair scheduling stats and
3273 * then put the task into the rbtree:
3274 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00003275static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003276enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003277{
3278 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003279 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003280
3281 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003282 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003283 break;
3284 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003285 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07003286
3287 /*
3288 * end evaluation on encountering a throttled cfs_rq
3289 *
3290 * note: in the case of encountering a throttled cfs_rq we will
3291 * post the final h_nr_running increment below.
3292 */
3293 if (cfs_rq_throttled(cfs_rq))
3294 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07003295 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07003296
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003297 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003298 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003299
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003300 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08003301 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07003302 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003303
Paul Turner85dac902011-07-21 09:43:33 -07003304 if (cfs_rq_throttled(cfs_rq))
3305 break;
3306
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003307 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003308 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003309 }
3310
Ben Segall18bf2802012-10-04 12:51:20 +02003311 if (!se) {
3312 update_rq_runnable_avg(rq, rq->nr_running);
Paul Turner85dac902011-07-21 09:43:33 -07003313 inc_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02003314 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003315 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003316}
3317
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003318static void set_next_buddy(struct sched_entity *se);
3319
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003320/*
3321 * The dequeue_task method is called before nr_running is
3322 * decreased. We remove the task from the rbtree and
3323 * update the fair scheduling stats:
3324 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003325static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003326{
3327 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003328 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003329 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003330
3331 for_each_sched_entity(se) {
3332 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003333 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07003334
3335 /*
3336 * end evaluation on encountering a throttled cfs_rq
3337 *
3338 * note: in the case of encountering a throttled cfs_rq we will
3339 * post the final h_nr_running decrement below.
3340 */
3341 if (cfs_rq_throttled(cfs_rq))
3342 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07003343 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003344
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003345 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003346 if (cfs_rq->load.weight) {
3347 /*
3348 * Bias pick_next to pick a task from this cfs_rq, as
3349 * p is sleeping when it is within its sched_slice.
3350 */
3351 if (task_sleep && parent_entity(se))
3352 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07003353
3354 /* avoid re-evaluating load for this entity */
3355 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003356 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003357 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003358 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003359 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003360
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003361 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08003362 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07003363 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003364
Paul Turner85dac902011-07-21 09:43:33 -07003365 if (cfs_rq_throttled(cfs_rq))
3366 break;
3367
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003368 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003369 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003370 }
3371
Ben Segall18bf2802012-10-04 12:51:20 +02003372 if (!se) {
Paul Turner85dac902011-07-21 09:43:33 -07003373 dec_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02003374 update_rq_runnable_avg(rq, 1);
3375 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003376 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003377}
3378
Gregory Haskinse7693a32008-01-25 21:08:09 +01003379#ifdef CONFIG_SMP
Peter Zijlstra029632f2011-10-25 10:00:11 +02003380/* Used instead of source_load when we know the type == 0 */
3381static unsigned long weighted_cpuload(const int cpu)
3382{
Alex Shib92486c2013-06-20 10:18:50 +08003383 return cpu_rq(cpu)->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003384}
3385
3386/*
3387 * Return a low guess at the load of a migration-source cpu weighted
3388 * according to the scheduling class and "nice" value.
3389 *
3390 * We want to under-estimate the load of migration sources, to
3391 * balance conservatively.
3392 */
3393static unsigned long source_load(int cpu, int type)
3394{
3395 struct rq *rq = cpu_rq(cpu);
3396 unsigned long total = weighted_cpuload(cpu);
3397
3398 if (type == 0 || !sched_feat(LB_BIAS))
3399 return total;
3400
3401 return min(rq->cpu_load[type-1], total);
3402}
3403
3404/*
3405 * Return a high guess at the load of a migration-target cpu weighted
3406 * according to the scheduling class and "nice" value.
3407 */
3408static unsigned long target_load(int cpu, int type)
3409{
3410 struct rq *rq = cpu_rq(cpu);
3411 unsigned long total = weighted_cpuload(cpu);
3412
3413 if (type == 0 || !sched_feat(LB_BIAS))
3414 return total;
3415
3416 return max(rq->cpu_load[type-1], total);
3417}
3418
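/*
 * cpu_power reflects the relative compute capacity of the cpu,
 * with SCHED_POWER_SCALE representing one full nominal cpu.
 */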
3419static unsigned long power_of(int cpu)
3420{
3421 return cpu_rq(cpu)->cpu_power;
3422}
3423
3424static unsigned long cpu_avg_load_per_task(int cpu)
3425{
3426 struct rq *rq = cpu_rq(cpu);
3427 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Alex Shib92486c2013-06-20 10:18:50 +08003428 unsigned long load_avg = rq->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003429
3430 if (nr_running)
Alex Shib92486c2013-06-20 10:18:50 +08003431 return load_avg / nr_running;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003432
3433 return 0;
3434}
3435
Michael Wang62470412013-07-04 12:55:51 +08003436static void record_wakee(struct task_struct *p)
3437{
3438 /*
3439 * Rough decay (wiping) for cost saving; don't worry
3440 * about the boundary, a really active task won't care
3441 * about the loss.
3442 */
3443 if (jiffies > current->wakee_flip_decay_ts + HZ) {
3444 current->wakee_flips = 0;
3445 current->wakee_flip_decay_ts = jiffies;
3446 }
3447
3448 if (current->last_wakee != p) {
3449 current->last_wakee = p;
3450 current->wakee_flips++;
3451 }
3452}
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003453
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003454static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003455{
3456 struct sched_entity *se = &p->se;
3457 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003458 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003459
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003460#ifndef CONFIG_64BIT
3461 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003462
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003463 do {
3464 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3465 smp_rmb();
3466 min_vruntime = cfs_rq->min_vruntime;
3467 } while (min_vruntime != min_vruntime_copy);
3468#else
3469 min_vruntime = cfs_rq->min_vruntime;
3470#endif
3471
3472 se->vruntime -= min_vruntime;
Michael Wang62470412013-07-04 12:55:51 +08003473 record_wakee(p);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003474}
3475
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003476#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003477/*
3478 * effective_load() calculates the load change as seen from the root_task_group
3479 *
3480 * Adding load to a group doesn't make a group heavier, but can cause movement
3481 * of group shares between cpus. Assuming the shares were perfectly aligned one
3482 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003483 *
3484 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3485 * on this @cpu and results in a total addition (subtraction) of @wg to the
3486 * total group weight.
3487 *
3488 * Given a runqueue weight distribution (rw_i) we can compute a shares
3489 * distribution (s_i) using:
3490 *
3491 * s_i = rw_i / \Sum rw_j (1)
3492 *
3493 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3494 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3495 * shares distribution (s_i):
3496 *
3497 * rw_i = { 2, 4, 1, 0 }
3498 * s_i = { 2/7, 4/7, 1/7, 0 }
3499 *
3500 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3501 * task used to run on and the CPU the waker is running on), we need to
3502 * compute the effect of waking a task on either CPU and, in case of a sync
3503 * wakeup, compute the effect of the current task going to sleep.
3504 *
3505 * So for a change of @wl to the local @cpu with an overall group weight change
3506 * of @wl we can compute the new shares distribution (s'_i) using:
3507 *
3508 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3509 *
3510 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3511 * differences in waking a task to CPU 0. The additional task changes the
3512 * weight and shares distributions like:
3513 *
3514 * rw'_i = { 3, 4, 1, 0 }
3515 * s'_i = { 3/8, 4/8, 1/8, 0 }
3516 *
3517 * We can then compute the difference in effective weight by using:
3518 *
3519 * dw_i = S * (s'_i - s_i) (3)
3520 *
3521 * Where 'S' is the group weight as seen by its parent.
3522 *
3523 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3524 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3525 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003526 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003527static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003528{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003529 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003530
Mel Gorman58d081b2013-10-07 11:29:10 +01003531 if (!tg->parent || !wl) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003532 return wl;
3533
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003534 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003535 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003536
Paul Turner977dda72011-01-14 17:57:50 -08003537 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003538
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003539 /*
3540 * W = @wg + \Sum rw_j
3541 */
3542 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003543
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003544 /*
3545 * w = rw_i + @wl
3546 */
3547 w = se->my_q->load.weight + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003548
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003549 /*
3550 * wl = S * s'_i; see (2)
3551 */
3552 if (W > 0 && w < W)
3553 wl = (w * tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08003554 else
3555 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003556
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003557 /*
3558 * Per the above, wl is the new se->load.weight value; since
3559 * those are clipped to [MIN_SHARES, ...) do so now. See
3560 * calc_cfs_shares().
3561 */
Paul Turner977dda72011-01-14 17:57:50 -08003562 if (wl < MIN_SHARES)
3563 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003564
3565 /*
3566 * wl = dw_i = S * (s'_i - s_i); see (3)
3567 */
Paul Turner977dda72011-01-14 17:57:50 -08003568 wl -= se->load.weight;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003569
3570 /*
3571 * Recursively apply this logic to all parent groups to compute
3572 * the final effective load change on the root group. Since
3573 * only the @tg group gets extra weight, all parent groups can
3574 * only redistribute existing shares. @wl is the shift in shares
3575 * resulting from this level per the above.
3576 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003577 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003578 }
3579
3580 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003581}
3582#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003583
Mel Gorman58d081b2013-10-07 11:29:10 +01003584static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003585{
Peter Zijlstra83378262008-06-27 13:41:37 +02003586 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003587}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003588
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003589#endif
3590
Michael Wang62470412013-07-04 12:55:51 +08003591static int wake_wide(struct task_struct *p)
3592{
Peter Zijlstra7d9ffa82013-07-04 12:56:46 +08003593 int factor = this_cpu_read(sd_llc_size);
Michael Wang62470412013-07-04 12:55:51 +08003594
3595 /*
3596 * wakee_flips is the wakeup switching frequency: a high value means many
3597 * wakees or rapid switching. Using the LLC size as the factor automatically
3598 * adjusts how loose the check is, so a bigger node leads to more pull.
3599 */
3600 if (p->wakee_flips > factor) {
3601 /*
3602 * wakee is somewhat hot and needs a certain amount of cpu
3603 * resource, so if the waker is far hotter, prefer to leave
3604 * the wakee alone.
3605 */
3606 if (current->wakee_flips > (factor * p->wakee_flips))
3607 return 1;
3608 }
3609
3610 return 0;
3611}
3612
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003613static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003614{
Paul Turnere37b6a72011-01-21 20:44:59 -08003615 s64 this_load, load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003616 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003617 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003618 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02003619 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003620 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003621
Michael Wang62470412013-07-04 12:55:51 +08003622 /*
3623 * If we wake multiple tasks be careful to not bounce
3624 * ourselves around too much.
3625 */
3626 if (wake_wide(p))
3627 return 0;
3628
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003629 idx = sd->wake_idx;
3630 this_cpu = smp_processor_id();
3631 prev_cpu = task_cpu(p);
3632 load = source_load(prev_cpu, idx);
3633 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003634
3635 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003636 * If sync wakeup then subtract the (maximum possible)
3637 * effect of the currently running task from the load
3638 * of the current CPU:
3639 */
Peter Zijlstra83378262008-06-27 13:41:37 +02003640 if (sync) {
3641 tg = task_group(current);
3642 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003643
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003644 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02003645 load += effective_load(tg, prev_cpu, 0, -weight);
3646 }
3647
3648 tg = task_group(p);
3649 weight = p->se.load.weight;
3650
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003651 /*
3652 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003653 * due to the sync cause above having dropped this_load to 0, we'll
3654 * always have an imbalance, but there's really nothing you can do
3655 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02003656 *
3657 * Otherwise check if either cpus are near enough in load to allow this
3658 * task to be woken on this_cpu.
3659 */
Paul Turnere37b6a72011-01-21 20:44:59 -08003660 if (this_load > 0) {
3661 s64 this_eff_load, prev_eff_load;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02003662
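		/*
		 * Compare per-capacity loads by cross-multiplying with the
		 * other cpu's power instead of dividing; the prev_cpu side is
		 * additionally scaled up by half of the imbalance_pct margin.
		 */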
3663 this_eff_load = 100;
3664 this_eff_load *= power_of(prev_cpu);
3665 this_eff_load *= this_load +
3666 effective_load(tg, this_cpu, weight, weight);
3667
3668 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
3669 prev_eff_load *= power_of(this_cpu);
3670 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
3671
3672 balanced = this_eff_load <= prev_eff_load;
3673 } else
3674 balanced = true;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003675
3676 /*
3677 * If the currently running task will sleep within
3678 * a reasonable amount of time then attract this newly
3679 * woken task:
3680 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02003681 if (sync && balanced)
3682 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003683
Lucas De Marchi41acab82010-03-10 23:37:45 -03003684 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003685 tl_per_task = cpu_avg_load_per_task(this_cpu);
3686
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003687 if (balanced ||
3688 (this_load <= load &&
3689 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003690 /*
3691 * This domain has SD_WAKE_AFFINE and
3692 * p is cache cold in this domain, and
3693 * there is no bad imbalance.
3694 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003695 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03003696 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003697
3698 return 1;
3699 }
3700 return 0;
3701}
3702
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003703/*
3704 * find_idlest_group finds and returns the least busy CPU group within the
3705 * domain.
3706 */
3707static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02003708find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003709 int this_cpu, int load_idx)
Gregory Haskinse7693a32008-01-25 21:08:09 +01003710{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07003711 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003712 unsigned long min_load = ULONG_MAX, this_load = 0;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003713 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003714
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003715 do {
3716 unsigned long load, avg_load;
3717 int local_group;
3718 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003719
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003720 /* Skip over this group if it has no CPUs allowed */
3721 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003722 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003723 continue;
3724
3725 local_group = cpumask_test_cpu(this_cpu,
3726 sched_group_cpus(group));
3727
3728 /* Tally up the load of all CPUs in the group */
3729 avg_load = 0;
3730
3731 for_each_cpu(i, sched_group_cpus(group)) {
3732 /* Bias balancing toward cpus of our domain */
3733 if (local_group)
3734 load = source_load(i, load_idx);
3735 else
3736 load = target_load(i, load_idx);
3737
3738 avg_load += load;
3739 }
3740
3741 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02003742 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003743
3744 if (local_group) {
3745 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003746 } else if (avg_load < min_load) {
3747 min_load = avg_load;
3748 idlest = group;
3749 }
3750 } while (group = group->next, group != sd->groups);
3751
3752 if (!idlest || 100*this_load < imbalance*min_load)
3753 return NULL;
3754 return idlest;
3755}
3756
3757/*
3758 * find_idlest_cpu - find the idlest cpu among the cpus in group.
3759 */
3760static int
3761find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
3762{
3763 unsigned long load, min_load = ULONG_MAX;
3764 int idlest = -1;
3765 int i;
3766
3767 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003768 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003769 load = weighted_cpuload(i);
3770
3771 if (load < min_load || (load == min_load && i == this_cpu)) {
3772 min_load = load;
3773 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003774 }
3775 }
3776
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003777 return idlest;
3778}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003779
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003780/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003781 * Try and locate an idle CPU in the sched_domain.
3782 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003783static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003784{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003785 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07003786 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003787 int i = task_cpu(p);
3788
3789 if (idle_cpu(target))
3790 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003791
3792 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003793 * If the previous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003794 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003795 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
3796 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003797
3798 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07003799 * Otherwise, iterate the domains and find an eligible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003800 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01003801 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08003802 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07003803 sg = sd->groups;
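		/*
		 * Pick the first allowed cpu of a group whose cpus are all
		 * idle; a group containing 'target' itself is skipped.
		 */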
3804 do {
3805 if (!cpumask_intersects(sched_group_cpus(sg),
3806 tsk_cpus_allowed(p)))
3807 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02003808
Linus Torvalds37407ea2012-09-16 12:29:43 -07003809 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01003810 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07003811 goto next;
3812 }
3813
3814 target = cpumask_first_and(sched_group_cpus(sg),
3815 tsk_cpus_allowed(p));
3816 goto done;
3817next:
3818 sg = sg->next;
3819 } while (sg != sd->groups);
3820 }
3821done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01003822 return target;
3823}
3824
3825/*
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003826 * select_task_rq_fair: balance the current task (running on cpu) in domains
3827 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
3828 * SD_BALANCE_FORK and SD_BALANCE_EXEC.
3829 *
3830 * Balance, ie. select the least loaded group.
3831 *
3832 * Returns the target CPU number, or the same CPU if no balancing is needed.
3833 *
3834 * preempt must be disabled.
3835 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01003836static int
Peter Zijlstraac66f542013-10-07 11:29:16 +01003837select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003838{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003839 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003840 int cpu = smp_processor_id();
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003841 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003842 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003843 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003844
Peter Zijlstra29baa742012-04-23 12:11:21 +02003845 if (p->nr_cpus_allowed == 1)
Mike Galbraith76854c72011-11-22 15:18:24 +01003846 return prev_cpu;
3847
Peter Zijlstra0763a662009-09-14 19:37:39 +02003848 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstrafa17b502011-06-16 12:23:22 +02003849 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003850 want_affine = 1;
3851 new_cpu = prev_cpu;
3852 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01003853
Peter Zijlstradce840a2011-04-07 14:09:50 +02003854 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003855 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f428882009-12-16 18:04:34 +01003856 if (!(tmp->flags & SD_LOAD_BALANCE))
3857 continue;
3858
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003859 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003860 * If both cpu and prev_cpu are part of this domain,
3861 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01003862 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07003863 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
3864 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
3865 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08003866 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003867 }
3868
Alex Shif03542a2012-07-26 08:55:34 +08003869 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02003870 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003871 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003872
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003873 if (affine_sd) {
Alex Shif03542a2012-07-26 08:55:34 +08003874 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
Peter Zijlstradce840a2011-04-07 14:09:50 +02003875 prev_cpu = cpu;
3876
3877 new_cpu = select_idle_sibling(p, prev_cpu);
3878 goto unlock;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01003879 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02003880
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003881 while (sd) {
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003882 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003883 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003884 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003885
Peter Zijlstra0763a662009-09-14 19:37:39 +02003886 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003887 sd = sd->child;
3888 continue;
3889 }
3890
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02003891 if (sd_flag & SD_BALANCE_WAKE)
3892 load_idx = sd->wake_idx;
3893
3894 group = find_idlest_group(sd, p, cpu, load_idx);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003895 if (!group) {
3896 sd = sd->child;
3897 continue;
3898 }
3899
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02003900 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003901 if (new_cpu == -1 || new_cpu == cpu) {
3902 /* Now try balancing at a lower domain level of cpu */
3903 sd = sd->child;
3904 continue;
3905 }
3906
3907 /* Now try balancing at a lower domain level of new_cpu */
3908 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003909 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003910 sd = NULL;
3911 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02003912 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003913 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02003914 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02003915 sd = tmp;
3916 }
3917 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01003918 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02003919unlock:
3920 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01003921
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003922 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01003923}
Paul Turner0a74bef2012-10-04 13:18:30 +02003924
3925/*
3926 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
3927 * cfs_rq_of(p) references at time of call are still valid and identify the
3928 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
3929 * other assumptions, including the state of rq->lock, should be made.
3930 */
3931static void
3932migrate_task_rq_fair(struct task_struct *p, int next_cpu)
3933{
Paul Turneraff3e492012-10-04 13:18:30 +02003934 struct sched_entity *se = &p->se;
3935 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3936
3937 /*
3938 * Load tracking: accumulate removed load so that it can be processed
3939 * when we next update owning cfs_rq under rq->lock. Tasks contribute
3940 * to blocked load iff they have a positive decay-count. It can never
3941 * be negative here since on-rq tasks have decay-count == 0.
3942 */
3943 if (se->avg.decay_count) {
3944 se->avg.decay_count = -__synchronize_entity_decay(se);
Alex Shi25099402013-06-20 10:18:55 +08003945 atomic_long_add(se->avg.load_avg_contrib,
3946 &cfs_rq->removed_load);
Paul Turneraff3e492012-10-04 13:18:30 +02003947 }
Paul Turner0a74bef2012-10-04 13:18:30 +02003948}
Gregory Haskinse7693a32008-01-25 21:08:09 +01003949#endif /* CONFIG_SMP */
3950
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003951static unsigned long
3952wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003953{
3954 unsigned long gran = sysctl_sched_wakeup_granularity;
3955
3956 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003957 * Since it's curr that is running now, convert the gran from real-time
3958 * to virtual-time in its units.
Mike Galbraith13814d42010-03-11 17:17:04 +01003959 *
3960 * By using 'se' instead of 'curr' we penalize light tasks, so
3961 * they get preempted easier. That is, if 'se' < 'curr' then
3962 * the resulting gran will be larger, therefore penalizing the
3963 * lighter, if otoh 'se' > 'curr' then the resulting gran will
3964 * be smaller, again penalizing the lighter task.
3965 *
3966 * This is especially important for buddies when the leftmost
3967 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003968 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08003969 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02003970}
3971
3972/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02003973 * Should 'se' preempt 'curr'.
3974 *
3975 * |s1
3976 * |s2
3977 * |s3
3978 * g
3979 * |<--->|c
3980 *
3981 * w(c, s1) = -1
3982 * w(c, s2) = 0
3983 * w(c, s3) = 1
3984 *
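 * i.e. w = -1 when se's vruntime is not smaller than curr's (no preemption),
 * w = 0 when se's vruntime is smaller than curr's by less than the
 * granularity g, and w = 1 when it is smaller by more than g, in which
 * case se should preempt curr.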
3985 */
3986static int
3987wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
3988{
3989 s64 gran, vdiff = curr->vruntime - se->vruntime;
3990
3991 if (vdiff <= 0)
3992 return -1;
3993
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01003994 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02003995 if (vdiff > gran)
3996 return 1;
3997
3998 return 0;
3999}
4000
Peter Zijlstra02479092008-11-04 21:25:10 +01004001static void set_last_buddy(struct sched_entity *se)
4002{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004003 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4004 return;
4005
4006 for_each_sched_entity(se)
4007 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01004008}
4009
4010static void set_next_buddy(struct sched_entity *se)
4011{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004012 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4013 return;
4014
4015 for_each_sched_entity(se)
4016 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01004017}
4018
Rik van Rielac53db52011-02-01 09:51:03 -05004019static void set_skip_buddy(struct sched_entity *se)
4020{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004021 for_each_sched_entity(se)
4022 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05004023}
4024
Peter Zijlstra464b7522008-10-24 11:06:15 +02004025/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004026 * Preempt the current task with a newly woken task if needed:
4027 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02004028static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004029{
4030 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02004031 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01004032 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02004033 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004034 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01004035
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01004036 if (unlikely(se == pse))
4037 return;
4038
Paul Turner5238cdd2011-07-21 09:43:37 -07004039 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004040 * This is possible from callers such as move_task(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07004041	 * unconditionally check_preempt_curr() after an enqueue (which may have
 4042	 * led to a throttle). This both saves work and prevents false
4043 * next-buddy nomination below.
4044 */
4045 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4046 return;
4047
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004048 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02004049 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004050 next_buddy_marked = 1;
4051 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02004052
Bharata B Raoaec0a512008-08-28 14:42:49 +05304053 /*
4054 * We can come here with TIF_NEED_RESCHED already set from new task
4055 * wake up path.
Paul Turner5238cdd2011-07-21 09:43:37 -07004056 *
4057 * Note: this also catches the edge-case of curr being in a throttled
4058 * group (e.g. via set_curr_task), since update_curr() (in the
4059 * enqueue of curr) will have resulted in resched being set. This
4060 * prevents us from potentially nominating it as a false LAST_BUDDY
4061 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05304062 */
4063 if (test_tsk_need_resched(curr))
4064 return;
4065
Darren Harta2f5c9a2011-02-22 13:04:33 -08004066 /* Idle tasks are by definition preempted by non-idle tasks. */
4067 if (unlikely(curr->policy == SCHED_IDLE) &&
4068 likely(p->policy != SCHED_IDLE))
4069 goto preempt;
4070
Ingo Molnar91c234b2007-10-15 17:00:18 +02004071 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08004072 * Batch and idle tasks do not preempt non-idle tasks (their preemption
4073 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02004074 */
Ingo Molnar8ed92e512012-10-14 14:28:50 +02004075 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02004076 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004077
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004078 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07004079 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004080 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004081 if (wakeup_preempt_entity(se, pse) == 1) {
4082 /*
4083 * Bias pick_next to pick the sched entity that is
4084 * triggering this preemption.
4085 */
4086 if (!next_buddy_marked)
4087 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004088 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004089 }
Jupyung Leea65ac742009-11-17 18:51:40 +09004090
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004091 return;
4092
4093preempt:
4094 resched_task(curr);
4095 /*
4096 * Only set the backward buddy when the current task is still
4097 * on the rq. This can happen when a wakeup gets interleaved
4098 * with schedule on the ->pre_schedule() or idle_balance()
 4099	 * point, either of which can drop the rq lock.
4100 *
4101 * Also, during early boot the idle thread is in the fair class,
 4102	 * for obvious reasons it's a bad idea to schedule back to it.
4103 */
4104 if (unlikely(!se->on_rq || curr == rq->idle))
4105 return;
4106
4107 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4108 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004109}
4110
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004111static struct task_struct *pick_next_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004112{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004113 struct task_struct *p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004114 struct cfs_rq *cfs_rq = &rq->cfs;
4115 struct sched_entity *se;
4116
Tim Blechmann36ace272009-11-24 11:55:45 +01004117 if (!cfs_rq->nr_running)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004118 return NULL;
4119
4120 do {
Ingo Molnar9948f4b2007-08-09 11:16:48 +02004121 se = pick_next_entity(cfs_rq);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01004122 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004123 cfs_rq = group_cfs_rq(se);
4124 } while (cfs_rq);
4125
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004126 p = task_of(se);
Mike Galbraithb39e66e2011-11-22 15:20:07 +01004127 if (hrtick_enabled(rq))
4128 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004129
4130 return p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004131}
4132
4133/*
4134 * Account for a descheduled task:
4135 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02004136static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004137{
4138 struct sched_entity *se = &prev->se;
4139 struct cfs_rq *cfs_rq;
4140
4141 for_each_sched_entity(se) {
4142 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02004143 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004144 }
4145}
4146
Rik van Rielac53db52011-02-01 09:51:03 -05004147/*
4148 * sched_yield() is very simple
4149 *
4150 * The magic of dealing with the ->skip buddy is in pick_next_entity.
4151 */
4152static void yield_task_fair(struct rq *rq)
4153{
4154 struct task_struct *curr = rq->curr;
4155 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4156 struct sched_entity *se = &curr->se;
4157
4158 /*
4159 * Are we the only task in the tree?
4160 */
4161 if (unlikely(rq->nr_running == 1))
4162 return;
4163
4164 clear_buddies(cfs_rq, se);
4165
4166 if (curr->policy != SCHED_BATCH) {
4167 update_rq_clock(rq);
4168 /*
4169 * Update run-time statistics of the 'current'.
4170 */
4171 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01004172 /*
4173 * Tell update_rq_clock() that we've just updated,
4174 * so we don't do microscopic update in schedule()
4175 * and double the fastpath cost.
4176 */
4177 rq->skip_clock_update = 1;
Rik van Rielac53db52011-02-01 09:51:03 -05004178 }
4179
4180 set_skip_buddy(se);
4181}
4182
Mike Galbraithd95f4122011-02-01 09:50:51 -05004183static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
4184{
4185 struct sched_entity *se = &p->se;
4186
Paul Turner5238cdd2011-07-21 09:43:37 -07004187 /* throttled hierarchies are not runnable */
4188 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05004189 return false;
4190
4191 /* Tell the scheduler that we'd really like pse to run next. */
4192 set_next_buddy(se);
4193
Mike Galbraithd95f4122011-02-01 09:50:51 -05004194 yield_task_fair(rq);
4195
4196 return true;
4197}
4198
Peter Williams681f3e62007-10-24 18:23:51 +02004199#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004200/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02004201 * Fair scheduling class load-balancing methods.
4202 *
4203 * BASICS
4204 *
 4205 * per-cpu scheduler provides, namely to provide a proportional amount of compute
4206 * per-cpu scheduler provides, namely provide a proportional amount of compute
4207 * time to each task. This is expressed in the following equation:
4208 *
4209 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
4210 *
4211 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
4212 * W_i,0 is defined as:
4213 *
4214 * W_i,0 = \Sum_j w_i,j (2)
4215 *
4216 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
4217 * is derived from the nice value as per prio_to_weight[].
4218 *
4219 * The weight average is an exponential decay average of the instantaneous
4220 * weight:
4221 *
4222 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
4223 *
4224 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
4225 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
4226 * can also include other factors [XXX].
4227 *
4228 * To achieve this balance we define a measure of imbalance which follows
4229 * directly from (1):
4230 *
4231 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
4232 *
 4233 * We then move tasks around to minimize the imbalance. In the continuous
4234 * function space it is obvious this converges, in the discrete case we get
4235 * a few fun cases generally called infeasible weight scenarios.
4236 *
4237 * [XXX expand on:
4238 * - infeasible weights;
4239 * - local vs global optima in the discrete case. ]
4240 *
4241 *
4242 * SCHED DOMAINS
4243 *
4244 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
4245 * for all i,j solution, we create a tree of cpus that follows the hardware
4246 * topology where each level pairs two lower groups (or better). This results
4247 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
4248 * tree to only the first of the previous level and we decrease the frequency
 4249 * of load-balance at each level inversely proportional to the number of cpus in
4250 * the groups.
4251 *
4252 * This yields:
4253 *
4254 * log_2 n 1 n
4255 * \Sum { --- * --- * 2^i } = O(n) (5)
4256 * i = 0 2^i 2^i
4257 * `- size of each group
4258 * | | `- number of cpus doing load-balance
4259 * | `- freq
4260 * `- sum over all levels
4261 *
4262 * Coupled with a limit on how many tasks we can migrate every balance pass,
4263 * this makes (5) the runtime complexity of the balancer.
4264 *
4265 * An important property here is that each CPU is still (indirectly) connected
4266 * to every other cpu in at most O(log n) steps:
4267 *
4268 * The adjacency matrix of the resulting graph is given by:
4269 *
4270 * log_2 n
4271 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
4272 * k = 0
4273 *
4274 * And you'll find that:
4275 *
4276 * A^(log_2 n)_i,j != 0 for all i,j (7)
4277 *
4278 * Showing there's indeed a path between every cpu in at most O(log n) steps.
4279 * The task movement gives a factor of O(m), giving a convergence complexity
4280 * of:
4281 *
4282 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
4283 *
4284 *
4285 * WORK CONSERVING
4286 *
4287 * In order to avoid CPUs going idle while there's still work to do, new idle
4288 * balancing is more aggressive and has the newly idle cpu iterate up the domain
4289 * tree itself instead of relying on other CPUs to bring it work.
4290 *
4291 * This adds some complexity to both (5) and (8) but it reduces the total idle
4292 * time.
4293 *
4294 * [XXX more?]
4295 *
4296 *
4297 * CGROUPS
4298 *
4299 * Cgroups make a horror show out of (2), instead of a simple sum we get:
4300 *
4301 * s_k,i
4302 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
4303 * S_k
4304 *
4305 * Where
4306 *
4307 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
4308 *
4309 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
4310 *
 4311 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
4312 * property.
4313 *
4314 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
4315 * rewrite all of this once again.]
4316 */
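
/*
 * As a small illustration of (4), take two cpus of equal power
 * (P_i = P_j = 1) with W_i = 3 and W_j = 1, so avg(W/P) = 2:
 *
 *   imb_i,j = max{2, 3} - min{2, 1} = 3 - 1 = 2
 *
 * a positive imbalance, which the balancer reduces by moving weight from
 * cpu i towards cpu j until both sides sit near the average.
 */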
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004317
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09004318static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4319
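/*
 * A short note on the LBF_* flags below, as used by can_migrate_task() and
 * move_tasks() in this file: ALL_PINNED remains set if no examined task on
 * the source rq could run on the destination cpu, NEED_BREAK asks the
 * migration loop to drop the locks and resume later, DST_PINNED records
 * that an affinity-blocked task could still run on another cpu of the
 * destination group (see new_dst_cpu), and SOME_PINNED records that
 * affinity blocked at least one migration.
 */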
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004320#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01004321#define LBF_NEED_BREAK 0x02
Peter Zijlstra62633222013-08-19 12:41:09 +02004322#define LBF_DST_PINNED 0x04
4323#define LBF_SOME_PINNED 0x08
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004324
4325struct lb_env {
4326 struct sched_domain *sd;
4327
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004328 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05304329 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004330
4331 int dst_cpu;
4332 struct rq *dst_rq;
4333
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304334 struct cpumask *dst_grpmask;
4335 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004336 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004337 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08004338 /* The set of CPUs under consideration for load-balancing */
4339 struct cpumask *cpus;
4340
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004341 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004342
4343 unsigned int loop;
4344 unsigned int loop_break;
4345 unsigned int loop_max;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004346};
4347
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004348/*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004349 * move_task - move a task from one runqueue to another runqueue.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004350 * Both runqueues must be locked.
4351 */
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004352static void move_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004353{
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004354 deactivate_task(env->src_rq, p, 0);
4355 set_task_cpu(p, env->dst_cpu);
4356 activate_task(env->dst_rq, p, 0);
4357 check_preempt_curr(env->dst_rq, p, 0);
Rik van Riel6fe6b2d2013-10-07 11:29:08 +01004358#ifdef CONFIG_NUMA_BALANCING
4359 if (p->numa_preferred_nid != -1) {
4360 int src_nid = cpu_to_node(env->src_cpu);
4361 int dst_nid = cpu_to_node(env->dst_cpu);
4362
4363 /*
4364 * If the load balancer has moved the task then limit
4365 * migrations from taking place in the short term in
4366 * case this is a short-lived migration.
4367 */
4368 if (src_nid != dst_nid && dst_nid != p->numa_preferred_nid)
4369 p->numa_migrate_seq = 0;
4370 }
4371#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004372}
4373
4374/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02004375 * Is this task likely cache-hot:
4376 */
4377static int
4378task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4379{
4380 s64 delta;
4381
4382 if (p->sched_class != &fair_sched_class)
4383 return 0;
4384
4385 if (unlikely(p->policy == SCHED_IDLE))
4386 return 0;
4387
4388 /*
4389 * Buddy candidates are cache hot:
4390 */
4391 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4392 (&p->se == cfs_rq_of(&p->se)->next ||
4393 &p->se == cfs_rq_of(&p->se)->last))
4394 return 1;
4395
4396 if (sysctl_sched_migration_cost == -1)
4397 return 1;
4398 if (sysctl_sched_migration_cost == 0)
4399 return 0;
4400
4401 delta = now - p->se.exec_start;
4402
4403 return delta < (s64)sysctl_sched_migration_cost;
4404}
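
/*
 * Put differently: non-fair and SCHED_IDLE tasks are never considered hot,
 * buddy candidates are treated as hot under the CACHE_HOT_BUDDY feature, a
 * sysctl_sched_migration_cost of -1 means "always hot" and 0 "never hot",
 * and otherwise a task counts as hot while it last ran within
 * sysctl_sched_migration_cost nanoseconds of now.
 */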
4405
Mel Gorman3a7053b2013-10-07 11:29:00 +01004406#ifdef CONFIG_NUMA_BALANCING
4407/* Returns true if the destination node has incurred more faults */
4408static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
4409{
4410 int src_nid, dst_nid;
4411
4412 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
4413 !(env->sd->flags & SD_NUMA)) {
4414 return false;
4415 }
4416
4417 src_nid = cpu_to_node(env->src_cpu);
4418 dst_nid = cpu_to_node(env->dst_cpu);
4419
4420 if (src_nid == dst_nid ||
4421 p->numa_migrate_seq >= sysctl_numa_balancing_settle_count)
4422 return false;
4423
4424 if (dst_nid == p->numa_preferred_nid ||
Mel Gormanac8e8952013-10-07 11:29:03 +01004425 task_faults(p, dst_nid) > task_faults(p, src_nid))
Mel Gorman3a7053b2013-10-07 11:29:00 +01004426 return true;
4427
4428 return false;
4429}
Mel Gorman7a0f3082013-10-07 11:29:01 +01004430
4431
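/* Returns true if migrating to the destination node would hurt NUMA locality */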
4432static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
4433{
4434 int src_nid, dst_nid;
4435
4436 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
4437 return false;
4438
4439 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
4440 return false;
4441
4442 src_nid = cpu_to_node(env->src_cpu);
4443 dst_nid = cpu_to_node(env->dst_cpu);
4444
4445 if (src_nid == dst_nid ||
4446 p->numa_migrate_seq >= sysctl_numa_balancing_settle_count)
4447 return false;
4448
Mel Gormanac8e8952013-10-07 11:29:03 +01004449 if (task_faults(p, dst_nid) < task_faults(p, src_nid))
Mel Gorman7a0f3082013-10-07 11:29:01 +01004450 return true;
4451
4452 return false;
4453}
4454
Mel Gorman3a7053b2013-10-07 11:29:00 +01004455#else
4456static inline bool migrate_improves_locality(struct task_struct *p,
4457 struct lb_env *env)
4458{
4459 return false;
4460}
Mel Gorman7a0f3082013-10-07 11:29:01 +01004461
4462static inline bool migrate_degrades_locality(struct task_struct *p,
4463 struct lb_env *env)
4464{
4465 return false;
4466}
Mel Gorman3a7053b2013-10-07 11:29:00 +01004467#endif
4468
Peter Zijlstra029632f2011-10-25 10:00:11 +02004469/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004470 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
4471 */
4472static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004473int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004474{
4475 int tsk_cache_hot = 0;
4476 /*
4477 * We do not migrate tasks that are:
Joonsoo Kimd3198082013-04-23 17:27:40 +09004478 * 1) throttled_lb_pair, or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004479 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09004480 * 3) running (obviously), or
4481 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004482 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09004483 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
4484 return 0;
4485
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004486 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004487 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304488
Lucas De Marchi41acab82010-03-10 23:37:45 -03004489 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304490
Peter Zijlstra62633222013-08-19 12:41:09 +02004491 env->flags |= LBF_SOME_PINNED;
4492
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304493 /*
4494 * Remember if this task can be migrated to any other cpu in
4495 * our sched_group. We may want to revisit it if we couldn't
4496 * meet load balance goals by pulling other tasks on src_cpu.
4497 *
4498 * Also avoid computing new_dst_cpu if we have already computed
4499 * one in current iteration.
4500 */
Peter Zijlstra62633222013-08-19 12:41:09 +02004501 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304502 return 0;
4503
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004504		/* Prevent re-selecting dst_cpu via env's cpus */
4505 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
4506 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
Peter Zijlstra62633222013-08-19 12:41:09 +02004507 env->flags |= LBF_DST_PINNED;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004508 env->new_dst_cpu = cpu;
4509 break;
4510 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304511 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004512
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004513 return 0;
4514 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304515
 4516	/* Record that we found at least one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004517 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004518
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004519 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03004520 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004521 return 0;
4522 }
4523
4524 /*
4525 * Aggressive migration if:
Mel Gorman3a7053b2013-10-07 11:29:00 +01004526 * 1) destination numa is preferred
4527 * 2) task is cache cold, or
4528 * 3) too many balance attempts have failed.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004529 */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004530 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
Mel Gorman7a0f3082013-10-07 11:29:01 +01004531 if (!tsk_cache_hot)
4532 tsk_cache_hot = migrate_degrades_locality(p, env);
Mel Gorman3a7053b2013-10-07 11:29:00 +01004533
4534 if (migrate_improves_locality(p, env)) {
4535#ifdef CONFIG_SCHEDSTATS
4536 if (tsk_cache_hot) {
4537 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
4538 schedstat_inc(p, se.statistics.nr_forced_migrations);
4539 }
4540#endif
4541 return 1;
4542 }
4543
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004544 if (!tsk_cache_hot ||
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004545 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004546
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004547 if (tsk_cache_hot) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004548 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03004549 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004550 }
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004551
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004552 return 1;
4553 }
4554
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004555 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4556 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004557}
4558
Peter Zijlstra897c3952009-12-17 17:45:42 +01004559/*
4560 * move_one_task tries to move exactly one task from busiest to this_rq, as
4561 * part of active balancing operations within "domain".
4562 * Returns 1 if successful and 0 otherwise.
4563 *
4564 * Called with both runqueues locked.
4565 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004566static int move_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01004567{
4568 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004569
Peter Zijlstra367456c2012-02-20 21:49:09 +01004570 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01004571 if (!can_migrate_task(p, env))
4572 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004573
Peter Zijlstra367456c2012-02-20 21:49:09 +01004574 move_task(p, env);
4575 /*
4576 * Right now, this is only the second place move_task()
4577 * is called, so we can safely collect move_task()
4578 * stats here rather than inside move_task().
4579 */
4580 schedstat_inc(env->sd, lb_gained[env->idle]);
4581 return 1;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004582 }
Peter Zijlstra897c3952009-12-17 17:45:42 +01004583 return 0;
4584}
4585
Peter Zijlstraeb953082012-04-17 13:38:40 +02004586static const unsigned int sched_nr_migrate_break = 32;
4587
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004588/*
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004589 * move_tasks tries to move up to imbalance weighted load from busiest to
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004590 * this_rq, as part of a balancing operation within domain "sd".
4591 * Returns 1 if successful and 0 otherwise.
4592 *
4593 * Called with both runqueues locked.
4594 */
4595static int move_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004596{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004597 struct list_head *tasks = &env->src_rq->cfs_tasks;
4598 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004599 unsigned long load;
4600 int pulled = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004601
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004602 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004603 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004604
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004605 while (!list_empty(tasks)) {
4606 p = list_first_entry(tasks, struct task_struct, se.group_node);
4607
Peter Zijlstra367456c2012-02-20 21:49:09 +01004608 env->loop++;
4609 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004610 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004611 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004612
4613 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01004614 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02004615 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004616 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01004617 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02004618 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004619
Joonsoo Kimd3198082013-04-23 17:27:40 +09004620 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01004621 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004622
Peter Zijlstra367456c2012-02-20 21:49:09 +01004623 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004624
Peter Zijlstraeb953082012-04-17 13:38:40 +02004625 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004626 goto next;
4627
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004628 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004629 goto next;
4630
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004631 move_task(p, env);
Peter Zijlstraee00e662009-12-17 17:25:20 +01004632 pulled++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004633 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004634
4635#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01004636 /*
4637 * NEWIDLE balancing is a source of latency, so preemptible
4638 * kernels will stop after the first task is pulled to minimize
4639 * the critical section.
4640 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004641 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004642 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004643#endif
4644
Peter Zijlstraee00e662009-12-17 17:25:20 +01004645 /*
4646 * We only want to steal up to the prescribed amount of
4647 * weighted load.
4648 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004649 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01004650 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004651
Peter Zijlstra367456c2012-02-20 21:49:09 +01004652 continue;
4653next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004654 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004655 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004656
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004657 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004658 * Right now, this is one of only two places move_task() is called,
4659 * so we can safely collect move_task() stats here rather than
4660 * inside move_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004661 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004662 schedstat_add(env->sd, lb_gained[env->idle], pulled);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004663
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004664 return pulled;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004665}
4666
Peter Zijlstra230059de2009-12-17 17:47:12 +01004667#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004668/*
4669 * update tg->load_weight by folding this cpu's load_avg
4670 */
Paul Turner48a16752012-10-04 13:18:31 +02004671static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004672{
Paul Turner48a16752012-10-04 13:18:31 +02004673 struct sched_entity *se = tg->se[cpu];
4674 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004675
Paul Turner48a16752012-10-04 13:18:31 +02004676 /* throttled entities do not contribute to load */
4677 if (throttled_hierarchy(cfs_rq))
4678 return;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004679
Paul Turneraff3e492012-10-04 13:18:30 +02004680 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004681
Paul Turner82958362012-10-04 13:18:31 +02004682 if (se) {
4683 update_entity_load_avg(se, 1);
4684 /*
4685 * We pivot on our runnable average having decayed to zero for
4686 * list removal. This generally implies that all our children
4687 * have also been removed (modulo rounding error or bandwidth
4688 * control); however, such cases are rare and we can fix these
4689 * at enqueue.
4690 *
4691 * TODO: fix up out-of-order children on enqueue.
4692 */
4693 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
4694 list_del_leaf_cfs_rq(cfs_rq);
4695 } else {
Paul Turner48a16752012-10-04 13:18:31 +02004696 struct rq *rq = rq_of(cfs_rq);
Paul Turner82958362012-10-04 13:18:31 +02004697 update_rq_runnable_avg(rq, rq->nr_running);
4698 }
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004699}
4700
Paul Turner48a16752012-10-04 13:18:31 +02004701static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004702{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004703 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02004704 struct cfs_rq *cfs_rq;
4705 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004706
Paul Turner48a16752012-10-04 13:18:31 +02004707 raw_spin_lock_irqsave(&rq->lock, flags);
4708 update_rq_clock(rq);
Peter Zijlstra9763b672011-07-13 13:09:25 +02004709 /*
4710 * Iterates the task_group tree in a bottom up fashion, see
4711 * list_add_leaf_cfs_rq() for details.
4712 */
Paul Turner64660c82011-07-21 09:43:36 -07004713 for_each_leaf_cfs_rq(rq, cfs_rq) {
Paul Turner48a16752012-10-04 13:18:31 +02004714 /*
4715 * Note: We may want to consider periodically releasing
 4716		 * rq->lock around these updates so that creating many task
4717 * groups does not result in continually extending hold time.
4718 */
4719 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
Paul Turner64660c82011-07-21 09:43:36 -07004720 }
Paul Turner48a16752012-10-04 13:18:31 +02004721
4722 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004723}
4724
Peter Zijlstra9763b672011-07-13 13:09:25 +02004725/*
Vladimir Davydov68520792013-07-15 17:49:19 +04004726 * Compute the hierarchical load factor for cfs_rq and all its ancestors.
 4727 * This needs to be done in a top-down fashion because the load of a child
 4728 * group is a fraction of its parent's load.
4729 */
Vladimir Davydov68520792013-07-15 17:49:19 +04004730static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
Peter Zijlstra9763b672011-07-13 13:09:25 +02004731{
Vladimir Davydov68520792013-07-15 17:49:19 +04004732 struct rq *rq = rq_of(cfs_rq);
4733 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004734 unsigned long now = jiffies;
Vladimir Davydov68520792013-07-15 17:49:19 +04004735 unsigned long load;
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004736
Vladimir Davydov68520792013-07-15 17:49:19 +04004737 if (cfs_rq->last_h_load_update == now)
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004738 return;
4739
Vladimir Davydov68520792013-07-15 17:49:19 +04004740 cfs_rq->h_load_next = NULL;
4741 for_each_sched_entity(se) {
4742 cfs_rq = cfs_rq_of(se);
4743 cfs_rq->h_load_next = se;
4744 if (cfs_rq->last_h_load_update == now)
4745 break;
4746 }
Peter Zijlstraa35b6462012-08-08 21:46:40 +02004747
Vladimir Davydov68520792013-07-15 17:49:19 +04004748 if (!se) {
Vladimir Davydov7e3115e2013-09-14 19:39:46 +04004749 cfs_rq->h_load = cfs_rq->runnable_load_avg;
Vladimir Davydov68520792013-07-15 17:49:19 +04004750 cfs_rq->last_h_load_update = now;
4751 }
4752
4753 while ((se = cfs_rq->h_load_next) != NULL) {
4754 load = cfs_rq->h_load;
4755 load = div64_ul(load * se->avg.load_avg_contrib,
4756 cfs_rq->runnable_load_avg + 1);
4757 cfs_rq = group_cfs_rq(se);
4758 cfs_rq->h_load = load;
4759 cfs_rq->last_h_load_update = now;
4760 }
Peter Zijlstra9763b672011-07-13 13:09:25 +02004761}
4762
Peter Zijlstra367456c2012-02-20 21:49:09 +01004763static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01004764{
Peter Zijlstra367456c2012-02-20 21:49:09 +01004765 struct cfs_rq *cfs_rq = task_cfs_rq(p);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004766
Vladimir Davydov68520792013-07-15 17:49:19 +04004767 update_cfs_rq_h_load(cfs_rq);
Alex Shia003a252013-06-20 10:18:51 +08004768 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
4769 cfs_rq->runnable_load_avg + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01004770}
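
/*
 * A rough two-level example of the hierarchical load above (ignoring the
 * "+ 1" terms): if a group entity contributes half of the root cfs_rq's
 * runnable load, its cfs_rq gets h_load = root_load / 2; a task providing
 * a quarter of that cfs_rq's runnable_load_avg then reports a
 * task_h_load() of about root_load / 8.
 */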
4771#else
Paul Turner48a16752012-10-04 13:18:31 +02004772static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08004773{
4774}
4775
Peter Zijlstra367456c2012-02-20 21:49:09 +01004776static unsigned long task_h_load(struct task_struct *p)
4777{
Alex Shia003a252013-06-20 10:18:51 +08004778 return p->se.avg.load_avg_contrib;
Peter Zijlstra230059de2009-12-17 17:47:12 +01004779}
4780#endif
4781
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004782/********** Helpers for find_busiest_group ************************/
4783/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004784 * sg_lb_stats - stats of a sched_group required for load_balancing
4785 */
4786struct sg_lb_stats {
4787 unsigned long avg_load; /*Avg load across the CPUs of the group */
4788 unsigned long group_load; /* Total load over the CPUs of the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004789 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004790 unsigned long load_per_task;
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02004791 unsigned long group_power;
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004792 unsigned int sum_nr_running; /* Nr tasks running in the group */
4793 unsigned int group_capacity;
4794 unsigned int idle_cpus;
4795 unsigned int group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004796 int group_imb; /* Is there an imbalance in the group ? */
Nikhil Raofab47622010-10-15 13:12:29 -07004797 int group_has_capacity; /* Is there extra capacity in the group? */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004798};
4799
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004800/*
4801 * sd_lb_stats - Structure to store the statistics of a sched_domain
4802 * during load balancing.
4803 */
4804struct sd_lb_stats {
4805 struct sched_group *busiest; /* Busiest group in this sd */
4806 struct sched_group *local; /* Local group in this sd */
4807 unsigned long total_load; /* Total load of all groups in sd */
4808 unsigned long total_pwr; /* Total power of all groups in sd */
4809 unsigned long avg_load; /* Average load across all groups in sd */
4810
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004811 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004812 struct sg_lb_stats local_stat; /* Statistics of the local group */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09004813};
4814
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02004815static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
4816{
4817 /*
4818 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
4819 * local_stat because update_sg_lb_stats() does a full clear/assignment.
4820 * We must however clear busiest_stat::avg_load because
4821 * update_sd_pick_busiest() reads this before assignment.
4822 */
4823 *sds = (struct sd_lb_stats){
4824 .busiest = NULL,
4825 .local = NULL,
4826 .total_load = 0UL,
4827 .total_pwr = 0UL,
4828 .busiest_stat = {
4829 .avg_load = 0UL,
4830 },
4831 };
4832}
4833
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004834/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004835 * get_sd_load_idx - Obtain the load index for a given sched domain.
4836 * @sd: The sched_domain whose load_idx is to be obtained.
 4837 * @idle: The idle status of the CPU for which the load_idx is obtained.
Yacine Belkadie69f6182013-07-12 20:45:47 +02004838 *
4839 * Return: The load index.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004840 */
4841static inline int get_sd_load_idx(struct sched_domain *sd,
4842 enum cpu_idle_type idle)
4843{
4844 int load_idx;
4845
4846 switch (idle) {
4847 case CPU_NOT_IDLE:
4848 load_idx = sd->busy_idx;
4849 break;
4850
4851 case CPU_NEWLY_IDLE:
4852 load_idx = sd->newidle_idx;
4853 break;
4854 default:
4855 load_idx = sd->idle_idx;
4856 break;
4857 }
4858
4859 return load_idx;
4860}
4861
Li Zefan15f803c2013-03-05 16:07:11 +08004862static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004863{
Nikhil Rao1399fa72011-05-18 10:09:39 -07004864 return SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004865}
4866
4867unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
4868{
4869 return default_scale_freq_power(sd, cpu);
4870}
4871
Li Zefan15f803c2013-03-05 16:07:11 +08004872static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004873{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004874 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004875 unsigned long smt_gain = sd->smt_gain;
4876
4877 smt_gain /= weight;
4878
4879 return smt_gain;
4880}
4881
4882unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
4883{
4884 return default_scale_smt_power(sd, cpu);
4885}
4886
Li Zefan15f803c2013-03-05 16:07:11 +08004887static unsigned long scale_rt_power(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004888{
4889 struct rq *rq = cpu_rq(cpu);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004890 u64 total, available, age_stamp, avg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004891
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004892 /*
4893 * Since we're reading these variables without serialization make sure
4894 * we read them once before doing sanity checks on them.
4895 */
4896 age_stamp = ACCESS_ONCE(rq->age_stamp);
4897 avg = ACCESS_ONCE(rq->rt_avg);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004898
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004899 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004900
4901 if (unlikely(total < avg)) {
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004902 /* Ensures that power won't end up being negative */
4903 available = 0;
4904 } else {
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02004905 available = total - avg;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07004906 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004907
Nikhil Rao1399fa72011-05-18 10:09:39 -07004908 if (unlikely((s64)total < SCHED_POWER_SCALE))
4909 total = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004910
Nikhil Rao1399fa72011-05-18 10:09:39 -07004911 total >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004912
4913 return div_u64(available, total);
4914}
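
/*
 * The value returned above is in SCHED_POWER_SCALE units: if, say, rt_avg
 * accounts for roughly a quarter of the averaging period, about 768/1024
 * of the cpu remains available for CFS, and update_cpu_power() below
 * scales cpu_power down by that fraction.
 */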
4915
4916static void update_cpu_power(struct sched_domain *sd, int cpu)
4917{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004918 unsigned long weight = sd->span_weight;
Nikhil Rao1399fa72011-05-18 10:09:39 -07004919 unsigned long power = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004920 struct sched_group *sdg = sd->groups;
4921
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004922 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
4923 if (sched_feat(ARCH_POWER))
4924 power *= arch_scale_smt_power(sd, cpu);
4925 else
4926 power *= default_scale_smt_power(sd, cpu);
4927
Nikhil Rao1399fa72011-05-18 10:09:39 -07004928 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004929 }
4930
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004931 sdg->sgp->power_orig = power;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004932
4933 if (sched_feat(ARCH_POWER))
4934 power *= arch_scale_freq_power(sd, cpu);
4935 else
4936 power *= default_scale_freq_power(sd, cpu);
4937
Nikhil Rao1399fa72011-05-18 10:09:39 -07004938 power >>= SCHED_POWER_SHIFT;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004939
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004940 power *= scale_rt_power(cpu);
Nikhil Rao1399fa72011-05-18 10:09:39 -07004941 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004942
4943 if (!power)
4944 power = 1;
4945
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004946 cpu_rq(cpu)->cpu_power = power;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004947 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004948}
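
/*
 * I.e. the resulting cpu_power is roughly
 *
 *   1024 * (smt scale/1024) * (freq scale/1024) * (rt-free fraction/1024)
 *
 * with every factor expressed in SCHED_POWER_SCALE fixed point, and the
 * smt factor only applied for SD_SHARE_CPUPOWER domains with more than
 * one sibling.
 */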
4949
Peter Zijlstra029632f2011-10-25 10:00:11 +02004950void update_group_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004951{
4952 struct sched_domain *child = sd->child;
4953 struct sched_group *group, *sdg = sd->groups;
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004954 unsigned long power, power_orig;
Vincent Guittot4ec44122011-12-12 20:21:08 +01004955 unsigned long interval;
4956
4957 interval = msecs_to_jiffies(sd->balance_interval);
4958 interval = clamp(interval, 1UL, max_load_balance_interval);
4959 sdg->sgp->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004960
4961 if (!child) {
4962 update_cpu_power(sd, cpu);
4963 return;
4964 }
4965
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004966 power_orig = power = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004967
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004968 if (child->flags & SD_OVERLAP) {
4969 /*
4970 * SD_OVERLAP domains cannot assume that child groups
4971 * span the current group.
4972 */
4973
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004974 for_each_cpu(cpu, sched_group_cpus(sdg)) {
4975 struct sched_group *sg = cpu_rq(cpu)->sd->groups;
4976
4977 power_orig += sg->sgp->power_orig;
4978 power += sg->sgp->power;
4979 }
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004980 } else {
4981 /*
4982 * !SD_OVERLAP domains can assume that child groups
4983 * span the current group.
4984 */
4985
4986 group = child->groups;
4987 do {
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004988 power_orig += group->sgp->power_orig;
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02004989 power += group->sgp->power;
4990 group = group->next;
4991 } while (group != child->groups);
4992 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004993
Peter Zijlstra863bffc2013-08-28 11:44:39 +02004994 sdg->sgp->power_orig = power_orig;
4995 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004996}
4997
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10004998/*
4999 * Try and fix up capacity for tiny siblings, this is needed when
5000 * things like SD_ASYM_PACKING need f_b_g to select another sibling
5001 * which on its own isn't powerful enough.
5002 *
5003 * See update_sd_pick_busiest() and check_asym_packing().
5004 */
5005static inline int
5006fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
5007{
5008 /*
Nikhil Rao1399fa72011-05-18 10:09:39 -07005009 * Only siblings can have significantly less than SCHED_POWER_SCALE
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005010 */
Peter Zijlstraa6c75f22011-04-07 14:09:52 +02005011 if (!(sd->flags & SD_SHARE_CPUPOWER))
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005012 return 0;
5013
5014 /*
5015 * If ~90% of the cpu_power is still there, we're good.
5016 */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005017 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005018 return 1;
5019
5020 return 0;
5021}
5022
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005023/*
5024 * Group imbalance indicates (and tries to solve) the problem where balancing
5025 * groups is inadequate due to tsk_cpus_allowed() constraints.
5026 *
5027 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
5028 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
5029 * Something like:
5030 *
5031 * { 0 1 2 3 } { 4 5 6 7 }
5032 * * * * *
5033 *
5034 * If we were to balance group-wise we'd place two tasks in the first group and
5035 * two tasks in the second group. Clearly this is undesired as it will overload
5036 * cpu 3 and leave one of the cpus in the second group unused.
5037 *
5038 * The current solution to this issue is detecting the skew in the first group
Peter Zijlstra62633222013-08-19 12:41:09 +02005039 * by noticing the lower domain failed to reach balance and had difficulty
5040 * moving tasks due to affinity constraints.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005041 *
5042 * When this is so detected; this group becomes a candidate for busiest; see
5043 * update_sd_pick_busiest(). And calculcate_imbalance() and
Peter Zijlstra62633222013-08-19 12:41:09 +02005044 * find_busiest_group() avoid some of the usual balance conditions to allow it
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005045 * to create an effective group imbalance.
5046 *
5047 * This is a somewhat tricky proposition since the next run might not find the
5048 * group imbalance and decide the groups need to be balanced again. A most
5049 * subtle and fragile situation.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005050 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005051
Peter Zijlstra62633222013-08-19 12:41:09 +02005052static inline int sg_imbalanced(struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005053{
Peter Zijlstra62633222013-08-19 12:41:09 +02005054 return group->sgp->imbalance;
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005055}
5056
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005057/*
5058 * Compute the group capacity.
5059 *
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005060 * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
5061 * first dividing out the smt factor and computing the actual number of cores
 5062 * and limiting the power-unit capacity with that.
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005063 */
5064static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
5065{
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005066 unsigned int capacity, smt, cpus;
5067 unsigned int power, power_orig;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005068
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005069 power = group->sgp->power;
5070 power_orig = group->sgp->power_orig;
5071 cpus = group->group_weight;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005072
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005073 /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
5074 smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
5075 capacity = cpus / smt; /* cores */
5076
5077 capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005078 if (!capacity)
5079 capacity = fix_small_capacity(env->sd, group);
5080
5081 return capacity;
5082}
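
/*
 * Worked example with assumed numbers: a group of 4 SMT-2 cores (8 cpus,
 * smt_gain 1178) has power_orig ~= 4 * 1178 = 4712, so a naive
 * DIV_ROUND_CLOSEST(4712, 1024) would report 5 'phantom' cores. With the
 * correction above:
 *
 *   smt      = DIV_ROUND_UP(1024 * 8, 4712) = 2
 *   capacity = 8 / 2                        = 4 cores
 */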
5083
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005084/**
5085 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
5086 * @env: The load balancing environment.
5087 * @group: sched_group whose statistics are to be updated.
5088 * @load_idx: Load index of sched_domain of this_cpu for load calc.
5089 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005090 * @sgs: variable to hold the statistics for this group.
5091 */
5092static inline void update_sg_lb_stats(struct lb_env *env,
5093 struct sched_group *group, int load_idx,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005094 int local_group, struct sg_lb_stats *sgs)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005095{
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005096 unsigned long nr_running;
5097 unsigned long load;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005098 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005099
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005100 memset(sgs, 0, sizeof(*sgs));
5101
Michael Wangb94031302012-07-12 16:10:13 +08005102 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005103 struct rq *rq = cpu_rq(i);
5104
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02005105 nr_running = rq->nr_running;
5106
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005107 /* Bias balancing toward cpus of our domain */
Peter Zijlstra62633222013-08-19 12:41:09 +02005108 if (local_group)
Peter Zijlstra04f733b2012-05-11 00:12:02 +02005109 load = target_load(i, load_idx);
Peter Zijlstra62633222013-08-19 12:41:09 +02005110 else
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005111 load = source_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005112
5113 sgs->group_load += load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02005114 sgs->sum_nr_running += nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005115 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005116 if (idle_cpu(i))
5117 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005118 }
5119
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005120 /* Adjust by relative CPU power of the group */
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005121 sgs->group_power = group->sgp->power;
5122 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005123
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005124 if (sgs->sum_nr_running)
Peter Zijlstra38d0f772013-08-15 19:47:56 +02005125 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005126
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005127 sgs->group_weight = group->group_weight;
Nikhil Raofab47622010-10-15 13:12:29 -07005128
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005129 sgs->group_imb = sg_imbalanced(group);
5130 sgs->group_capacity = sg_capacity(env, group);
5131
Nikhil Raofab47622010-10-15 13:12:29 -07005132 if (sgs->group_capacity > sgs->sum_nr_running)
5133 sgs->group_has_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005134}
5135
5136/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10005137 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07005138 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005139 * @sds: sched_domain statistics
5140 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10005141 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10005142 *
5143 * Determine if @sg is a busier group than the previously selected
5144 * busiest group.
Yacine Belkadie69f6182013-07-12 20:45:47 +02005145 *
5146 * Return: %true if @sg is a busier group than the previously selected
5147 * busiest group. %false otherwise.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005148 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005149static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10005150 struct sd_lb_stats *sds,
5151 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005152 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005153{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005154 if (sgs->avg_load <= sds->busiest_stat.avg_load)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005155 return false;
5156
5157 if (sgs->sum_nr_running > sgs->group_capacity)
5158 return true;
5159
5160 if (sgs->group_imb)
5161 return true;
5162
5163 /*
5164 * ASYM_PACKING needs to move all the work to the lowest
5165 * numbered CPUs in the group, therefore mark all groups
5166 * higher than ourself as busy.
5167 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005168 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
5169 env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005170 if (!sds->busiest)
5171 return true;
5172
5173 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
5174 return true;
5175 }
5176
5177 return false;
5178}
5179
5180/**
Hui Kang461819a2011-10-11 23:00:59 -04005181 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07005182 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005183 * @balance: Should we balance.
5184 * @sds: variable to hold the statistics for this sched_domain.
5185 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005186static inline void update_sd_lb_stats(struct lb_env *env,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005187 struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005188{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005189 struct sched_domain *child = env->sd->child;
5190 struct sched_group *sg = env->sd->groups;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005191 struct sg_lb_stats tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005192 int load_idx, prefer_sibling = 0;
5193
5194 if (child && child->flags & SD_PREFER_SIBLING)
5195 prefer_sibling = 1;
5196
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005197 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005198
5199 do {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005200 struct sg_lb_stats *sgs = &tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005201 int local_group;
5202
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005203 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005204 if (local_group) {
5205 sds->local = sg;
5206 sgs = &sds->local_stat;
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005207
5208 if (env->idle != CPU_NEWLY_IDLE ||
5209 time_after_eq(jiffies, sg->sgp->next_update))
5210 update_group_power(env->sd, env->dst_cpu);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005211 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005212
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005213 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005214
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005215 if (local_group)
5216 goto next_group;
5217
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005218 /*
 5219		 * In case the child domain prefers tasks to go to siblings
Michael Neuling532cb4c2010-06-08 14:57:02 +10005220 * first, lower the sg capacity to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07005221 * and move all the excess tasks away. We lower the capacity
5222 * of a group only if the local group has the capacity to fit
5223 * these excess tasks, i.e. nr_running < group_capacity. The
5224 * extra check prevents the case where you always pull from the
5225 * heaviest group when it is already under-utilized (possible
 5226		 * when a large-weight task outweighs the other tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005227 */
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005228 if (prefer_sibling && sds->local &&
5229 sds->local_stat.group_has_capacity)
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005230 sgs->group_capacity = min(sgs->group_capacity, 1U);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005231
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005232 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005233 sds->busiest = sg;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005234 sds->busiest_stat = *sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005235 }
5236
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005237next_group:
5238 /* Now, start updating sd_lb_stats */
5239 sds->total_load += sgs->group_load;
5240 sds->total_pwr += sgs->group_power;
5241
Michael Neuling532cb4c2010-06-08 14:57:02 +10005242 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005243 } while (sg != env->sd->groups);
Michael Neuling532cb4c2010-06-08 14:57:02 +10005244}
5245
Michael Neuling532cb4c2010-06-08 14:57:02 +10005246/**
5247 * check_asym_packing - Check to see if the group is packed into the
 5248 * sched domain.
5249 *
 5250 * This is primarily intended to be used at the sibling level. Some
5251 * cores like POWER7 prefer to use lower numbered SMT threads. In the
5252 * case of POWER7, it can move to lower SMT modes only when higher
5253 * threads are idle. When in lower SMT modes, the threads will
5254 * perform better since they share less core resources. Hence when we
5255 * have idle threads, we want them to be the higher ones.
5256 *
5257 * This packing function is run on idle threads. It checks to see if
5258 * the busiest CPU in this domain (core in the P7 case) has a higher
5259 * CPU number than the packing function is being run on. Here we are
5260 * assuming a lower CPU number is equivalent to a lower SMT thread
5261 * number.
5262 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02005263 * Return: 1 when packing is required and a task should be moved to
Michael Neulingb6b12292010-06-10 12:06:21 +10005264 * this CPU. The amount of the imbalance is returned in *imbalance.
5265 *
Randy Dunlapcd968912012-06-08 13:18:33 -07005266 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005267 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10005268 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005269static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005270{
5271 int busiest_cpu;
5272
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005273 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10005274 return 0;
5275
5276 if (!sds->busiest)
5277 return 0;
5278
5279 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005280 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005281 return 0;
5282
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005283 env->imbalance = DIV_ROUND_CLOSEST(
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005284 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
5285 SCHED_POWER_SCALE);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005286
Michael Neuling532cb4c2010-06-08 14:57:02 +10005287 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005288}
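/*
 * Illustrative sketch (not from the original source): one worked instance of
 * the imbalance computation in check_asym_packing() above, assuming
 * SCHED_POWER_SCALE == 1024 and hypothetical stats for the busiest group.
 *
 *   busiest_stat.avg_load    = 512   (scaled load)
 *   busiest_stat.group_power = 2048  (two full-power CPUs)
 *
 *   env->imbalance = DIV_ROUND_CLOSEST(512 * 2048, 1024) = 1024
 *
 * i.e. the group's scaled average load is converted back into an absolute
 * load figure (here 1024), which becomes the amount to pull toward the
 * lower-numbered packing CPU.
 */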
5289
5290/**
5291 * fix_small_imbalance - Calculate the minor imbalance that exists
5292 * amongst the groups of a sched_domain, during
5293 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07005294 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005295 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005296 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005297static inline
5298void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005299{
5300 unsigned long tmp, pwr_now = 0, pwr_move = 0;
5301 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005302 unsigned long scaled_busy_load_per_task;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005303 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005304
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005305 local = &sds->local_stat;
5306 busiest = &sds->busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005307
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005308 if (!local->sum_nr_running)
5309 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
5310 else if (busiest->load_per_task > local->load_per_task)
5311 imbn = 1;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005312
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005313 scaled_busy_load_per_task =
5314 (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005315 busiest->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005316
Vladimir Davydov3029ede2013-09-15 17:49:14 +04005317 if (busiest->avg_load + scaled_busy_load_per_task >=
5318 local->avg_load + (scaled_busy_load_per_task * imbn)) {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005319 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005320 return;
5321 }
5322
5323 /*
5324 * OK, we don't have enough imbalance to justify moving tasks,
5325 * however we may be able to increase total CPU power used by
5326 * moving them.
5327 */
5328
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005329 pwr_now += busiest->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005330 min(busiest->load_per_task, busiest->avg_load);
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005331 pwr_now += local->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005332 min(local->load_per_task, local->avg_load);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005333 pwr_now /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005334
5335 /* Amount of load we'd subtract */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005336 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005337 busiest->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005338 if (busiest->avg_load > tmp) {
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005339 pwr_move += busiest->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005340 min(busiest->load_per_task,
5341 busiest->avg_load - tmp);
5342 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005343
5344 /* Amount of load we'd add */
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005345 if (busiest->avg_load * busiest->group_power <
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005346 busiest->load_per_task * SCHED_POWER_SCALE) {
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005347 tmp = (busiest->avg_load * busiest->group_power) /
5348 local->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005349 } else {
5350 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005351 local->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005352 }
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005353 pwr_move += local->group_power *
5354 min(local->load_per_task, local->avg_load + tmp);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005355 pwr_move /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005356
5357 /* Move if we gain throughput */
5358 if (pwr_move > pwr_now)
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005359 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005360}
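/*
 * Illustrative sketch (not from the original source): the early-exit path of
 * fix_small_imbalance() above, with hypothetical numbers, assuming
 * SCHED_POWER_SCALE == 1024 and imbn == 2.
 *
 *   busiest->load_per_task = 1024, busiest->group_power = 2048
 *   => scaled_busy_load_per_task = (1024 * 1024) / 2048 = 512
 *
 *   busiest->avg_load = 1536, local->avg_load = 512
 *   1536 + 512 (= 2048)  >=  512 + 512 * 2 (= 1536)
 *
 * The inequality holds, so env->imbalance is simply set to
 * busiest->load_per_task (1024): moving one typical task is enough.
 */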
5361
5362/**
5363 * calculate_imbalance - Calculate the amount of imbalance present within the
5364 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005365 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005366 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005367 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005368static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005369{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005370 unsigned long max_pull, load_above_capacity = ~0UL;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005371 struct sg_lb_stats *local, *busiest;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005372
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005373 local = &sds->local_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005374 busiest = &sds->busiest_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005375
5376 if (busiest->group_imb) {
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005377 /*
5378 * In the group_imb case we cannot rely on group-wide averages
5379 * to ensure cpu-load equilibrium, look at wider averages. XXX
5380 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005381 busiest->load_per_task =
5382 min(busiest->load_per_task, sds->avg_load);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005383 }
5384
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005385 /*
5386 * In the presence of smp nice balancing, certain scenarios can have
5387	 * max load less than avg load (as we skip the groups at or below
5388	 * their cpu_power while calculating max_load).
5389 */
Vladimir Davydovb1885552013-09-15 17:49:13 +04005390 if (busiest->avg_load <= sds->avg_load ||
5391 local->avg_load >= sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005392 env->imbalance = 0;
5393 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005394 }
5395
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005396 if (!busiest->group_imb) {
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005397 /*
5398 * Don't want to pull so many tasks that a group would go idle.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005399 * Except of course for the group_imb case, since then we might
5400 * have to drop below capacity to reach cpu-load equilibrium.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005401 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005402 load_above_capacity =
5403 (busiest->sum_nr_running - busiest->group_capacity);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005404
Nikhil Rao1399fa72011-05-18 10:09:39 -07005405 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005406 load_above_capacity /= busiest->group_power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005407 }
5408
5409 /*
5410 * We're trying to get all the cpus to the average_load, so we don't
5411 * want to push ourselves above the average load, nor do we wish to
5412 * reduce the max loaded cpu below the average load. At the same time,
5413 * we also don't want to reduce the group load below the group capacity
5414 * (so that we can implement power-savings policies etc). Thus we look
5415 * for the minimum possible imbalance.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005416 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005417 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005418
5419 /* How much load to actually move to equalise the imbalance */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005420 env->imbalance = min(
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005421 max_pull * busiest->group_power,
5422 (sds->avg_load - local->avg_load) * local->group_power
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005423 ) / SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005424
5425 /*
5426 * if *imbalance is less than the average load per runnable task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005427 * there is no guarantee that any tasks will be moved so we'll have
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005428 * a think about bumping its value to force at least one task to be
5429 * moved
5430 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005431 if (env->imbalance < busiest->load_per_task)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005432 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005433}
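/*
 * Illustrative sketch (not from the original source): the main imbalance
 * formula in calculate_imbalance() above, with hypothetical, already-scaled
 * numbers and SCHED_POWER_SCALE == 1024; assume load_above_capacity works
 * out larger than the gap to the domain average, so it does not limit
 * max_pull.
 *
 *   sds->avg_load = 1024, busiest->avg_load = 1536, local->avg_load = 512
 *   busiest->group_power = local->group_power = 1024
 *
 *   max_pull       = min(1536 - 1024, load_above_capacity) = 512
 *   env->imbalance = min(512 * 1024, (1024 - 512) * 1024) / 1024 = 512
 *
 * Both sides of the min() agree here: we pull just enough to bring the
 * busiest group down to the domain average without pushing the local group
 * above it.
 */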
Nikhil Raofab47622010-10-15 13:12:29 -07005434
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005435/******* find_busiest_group() helpers end here *********************/
5436
5437/**
5438 * find_busiest_group - Returns the busiest group within the sched_domain
5439 * if there is an imbalance. If there isn't an imbalance, and
5440 * the user has opted for power-savings, it returns a group whose
5441 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
5442 * such a group exists.
5443 *
5444 * Also calculates the amount of weighted load which should be moved
5445 * to restore balance.
5446 *
Randy Dunlapcd968912012-06-08 13:18:33 -07005447 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005448 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02005449 * Return: - The busiest group if imbalance exists.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005450 * - If no imbalance and user has opted for power-savings balance,
5451 * return the least loaded group whose CPUs can be
5452 * put to idle by rebalancing its tasks onto our group.
5453 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005454static struct sched_group *find_busiest_group(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005455{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005456 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005457 struct sd_lb_stats sds;
5458
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005459 init_sd_lb_stats(&sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005460
5461 /*
5462	 * Compute the various statistics relevant for load balancing at
5463 * this level.
5464 */
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005465 update_sd_lb_stats(env, &sds);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005466 local = &sds.local_stat;
5467 busiest = &sds.busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005468
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005469 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
5470 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10005471 return sds.busiest;
5472
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005473 /* There is no busy sibling group to pull tasks from */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005474 if (!sds.busiest || busiest->sum_nr_running == 0)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005475 goto out_balanced;
5476
Nikhil Rao1399fa72011-05-18 10:09:39 -07005477 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
Ken Chenb0432d82011-04-07 17:23:22 -07005478
Peter Zijlstra866ab432011-02-21 18:56:47 +01005479 /*
5480 * If the busiest group is imbalanced the below checks don't
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005481 * work because they assume all things are equal, which typically
Peter Zijlstra866ab432011-02-21 18:56:47 +01005482 * isn't true due to cpus_allowed constraints and the like.
5483 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005484 if (busiest->group_imb)
Peter Zijlstra866ab432011-02-21 18:56:47 +01005485 goto force_balance;
5486
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005487 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005488 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
5489 !busiest->group_has_capacity)
Nikhil Raofab47622010-10-15 13:12:29 -07005490 goto force_balance;
5491
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005492 /*
5493 * If the local group is more busy than the selected busiest group
5494 * don't try and pull any tasks.
5495 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005496 if (local->avg_load >= busiest->avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005497 goto out_balanced;
5498
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005499 /*
5500 * Don't pull any tasks if this group is already above the domain
5501 * average load.
5502 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005503 if (local->avg_load >= sds.avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005504 goto out_balanced;
5505
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005506 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005507 /*
5508		 * This cpu is idle. If the busiest group doesn't
5509		 * have more tasks than the number of available cpu's and
5510		 * there is no imbalance between this and the busiest group
5511		 * wrt idle cpu's, it is balanced.
5512 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005513 if ((local->idle_cpus < busiest->idle_cpus) &&
5514 busiest->sum_nr_running <= busiest->group_weight)
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005515 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01005516 } else {
5517 /*
5518 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
5519 * imbalance_pct to be conservative.
5520 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005521 if (100 * busiest->avg_load <=
5522 env->sd->imbalance_pct * local->avg_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01005523 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005524 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005525
Nikhil Raofab47622010-10-15 13:12:29 -07005526force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005527 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005528 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005529 return sds.busiest;
5530
5531out_balanced:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005532 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005533 return NULL;
5534}
5535
5536/*
5537 * find_busiest_queue - find the busiest runqueue among the cpus in group.
5538 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005539static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08005540 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005541{
5542 struct rq *busiest = NULL, *rq;
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005543 unsigned long busiest_load = 0, busiest_power = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005544 int i;
5545
Peter Zijlstra6906a402013-08-19 15:20:21 +02005546 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005547 unsigned long power = power_of(i);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005548 unsigned long capacity = DIV_ROUND_CLOSEST(power,
5549 SCHED_POWER_SCALE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005550 unsigned long wl;
5551
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005552 if (!capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005553 capacity = fix_small_capacity(env->sd, group);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005554
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005555 rq = cpu_rq(i);
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005556 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005557
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005558 /*
5559 * When comparing with imbalance, use weighted_cpuload()
5560 * which is not scaled with the cpu power.
5561 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005562 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005563 continue;
5564
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005565 /*
5566 * For the load comparisons with the other cpu's, consider
5567 * the weighted_cpuload() scaled with the cpu power, so that
5568 * the load can be moved away from the cpu that is potentially
5569 * running at a lower capacity.
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005570 *
5571 * Thus we're looking for max(wl_i / power_i), crosswise
5572 * multiplication to rid ourselves of the division works out
5573 * to: wl_i * power_j > wl_j * power_i; where j is our
5574 * previous maximum.
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01005575 */
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005576 if (wl * busiest_power > busiest_load * power) {
5577 busiest_load = wl;
5578 busiest_power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005579 busiest = rq;
5580 }
5581 }
5582
5583 return busiest;
5584}
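/*
 * Illustrative sketch (not from the original source): the cross-multiplied
 * comparison used in the find_busiest_queue() loop above, with hypothetical
 * per-cpu values.
 *
 *   current maximum: wl_j = 2048 on a cpu with power_j = 1024  (ratio 2.0)
 *   candidate:       wl_i = 1536 on a cpu with power_i =  512  (ratio 3.0)
 *
 *   wl_i * power_j > wl_j * power_i
 *   1536 * 1024 (= 1572864) > 2048 * 512 (= 1048576)  -> true
 *
 * so the candidate becomes the new busiest runqueue even though its raw load
 * is lower: per unit of cpu power it is the more loaded one, which is exactly
 * what max(wl_i / power_i) is meant to find without doing a division.
 */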
5585
5586/*
5587 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
5588 * so long as it is large enough.
5589 */
5590#define MAX_PINNED_INTERVAL 512
5591
5592/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09005593DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005594
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005595static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005596{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005597 struct sched_domain *sd = env->sd;
5598
5599 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005600
5601 /*
5602 * ASYM_PACKING needs to force migrate tasks from busy but
5603 * higher numbered CPUs in order to pack all tasks in the
5604 * lowest numbered CPUs.
5605 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005606 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005607 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01005608 }
5609
5610 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
5611}
5612
Tejun Heo969c7922010-05-06 18:49:21 +02005613static int active_load_balance_cpu_stop(void *data);
5614
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005615static int should_we_balance(struct lb_env *env)
5616{
5617 struct sched_group *sg = env->sd->groups;
5618 struct cpumask *sg_cpus, *sg_mask;
5619 int cpu, balance_cpu = -1;
5620
5621 /*
5622 * In the newly idle case, we will allow all the cpu's
5623 * to do the newly idle load balance.
5624 */
5625 if (env->idle == CPU_NEWLY_IDLE)
5626 return 1;
5627
5628 sg_cpus = sched_group_cpus(sg);
5629 sg_mask = sched_group_mask(sg);
5630 /* Try to find first idle cpu */
5631 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
5632 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
5633 continue;
5634
5635 balance_cpu = cpu;
5636 break;
5637 }
5638
5639 if (balance_cpu == -1)
5640 balance_cpu = group_balance_cpu(sg);
5641
5642 /*
5643	 * First idle cpu or the first cpu (busiest) in this sched group
5644 * is eligible for doing load balancing at this and above domains.
5645 */
Joonsoo Kimb0cff9d2013-09-10 15:54:49 +09005646 return balance_cpu == env->dst_cpu;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005647}
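/*
 * Illustrative sketch (not from the original source) of how the check above
 * serializes periodic balancing within a group: take a hypothetical
 * sched_group spanning cpus {0,1,2,3} where cpu 2 is the first idle cpu in
 * env->cpus. Only the invocation with env->dst_cpu == 2 returns 1; the calls
 * on cpus 0, 1 and 3 return 0, and load_balance() then clears
 * *continue_balancing and bails out, so exactly one cpu walks this domain per
 * balancing round (newly-idle balancing is exempt and always proceeds).
 */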
5648
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005649/*
5650 * Check this_cpu to ensure it is balanced within domain. Attempt to move
5651 * tasks if there is an imbalance.
5652 */
5653static int load_balance(int this_cpu, struct rq *this_rq,
5654 struct sched_domain *sd, enum cpu_idle_type idle,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005655 int *continue_balancing)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005656{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305657 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra62633222013-08-19 12:41:09 +02005658 struct sched_domain *sd_parent = sd->parent;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005659 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005660 struct rq *busiest;
5661 unsigned long flags;
Joonsoo Kime6252c32013-04-23 17:27:41 +09005662 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005663
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005664 struct lb_env env = {
5665 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005666 .dst_cpu = this_cpu,
5667 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305668 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005669 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02005670 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08005671 .cpus = cpus,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005672 };
5673
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005674 /*
5675 * For NEWLY_IDLE load_balancing, we don't need to consider
5676 * other cpus in our group
5677 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005678 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005679 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09005680
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005681 cpumask_copy(cpus, cpu_active_mask);
5682
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005683 schedstat_inc(sd, lb_count[idle]);
5684
5685redo:
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005686 if (!should_we_balance(&env)) {
5687 *continue_balancing = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005688 goto out_balanced;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005689 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005690
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005691 group = find_busiest_group(&env);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005692 if (!group) {
5693 schedstat_inc(sd, lb_nobusyg[idle]);
5694 goto out_balanced;
5695 }
5696
Michael Wangb94031302012-07-12 16:10:13 +08005697 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005698 if (!busiest) {
5699 schedstat_inc(sd, lb_nobusyq[idle]);
5700 goto out_balanced;
5701 }
5702
Michael Wang78feefc2012-08-06 16:41:59 +08005703 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005704
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005705 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005706
5707 ld_moved = 0;
5708 if (busiest->nr_running > 1) {
5709 /*
5710 * Attempt to move tasks. If find_busiest_group has found
5711 * an imbalance but busiest->nr_running <= 1, the group is
5712 * still unbalanced. ld_moved simply stays zero, so it is
5713 * correctly treated as an imbalance.
5714 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005715 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02005716 env.src_cpu = busiest->cpu;
5717 env.src_rq = busiest;
5718 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005719
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005720more_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005721 local_irq_save(flags);
Michael Wang78feefc2012-08-06 16:41:59 +08005722 double_rq_lock(env.dst_rq, busiest);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305723
5724 /*
5725 * cur_ld_moved - load moved in current iteration
5726 * ld_moved - cumulative load moved across iterations
5727 */
5728 cur_ld_moved = move_tasks(&env);
5729 ld_moved += cur_ld_moved;
Michael Wang78feefc2012-08-06 16:41:59 +08005730 double_rq_unlock(env.dst_rq, busiest);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005731 local_irq_restore(flags);
5732
5733 /*
5734 * some other cpu did the load balance for us.
5735 */
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305736 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
5737 resched_cpu(env.dst_cpu);
5738
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09005739 if (env.flags & LBF_NEED_BREAK) {
5740 env.flags &= ~LBF_NEED_BREAK;
5741 goto more_balance;
5742 }
5743
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305744 /*
5745 * Revisit (affine) tasks on src_cpu that couldn't be moved to
5746 * us and move them to an alternate dst_cpu in our sched_group
5747 * where they can run. The upper limit on how many times we
5748 * iterate on same src_cpu is dependent on number of cpus in our
5749 * sched_group.
5750 *
5751 * This changes load balance semantics a bit on who can move
5752 * load to a given_cpu. In addition to the given_cpu itself
5753		 * (or an ilb_cpu acting on its behalf where given_cpu is
5754 * nohz-idle), we now have balance_cpu in a position to move
5755 * load to given_cpu. In rare situations, this may cause
5756 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
5757 * _independently_ and at _same_ time to move some load to
5758		 * given_cpu) causing excess load to be moved to given_cpu.
5759 * This however should not happen so much in practice and
5760 * moreover subsequent load balance cycles should correct the
5761 * excess load moved.
5762 */
Peter Zijlstra62633222013-08-19 12:41:09 +02005763 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305764
Vladimir Davydov7aff2e32013-09-15 21:30:13 +04005765			/* Prevent dst_cpu from being re-selected via env's cpus */
5766 cpumask_clear_cpu(env.dst_cpu, env.cpus);
5767
Michael Wang78feefc2012-08-06 16:41:59 +08005768 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305769 env.dst_cpu = env.new_dst_cpu;
Peter Zijlstra62633222013-08-19 12:41:09 +02005770 env.flags &= ~LBF_DST_PINNED;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305771 env.loop = 0;
5772 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005773
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305774 /*
5775 * Go back to "more_balance" rather than "redo" since we
5776 * need to continue with same src_cpu.
5777 */
5778 goto more_balance;
5779 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005780
Peter Zijlstra62633222013-08-19 12:41:09 +02005781 /*
5782 * We failed to reach balance because of affinity.
5783 */
5784 if (sd_parent) {
5785 int *group_imbalance = &sd_parent->groups->sgp->imbalance;
5786
5787 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
5788 *group_imbalance = 1;
5789 } else if (*group_imbalance)
5790 *group_imbalance = 0;
5791 }
5792
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005793 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005794 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005795 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305796 if (!cpumask_empty(cpus)) {
5797 env.loop = 0;
5798 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005799 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05305800 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005801 goto out_balanced;
5802 }
5803 }
5804
5805 if (!ld_moved) {
5806 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07005807 /*
5808 * Increment the failure counter only on periodic balance.
5809 * We do not want newidle balance, which can be very
5810 * frequent, pollute the failure counter causing
5811 * excessive cache_hot migrations and active balances.
5812 */
5813 if (idle != CPU_NEWLY_IDLE)
5814 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005815
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005816 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005817 raw_spin_lock_irqsave(&busiest->lock, flags);
5818
Tejun Heo969c7922010-05-06 18:49:21 +02005819 /* don't kick the active_load_balance_cpu_stop,
5820 * if the curr task on busiest cpu can't be
5821 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005822 */
5823 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02005824 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005825 raw_spin_unlock_irqrestore(&busiest->lock,
5826 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005827 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005828 goto out_one_pinned;
5829 }
5830
Tejun Heo969c7922010-05-06 18:49:21 +02005831 /*
5832 * ->active_balance synchronizes accesses to
5833 * ->active_balance_work. Once set, it's cleared
5834 * only after active load balance is finished.
5835 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005836 if (!busiest->active_balance) {
5837 busiest->active_balance = 1;
5838 busiest->push_cpu = this_cpu;
5839 active_balance = 1;
5840 }
5841 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02005842
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005843 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02005844 stop_one_cpu_nowait(cpu_of(busiest),
5845 active_load_balance_cpu_stop, busiest,
5846 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005847 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005848
5849 /*
5850 * We've kicked active balancing, reset the failure
5851 * counter.
5852 */
5853 sd->nr_balance_failed = sd->cache_nice_tries+1;
5854 }
5855 } else
5856 sd->nr_balance_failed = 0;
5857
5858 if (likely(!active_balance)) {
5859 /* We were unbalanced, so reset the balancing interval */
5860 sd->balance_interval = sd->min_interval;
5861 } else {
5862 /*
5863 * If we've begun active balancing, start to back off. This
5864 * case may not be covered by the all_pinned logic if there
5865 * is only 1 task on the busy runqueue (because we don't call
5866 * move_tasks).
5867 */
5868 if (sd->balance_interval < sd->max_interval)
5869 sd->balance_interval *= 2;
5870 }
5871
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005872 goto out;
5873
5874out_balanced:
5875 schedstat_inc(sd, lb_balanced[idle]);
5876
5877 sd->nr_balance_failed = 0;
5878
5879out_one_pinned:
5880 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005881 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02005882 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005883 (sd->balance_interval < sd->max_interval))
5884 sd->balance_interval *= 2;
5885
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08005886 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005887out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005888 return ld_moved;
5889}
5890
5891/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005892 * idle_balance is called by schedule() if this_cpu is about to become
5893 * idle. Attempts to pull tasks from other CPUs.
5894 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02005895void idle_balance(int this_cpu, struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005896{
5897 struct sched_domain *sd;
5898 int pulled_task = 0;
5899 unsigned long next_balance = jiffies + HZ;
Jason Low9bd721c2013-09-13 11:26:52 -07005900 u64 curr_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005901
Frederic Weisbecker78becc22013-04-12 01:51:02 +02005902 this_rq->idle_stamp = rq_clock(this_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005903
5904 if (this_rq->avg_idle < sysctl_sched_migration_cost)
5905 return;
5906
Peter Zijlstraf492e122009-12-23 15:29:42 +01005907 /*
5908 * Drop the rq->lock, but keep IRQ/preempt disabled.
5909 */
5910 raw_spin_unlock(&this_rq->lock);
5911
Paul Turner48a16752012-10-04 13:18:31 +02005912 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02005913 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005914 for_each_domain(this_cpu, sd) {
5915 unsigned long interval;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005916 int continue_balancing = 1;
Jason Low9bd721c2013-09-13 11:26:52 -07005917 u64 t0, domain_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005918
5919 if (!(sd->flags & SD_LOAD_BALANCE))
5920 continue;
5921
Jason Low9bd721c2013-09-13 11:26:52 -07005922 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
5923 break;
5924
Peter Zijlstraf492e122009-12-23 15:29:42 +01005925 if (sd->flags & SD_BALANCE_NEWIDLE) {
Jason Low9bd721c2013-09-13 11:26:52 -07005926 t0 = sched_clock_cpu(this_cpu);
5927
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005928 /* If we've pulled tasks over stop searching: */
Peter Zijlstraf492e122009-12-23 15:29:42 +01005929 pulled_task = load_balance(this_cpu, this_rq,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005930 sd, CPU_NEWLY_IDLE,
5931 &continue_balancing);
Jason Low9bd721c2013-09-13 11:26:52 -07005932
5933 domain_cost = sched_clock_cpu(this_cpu) - t0;
5934 if (domain_cost > sd->max_newidle_lb_cost)
5935 sd->max_newidle_lb_cost = domain_cost;
5936
5937 curr_cost += domain_cost;
Peter Zijlstraf492e122009-12-23 15:29:42 +01005938 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005939
5940 interval = msecs_to_jiffies(sd->balance_interval);
5941 if (time_after(next_balance, sd->last_balance + interval))
5942 next_balance = sd->last_balance + interval;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005943 if (pulled_task) {
5944 this_rq->idle_stamp = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005945 break;
Nikhil Raod5ad1402010-11-17 11:42:04 -08005946 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005947 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005948 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01005949
5950 raw_spin_lock(&this_rq->lock);
5951
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005952 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
5953 /*
5954 * We are going idle. next_balance may be set based on
5955 * a busy processor. So reset next_balance.
5956 */
5957 this_rq->next_balance = next_balance;
5958 }
Jason Low9bd721c2013-09-13 11:26:52 -07005959
5960 if (curr_cost > this_rq->max_idle_balance_cost)
5961 this_rq->max_idle_balance_cost = curr_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005962}
5963
5964/*
Tejun Heo969c7922010-05-06 18:49:21 +02005965 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
5966 * running tasks off the busiest CPU onto idle CPUs. It requires at
5967 * least 1 task to be running on each physical CPU where possible, and
5968 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005969 */
Tejun Heo969c7922010-05-06 18:49:21 +02005970static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005971{
Tejun Heo969c7922010-05-06 18:49:21 +02005972 struct rq *busiest_rq = data;
5973 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005974 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02005975 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005976 struct sched_domain *sd;
Tejun Heo969c7922010-05-06 18:49:21 +02005977
5978 raw_spin_lock_irq(&busiest_rq->lock);
5979
5980 /* make sure the requested cpu hasn't gone down in the meantime */
5981 if (unlikely(busiest_cpu != smp_processor_id() ||
5982 !busiest_rq->active_balance))
5983 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005984
5985 /* Is there any task to move? */
5986 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02005987 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005988
5989 /*
5990 * This condition is "impossible", if it occurs
5991 * we need to fix it. Originally reported by
5992 * Bjorn Helgaas on a 128-cpu setup.
5993 */
5994 BUG_ON(busiest_rq == target_rq);
5995
5996 /* move a task from busiest_rq to target_rq */
5997 double_lock_balance(busiest_rq, target_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005998
5999 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02006000 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006001 for_each_domain(target_cpu, sd) {
6002 if ((sd->flags & SD_LOAD_BALANCE) &&
6003 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
6004 break;
6005 }
6006
6007 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006008 struct lb_env env = {
6009 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01006010 .dst_cpu = target_cpu,
6011 .dst_rq = target_rq,
6012 .src_cpu = busiest_rq->cpu,
6013 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006014 .idle = CPU_IDLE,
6015 };
6016
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006017 schedstat_inc(sd, alb_count);
6018
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006019 if (move_one_task(&env))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006020 schedstat_inc(sd, alb_pushed);
6021 else
6022 schedstat_inc(sd, alb_failed);
6023 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006024 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006025 double_unlock_balance(busiest_rq, target_rq);
Tejun Heo969c7922010-05-06 18:49:21 +02006026out_unlock:
6027 busiest_rq->active_balance = 0;
6028 raw_spin_unlock_irq(&busiest_rq->lock);
6029 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006030}
6031
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006032#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006033/*
6034 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006035 * - When one of the busy CPUs notices that there may be an idle rebalancing
6036 * needed, they will kick the idle load balancer, which then does idle
6037 * load balancing for all the idle CPUs.
6038 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006039static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006040 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006041 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006042 unsigned long next_balance; /* in jiffy units */
6043} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006044
Peter Zijlstra8e7fbcb2012-01-09 11:28:35 +01006045static inline int find_new_ilb(int call_cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006046{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006047 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006048
Suresh Siddha786d6dc72011-12-01 17:07:35 -08006049 if (ilb < nr_cpu_ids && idle_cpu(ilb))
6050 return ilb;
6051
6052 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006053}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006054
6055/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006056 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
6057 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
6058 * CPU (if there is one).
6059 */
6060static void nohz_balancer_kick(int cpu)
6061{
6062 int ilb_cpu;
6063
6064 nohz.next_balance++;
6065
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006066 ilb_cpu = find_new_ilb(cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006067
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006068 if (ilb_cpu >= nr_cpu_ids)
6069 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006070
Suresh Siddhacd490c52011-12-06 11:26:34 -08006071 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08006072 return;
6073 /*
6074 * Use smp_send_reschedule() instead of resched_cpu().
6075 * This way we generate a sched IPI on the target cpu which
6076 * is idle. And the softirq performing nohz idle load balance
6077 * will be run before returning from the IPI.
6078 */
6079 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006080 return;
6081}
6082
Alex Shic1cc0172012-09-10 15:10:58 +08006083static inline void nohz_balance_exit_idle(int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08006084{
6085 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
6086 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
6087 atomic_dec(&nohz.nr_cpus);
6088 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6089 }
6090}
6091
Suresh Siddha69e1e812011-12-01 17:07:33 -08006092static inline void set_cpu_sd_state_busy(void)
6093{
6094 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08006095
Suresh Siddha69e1e812011-12-01 17:07:33 -08006096 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05006097 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02006098
6099 if (!sd || !sd->nohz_idle)
6100 goto unlock;
6101 sd->nohz_idle = 0;
6102
6103 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08006104 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02006105unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08006106 rcu_read_unlock();
6107}
6108
6109void set_cpu_sd_state_idle(void)
6110{
6111 struct sched_domain *sd;
Suresh Siddha69e1e812011-12-01 17:07:33 -08006112
Suresh Siddha69e1e812011-12-01 17:07:33 -08006113 rcu_read_lock();
Nathan Zimmer424c93f2013-05-09 11:24:03 -05006114 sd = rcu_dereference_check_sched_domain(this_rq()->sd);
Vincent Guittot25f55d92013-04-23 16:59:02 +02006115
6116 if (!sd || sd->nohz_idle)
6117 goto unlock;
6118 sd->nohz_idle = 1;
6119
6120 for (; sd; sd = sd->parent)
Suresh Siddha69e1e812011-12-01 17:07:33 -08006121 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02006122unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08006123 rcu_read_unlock();
6124}
6125
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006126/*
Alex Shic1cc0172012-09-10 15:10:58 +08006127 * This routine will record that the cpu is going idle with tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006128 * This info will be used in performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006129 */
Alex Shic1cc0172012-09-10 15:10:58 +08006130void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006131{
Suresh Siddha71325962012-01-19 18:28:57 -08006132 /*
6133 * If this cpu is going down, then nothing needs to be done.
6134 */
6135 if (!cpu_active(cpu))
6136 return;
6137
Alex Shic1cc0172012-09-10 15:10:58 +08006138 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
6139 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006140
Alex Shic1cc0172012-09-10 15:10:58 +08006141 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
6142 atomic_inc(&nohz.nr_cpus);
6143 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006144}
Suresh Siddha71325962012-01-19 18:28:57 -08006145
Paul Gortmaker0db06282013-06-19 14:53:51 -04006146static int sched_ilb_notifier(struct notifier_block *nfb,
Suresh Siddha71325962012-01-19 18:28:57 -08006147 unsigned long action, void *hcpu)
6148{
6149 switch (action & ~CPU_TASKS_FROZEN) {
6150 case CPU_DYING:
Alex Shic1cc0172012-09-10 15:10:58 +08006151 nohz_balance_exit_idle(smp_processor_id());
Suresh Siddha71325962012-01-19 18:28:57 -08006152 return NOTIFY_OK;
6153 default:
6154 return NOTIFY_DONE;
6155 }
6156}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006157#endif
6158
6159static DEFINE_SPINLOCK(balancing);
6160
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006161/*
6162 * Scale the max load_balance interval with the number of CPUs in the system.
6163 * This trades load-balance latency on larger machines for less cross talk.
6164 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006165void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006166{
6167 max_load_balance_interval = HZ*num_online_cpus()/10;
6168}
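/*
 * Illustrative sketch (not from the original source), assuming HZ == 1000:
 * with 8 cpus online, update_max_interval() yields
 *     max_load_balance_interval = 1000 * 8 / 10 = 800 jiffies (~0.8s),
 * while a 64-cpu machine gets 6400 jiffies (~6.4s), so larger systems
 * rebalance less aggressively to cut cross-cpu chatter.
 */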
6169
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006170/*
6171 * It checks each scheduling domain to see if it is due to be balanced,
6172 * and initiates a balancing operation if so.
6173 *
Libinb9b08532013-04-01 19:14:01 +08006174 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006175 */
6176static void rebalance_domains(int cpu, enum cpu_idle_type idle)
6177{
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006178 int continue_balancing = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006179 struct rq *rq = cpu_rq(cpu);
6180 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02006181 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006182 /* Earliest time when we have to do rebalance again */
6183 unsigned long next_balance = jiffies + 60*HZ;
6184 int update_next_balance = 0;
Jason Lowf48627e2013-09-13 11:26:53 -07006185 int need_serialize, need_decay = 0;
6186 u64 max_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006187
Paul Turner48a16752012-10-04 13:18:31 +02006188 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08006189
Peter Zijlstradce840a2011-04-07 14:09:50 +02006190 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006191 for_each_domain(cpu, sd) {
Jason Lowf48627e2013-09-13 11:26:53 -07006192 /*
6193 * Decay the newidle max times here because this is a regular
6194 * visit to all the domains. Decay ~1% per second.
6195 */
6196 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
6197 sd->max_newidle_lb_cost =
6198 (sd->max_newidle_lb_cost * 253) / 256;
6199 sd->next_decay_max_lb_cost = jiffies + HZ;
6200 need_decay = 1;
6201 }
6202 max_cost += sd->max_newidle_lb_cost;
6203
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006204 if (!(sd->flags & SD_LOAD_BALANCE))
6205 continue;
6206
Jason Lowf48627e2013-09-13 11:26:53 -07006207 /*
6208 * Stop the load balance at this level. There is another
6209 * CPU in our sched group which is doing load balancing more
6210 * actively.
6211 */
6212 if (!continue_balancing) {
6213 if (need_decay)
6214 continue;
6215 break;
6216 }
6217
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006218 interval = sd->balance_interval;
6219 if (idle != CPU_IDLE)
6220 interval *= sd->busy_factor;
6221
6222 /* scale ms to jiffies */
6223 interval = msecs_to_jiffies(interval);
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006224 interval = clamp(interval, 1UL, max_load_balance_interval);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006225
6226 need_serialize = sd->flags & SD_SERIALIZE;
6227
6228 if (need_serialize) {
6229 if (!spin_trylock(&balancing))
6230 goto out;
6231 }
6232
6233 if (time_after_eq(jiffies, sd->last_balance + interval)) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006234 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006235 /*
Peter Zijlstra62633222013-08-19 12:41:09 +02006236 * The LBF_DST_PINNED logic could have changed
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09006237 * env->dst_cpu, so we can't know our idle
6238 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006239 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09006240 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006241 }
6242 sd->last_balance = jiffies;
6243 }
6244 if (need_serialize)
6245 spin_unlock(&balancing);
6246out:
6247 if (time_after(next_balance, sd->last_balance + interval)) {
6248 next_balance = sd->last_balance + interval;
6249 update_next_balance = 1;
6250 }
Jason Lowf48627e2013-09-13 11:26:53 -07006251 }
6252 if (need_decay) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006253 /*
Jason Lowf48627e2013-09-13 11:26:53 -07006254 * Ensure the rq-wide value also decays but keep it at a
6255 * reasonable floor to avoid funnies with rq->avg_idle.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006256 */
Jason Lowf48627e2013-09-13 11:26:53 -07006257 rq->max_idle_balance_cost =
6258 max((u64)sysctl_sched_migration_cost, max_cost);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006259 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006260 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006261
6262 /*
6263 * next_balance will be updated only when there is a need.
6264 * When the cpu is attached to null domain for ex, it will not be
6265 * updated.
6266 */
6267 if (likely(update_next_balance))
6268 rq->next_balance = next_balance;
6269}
6270
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006271#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006272/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006273 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006274 * rebalancing for all the cpus for whom scheduler ticks are stopped.
6275 */
6276static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
6277{
6278 struct rq *this_rq = cpu_rq(this_cpu);
6279 struct rq *rq;
6280 int balance_cpu;
6281
Suresh Siddha1c792db2011-12-01 17:07:32 -08006282 if (idle != CPU_IDLE ||
6283 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
6284 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006285
6286 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08006287 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006288 continue;
6289
6290 /*
6291 * If this cpu gets work to do, stop the load balancing
6292 * work being done for other cpus. Next load
6293 * balancing owner will pick it up.
6294 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08006295 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006296 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006297
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02006298 rq = cpu_rq(balance_cpu);
6299
6300 raw_spin_lock_irq(&rq->lock);
6301 update_rq_clock(rq);
6302 update_idle_cpu_load(rq);
6303 raw_spin_unlock_irq(&rq->lock);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006304
6305 rebalance_domains(balance_cpu, CPU_IDLE);
6306
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006307 if (time_after(this_rq->next_balance, rq->next_balance))
6308 this_rq->next_balance = rq->next_balance;
6309 }
6310 nohz.next_balance = this_rq->next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08006311end:
6312 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006313}
6314
6315/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006316 * Current heuristic for kicking the idle load balancer in the presence
6317 * of an idle cpu in the system.
6318 * - This rq has more than one task.
6319 * - At any scheduler domain level, this cpu's scheduler group has multiple
6320 * busy cpu's exceeding the group's power.
6321 * - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
6322 * domain span are idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006323 */
6324static inline int nohz_kick_needed(struct rq *rq, int cpu)
6325{
6326 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006327 struct sched_domain *sd;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006328
Suresh Siddha1c792db2011-12-01 17:07:32 -08006329 if (unlikely(idle_cpu(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006330 return 0;
6331
Suresh Siddha1c792db2011-12-01 17:07:32 -08006332 /*
6333	 * We may have recently been in ticked or tickless idle mode. At the first
6334 * busy tick after returning from idle, we will update the busy stats.
6335 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08006336 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08006337 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006338
6339 /*
6340 * None are in tickless mode and hence no need for NOHZ idle load
6341 * balancing.
6342 */
6343 if (likely(!atomic_read(&nohz.nr_cpus)))
6344 return 0;
Suresh Siddha1c792db2011-12-01 17:07:32 -08006345
6346 if (time_before(now, nohz.next_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006347 return 0;
6348
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006349 if (rq->nr_running >= 2)
6350 goto need_kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006351
Peter Zijlstra067491b2011-12-07 14:32:08 +01006352 rcu_read_lock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006353 for_each_domain(cpu, sd) {
6354 struct sched_group *sg = sd->groups;
6355 struct sched_group_power *sgp = sg->sgp;
6356 int nr_busy = atomic_read(&sgp->nr_busy_cpus);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006357
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006358 if (sd->flags & SD_SHARE_PKG_RESOURCES && nr_busy > 1)
Peter Zijlstra067491b2011-12-07 14:32:08 +01006359 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006360
6361 if (sd->flags & SD_ASYM_PACKING && nr_busy != sg->group_weight
6362 && (cpumask_first_and(nohz.idle_cpus_mask,
6363 sched_domain_span(sd)) < cpu))
Peter Zijlstra067491b2011-12-07 14:32:08 +01006364 goto need_kick_unlock;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006365
6366 if (!(sd->flags & (SD_SHARE_PKG_RESOURCES | SD_ASYM_PACKING)))
6367 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006368 }
Peter Zijlstra067491b2011-12-07 14:32:08 +01006369 rcu_read_unlock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006370 return 0;
Peter Zijlstra067491b2011-12-07 14:32:08 +01006371
6372need_kick_unlock:
6373 rcu_read_unlock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006374need_kick:
6375 return 1;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006376}
6377#else
6378static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
6379#endif

/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
 */
static void run_rebalance_domains(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct rq *this_rq = cpu_rq(this_cpu);
	enum cpu_idle_type idle = this_rq->idle_balance ?
						CPU_IDLE : CPU_NOT_IDLE;

	rebalance_domains(this_cpu, idle);

	/*
	 * If this cpu has a pending nohz_balance_kick, then do the
	 * balancing on behalf of the other idle cpus whose ticks are
	 * stopped.
	 */
	nohz_idle_balance(this_cpu, idle);
}

static inline int on_null_domain(int cpu)
{
	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
}

/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
void trigger_load_balance(struct rq *rq, int cpu)
{
	/* Don't need to rebalance while attached to NULL domain */
	if (time_after_eq(jiffies, rq->next_balance) &&
	    likely(!on_null_domain(cpu)))
		raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ_COMMON
	if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
		nohz_balancer_kick(cpu);
#endif
}

static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();

	/* Ensure any throttled groups are reachable by pick_next_task */
	unthrottle_offline_cfs_rqs(rq);
}

#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}

	if (numabalancing_enabled)
		task_tick_numa(rq, curr);

	update_rq_runnable_avg(rq, 1);
}

/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se, *curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);

	cfs_rq = task_cfs_rq(current);
	curr = cfs_rq->curr;

	/*
	 * Not only the cpu but also the task_group of the parent might have
	 * been changed after parent->se.parent,cfs_rq were copied to
	 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
	 * of child point to valid ones.
	 */
	rcu_read_lock();
	__set_task_cpu(p, this_cpu);
	rcu_read_unlock();

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	se->vruntime -= cfs_rq->min_vruntime;

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->se.on_rq)
		return;

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's
	 */
	if (rq->curr == p) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

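/*
 * We switched away from the sched_fair class.
 */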
static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Ensure the task's vruntime is normalized, so that when it's
	 * switched back to the fair class the enqueue_entity(.flags=0) will
	 * do the right thing.
	 *
	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
	 * have normalized the vruntime; if it was !on_rq, then only when
	 * the task is sleeping will it still have non-normalized vruntime.
	 */
	if (!se->on_rq && p->state != TASK_RUNNING) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause an 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}

#ifdef CONFIG_SMP
	/*
	 * Remove our load contribution when we leave sched_fair
	 * and ensure we don't carry in an old decay_count if we
	 * switch back.
	 */
	if (se->avg.decay_count) {
		__synchronize_entity_decay(se);
		subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
	}
#endif
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
	if (!p->se.on_rq)
		return;

	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		set_next_entity(cfs_rq, se);
		/* ensure bandwidth has been allocated on our new cfs_rq */
		account_cfs_rq_runtime(cfs_rq, 0);
	}
}

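/*
 * Initialize a cfs_rq: the rbtree of runnable entities, its min_vruntime
 * and, on SMP, the blocked-load tracking state.
 */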
void init_cfs_rq(struct cfs_rq *cfs_rq)
{
	cfs_rq->tasks_timeline = RB_ROOT;
	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
#ifndef CONFIG_64BIT
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
#ifdef CONFIG_SMP
	atomic64_set(&cfs_rq->decay_counter, 1);
	atomic_long_set(&cfs_rq->removed_load, 0);
#endif
}

#ifdef CONFIG_FAIR_GROUP_SCHED
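/*
 * Called when a task moves to a different task group (cgroup): rebase its
 * vruntime onto the new group's cfs_rq and, on SMP, account its load
 * contribution there.
 */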
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
	struct cfs_rq *cfs_rq;
	/*
	 * If the task was not on the rq at the time of this cgroup movement
	 * it must have been asleep; sleeping tasks keep their ->vruntime
	 * absolute on their old rq until wakeup (needed for the fair sleeper
	 * bonus in place_entity()).
	 *
	 * If it was on the rq, we've just 'preempted' it, which does convert
	 * ->vruntime to a relative base.
	 *
	 * Make sure both cases convert their relative position when migrating
	 * to another cgroup's rq. This does somewhat interfere with the
	 * fair sleeper stuff for the first placement, but who cares.
	 */
	/*
	 * When !on_rq, vruntime of the task has usually NOT been normalized.
	 * But there are some cases where it has already been normalized:
	 *
	 * - Moving a forked child which is waiting to be woken up by
	 *   wake_up_new_task().
	 * - Moving a task which has been woken up by try_to_wake_up() and is
	 *   waiting to actually be woken up by sched_ttwu_pending().
	 *
	 * To prevent boost or penalty in the new cfs_rq caused by delta
	 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
	 */
	if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
		on_rq = 1;

	if (!on_rq)
		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
	set_task_rq(p, task_cpu(p));
	if (!on_rq) {
		cfs_rq = cfs_rq_of(&p->se);
		p->se.vruntime += cfs_rq->min_vruntime;
#ifdef CONFIG_SMP
		/*
		 * migrate_task_rq_fair() will have removed our previous
		 * contribution, but we must synchronize for ongoing future
		 * decay.
		 */
		p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
		cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
#endif
	}
}

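/*
 * Free a task group's per-cpu cfs_rq and sched_entity arrays along with
 * its CFS bandwidth state.
 */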
void free_fair_sched_group(struct task_group *tg)
{
	int i;

	destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		if (tg->cfs_rq)
			kfree(tg->cfs_rq[i]);
		if (tg->se)
			kfree(tg->se[i]);
	}

	kfree(tg->cfs_rq);
	kfree(tg->se);
}

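/*
 * Allocate a cfs_rq and group sched_entity for every possible cpu of a new
 * task group and wire them into the hierarchy.  Returns 1 on success,
 * 0 on allocation failure.
 */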
int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se;
	int i;

	tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->cfs_rq)
		goto err;
	tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
	if (!tg->se)
		goto err;

	tg->shares = NICE_0_LOAD;

	init_cfs_bandwidth(tg_cfs_bandwidth(tg));

	for_each_possible_cpu(i) {
		cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
				      GFP_KERNEL, cpu_to_node(i));
		if (!cfs_rq)
			goto err;

		se = kzalloc_node(sizeof(struct sched_entity),
				  GFP_KERNEL, cpu_to_node(i));
		if (!se)
			goto err_free_rq;

		init_cfs_rq(cfs_rq);
		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
	}

	return 1;

err_free_rq:
	kfree(cfs_rq);
err:
	return 0;
}

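/*
 * Take the group's cfs_rq off this cpu's leaf cfs_rq list before the group
 * is destroyed.
 */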
void unregister_fair_sched_group(struct task_group *tg, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long flags;

	/*
	 * Only empty task groups can be destroyed; so we can speculatively
	 * check on_list without danger of it being re-added.
	 */
	if (!tg->cfs_rq[cpu]->on_list)
		return;

	raw_spin_lock_irqsave(&rq->lock, flags);
	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
	raw_spin_unlock_irqrestore(&rq->lock, flags);
}

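/*
 * Link a group's cfs_rq and its sched_entity into the runqueue hierarchy on
 * one cpu: the entity queues on the parent's cfs_rq (or the cpu's root
 * cfs_rq) and owns the group's cfs_rq as its my_q.
 */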
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
		       struct sched_entity *se, int cpu,
		       struct sched_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	cfs_rq->tg = tg;
	cfs_rq->rq = rq;
	init_cfs_rq_runtime(cfs_rq);

	tg->cfs_rq[cpu] = cfs_rq;
	tg->se[cpu] = se;

	/* se could be NULL for root_task_group */
	if (!se)
		return;

	if (!parent)
		se->cfs_rq = &rq->cfs;
	else
		se->cfs_rq = parent->my_q;

	se->my_q = cfs_rq;
	update_load_set(&se->load, 0);
	se->parent = parent;
}

static DEFINE_MUTEX(shares_mutex);

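/*
 * Update a task group's weight (typically reached from the cpu cgroup's
 * cpu.shares knob) and propagate the new shares up the hierarchy on every
 * cpu.
 */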
int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	tg->shares = shares;
	for_each_possible_cpu(i) {
		struct rq *rq = cpu_rq(i);
		struct sched_entity *se;

		se = tg->se[i];
		/* Propagate contribution to hierarchy */
		raw_spin_lock_irqsave(&rq->lock, flags);

		/* Possible calls to update_curr() need rq clock */
		update_rq_clock(rq);
		for_each_sched_entity(se)
			update_cfs_shares(group_cfs_rq(se));
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

done:
	mutex_unlock(&shares_mutex);
	return 0;
}
#else /* CONFIG_FAIR_GROUP_SCHED */

void free_fair_sched_group(struct task_group *tg) { }

int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}

void unregister_fair_sched_group(struct task_group *tg, int cpu) { }

#endif /* CONFIG_FAIR_GROUP_SCHED */

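/*
 * Report the task's current timeslice (sched_slice) in jiffies, for
 * sched_rr_get_interval().
 */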
static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));

	return rr_interval;
}

/*
 * All the scheduling class methods:
 */
const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,
	.migrate_task_rq	= migrate_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_move_group	= task_move_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
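/* Dump every leaf cfs_rq of a cpu for the scheduler debug output. */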
void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif

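/*
 * Register the load-balancing softirq handler and, with NO_HZ, the state
 * used to kick nohz idle balancing.
 */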
__init void init_sched_fair_class(void)
{
#ifdef CONFIG_SMP
	open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);

#ifdef CONFIG_NO_HZ_COMMON
	nohz.next_balance = jiffies;
	zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
	cpu_notifier(sched_ilb_notifier, 0);
#endif
#endif /* SMP */

}