/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010070
71/*
72 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73 */
Ingo Molnar0bf377b2010-09-12 08:14:52 +020074static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}
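
/*
 * Illustrative example (numbers assume the defaults above): with the
 * SCHED_TUNABLESCALING_LOG policy and 8 or more online CPUs the factor is
 * 1 + ilog2(8) = 4, so update_sysctl() below scales the normalized
 * defaults to roughly 24ms sched_latency, 3ms sched_min_granularity and
 * 4ms sched_wakeup_granularity.
 */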

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}

#if BITS_PER_LONG == 32
# define WMULT_CONST	(~0UL)
#else
# define WMULT_CONST	(1UL << 32)
#endif

#define WMULT_SHIFT	32

/*
 * Shift right and round:
 */
#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))

/*
 * delta *= weight / lw
 */
static unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
		struct load_weight *lw)
{
	u64 tmp;

	/*
	 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
	 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
	 * 2^SCHED_LOAD_RESOLUTION.
	 */
	if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
		tmp = (u64)delta_exec * scale_load_down(weight);
	else
		tmp = (u64)delta_exec;

	if (!lw->inv_weight) {
		unsigned long w = scale_load_down(lw->weight);

		if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
			lw->inv_weight = 1;
		else if (unlikely(!w))
			lw->inv_weight = WMULT_CONST;
		else
			lw->inv_weight = WMULT_CONST / w;
	}

	/*
	 * Check whether we'd overflow the 64-bit multiplication:
	 */
	if (unlikely(tmp > WMULT_CONST))
		tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
			WMULT_SHIFT/2);
	else
		tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);

	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
}
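
/*
 * Illustrative example: the division by lw->weight is performed as a
 * multiplication by the cached reciprocal inv_weight ~= 2^32 / weight,
 * followed by a rounded 32-bit right shift (SRR). Assuming no extra load
 * resolution, delta_exec = 1000000ns with weight = 1024 (NICE_0_LOAD)
 * against lw->weight = 2048 yields roughly 1000000 * 1024 / 2048 =
 * 500000ns.
 */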


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
				       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
		/* We should have no load, but we need to update last_decay. */
		update_cfs_rq_blocked_load(cfs_rq, 0);
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * A preemption test can be made between sibling entities that are in
	 * the same cfs_rq, i.e. that have a common parent. Walk up the
	 * hierarchy of both tasks until we find ancestors that are siblings,
	 * i.e. children of a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}
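
/*
 * The three helpers above compare vruntimes via a signed difference
 * rather than comparing the u64 values directly, so the ordering remains
 * correct even after the monotonically increasing vruntime counters wrap
 * around.
 */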

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
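
/*
 * The timeline rbtree is ordered by vruntime through entity_before(), and
 * the cached rb_leftmost pointer always tracks the entity with the
 * smallest vruntime, so __pick_first_entity() below finds the next entity
 * to run without walking the tree.
 */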

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
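
/*
 * Illustrative example (assuming the default 6ms latency, 0.75ms minimum
 * granularity and nr_latency = 8): with 5 runnable tasks the period stays
 * at 6ms, so each nice-0 task runs once per 6ms; with 16 runnable tasks
 * the period is stretched to 16 * 0.75ms = 12ms so that no slice drops
 * below the minimum granularity.
 */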

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
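
/*
 * Illustrative example: with two runnable nice-0 tasks and the default
 * 6ms period, each task's wall-time slice is 3ms. A task with twice
 * NICE_0_LOAD sharing the queue with one nice-0 task would instead get
 * 2/3 of the period, since the slice is proportional to w/rw.
 */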

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP
static unsigned long task_h_load(struct task_struct *p);

static inline void __update_task_entity_contrib(struct sched_entity *se);

/* Give a new task initial runnable averages so it starts out fully loaded */
void init_task_runnable_average(struct task_struct *p)
{
	u32 slice;

	p->se.avg.decay_count = 0;
	slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
	p->se.avg.runnable_avg_sum = slice;
	p->se.avg.runnable_avg_period = slice;
	__update_task_entity_contrib(&p->se);
}
#else
void init_task_runnable_average(struct task_struct *p)
{
}
#endif

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);
}
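
/*
 * delta_exec is weighted by NICE_0_LOAD / weight before being added to
 * vruntime, so a nice-0 task's vruntime advances at wall-clock rate while
 * heavier (lower nice) tasks accumulate vruntime more slowly and thus
 * receive proportionally more CPU time.
 */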

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time, in ms, to scan a task's entire address space once.
 * The actual task scan period is calculated from the task's virtual
 * memory size and numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

/*
 * After skipping a page migration on a shared page, skip N more numa page
 * migrations unconditionally. This reduces the number of NUMA migrations
 * in shared memory workloads, and has the effect of pulling tasks towards
 * where their memory lives, over pulling the memory towards the task.
 */
unsigned int sysctl_numa_balancing_migrate_deferred = 16;

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned int smin = task_scan_min(p);
	unsigned int smax;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
	return max(smin, smax);
}
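
/*
 * Illustrative example (default tunables): a task with a 1GB RSS covers 4
 * scan windows of 256MB, so task_scan_min() returns max(1000/10, 1000/4)
 * = 250ms between scans and task_scan_max() returns 60000/4 = 15000ms;
 * larger address spaces are scanned less often per window so the time to
 * cover the whole task stays roughly bounded.
 */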

/*
 * Once a preferred node is selected the scheduler balancer will prefer moving
 * a task to that node for sysctl_numa_balancing_settle_count number of PTE
 * scans. This gives the process the chance to accumulate more faults on
 * the preferred node but still allows the scheduler to move the task again if
 * the node's CPUs are overloaded.
 */
unsigned int sysctl_numa_balancing_settle_count __read_mostly = 4;

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != -1);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != -1);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

struct numa_group {
	atomic_t refcount;

	spinlock_t lock; /* nr_tasks, tasks */
	int nr_tasks;
	pid_t gid;
	struct list_head task_list;

	struct rcu_head rcu;
	unsigned long total_faults;
	unsigned long faults[0];
};

pid_t task_numa_group_id(struct task_struct *p)
{
	return p->numa_group ? p->numa_group->gid : 0;
}

static inline int task_faults_idx(int nid, int priv)
{
	return 2 * nid + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(nid, 0)] +
		p->numa_faults[task_faults_idx(nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
	if (!p->numa_group)
		return 0;

	return p->numa_group->faults[2*nid] + p->numa_group->faults[2*nid+1];
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid)
{
	unsigned long total_faults;

	if (!p->numa_faults)
		return 0;

	total_faults = p->total_numa_faults;

	if (!total_faults)
		return 0;

	return 1000 * task_faults(p, nid) / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid)
{
	if (!p->numa_group || !p->numa_group->total_faults)
		return 0;

	return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
}
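
/*
 * Both weights are expressed in thousandths of the task's (or group's)
 * total recorded faults. For example, a task with 300 of its 400 decayed
 * faults on node 1 gets task_weight(p, 1) == 750.
 */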

static unsigned long weighted_cpuload(const int cpu);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long power_of(int cpu);
static long effective_load(struct task_group *tg, int cpu, long wl, long wg);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
	unsigned long nr_running;
	unsigned long load;

	/* Total compute capacity of CPUs on a node */
	unsigned long power;

	/* Approximate capacity in terms of runnable tasks on a node */
	unsigned long capacity;
	int has_capacity;
};

/*
 * XXX borrowed from update_sg_lb_stats
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
	int cpu, cpus = 0;

	memset(ns, 0, sizeof(*ns));
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		struct rq *rq = cpu_rq(cpu);

		ns->nr_running += rq->nr_running;
		ns->load += weighted_cpuload(cpu);
		ns->power += power_of(cpu);

		cpus++;
	}

	/*
	 * If we raced with hotplug and there are no CPUs left in our mask
	 * the @ns structure is NULL'ed and task_numa_compare() will
	 * not find this node attractive.
	 *
	 * We'll either bail at !has_capacity, or we'll detect a huge imbalance
	 * and bail there.
	 */
	if (!cpus)
		return;

	ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
	ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
	ns->has_capacity = (ns->nr_running < ns->capacity);
}
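
/*
 * Illustrative example: with SCHED_POWER_SCALE at 1024, a node whose 4
 * CPUs all report full compute power has ns->power = 4096 and
 * ns->capacity = 4, so has_capacity stays true while fewer than 4 tasks
 * are runnable on that node.
 */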

struct task_numa_env {
	struct task_struct *p;

	int src_cpu, src_nid;
	int dst_cpu, dst_nid;

	struct numa_stats src_stats, dst_stats;

	int imbalance_pct, idx;

	struct task_struct *best_task;
	long best_imp;
	int best_cpu;
};

static void task_numa_assign(struct task_numa_env *env,
			     struct task_struct *p, long imp)
{
	if (env->best_task)
		put_task_struct(env->best_task);
	if (p)
		get_task_struct(p);

	env->best_task = p;
	env->best_imp = imp;
	env->best_cpu = env->dst_cpu;
}

/*
 * This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task were migrated to the target dst_cpu,
 * taking into account that it might be best if the task running on the
 * dst_cpu were exchanged with the source task.
 */
static void task_numa_compare(struct task_numa_env *env,
			      long taskimp, long groupimp)
{
	struct rq *src_rq = cpu_rq(env->src_cpu);
	struct rq *dst_rq = cpu_rq(env->dst_cpu);
	struct task_struct *cur;
	long dst_load, src_load;
	long load;
	long imp = (groupimp > 0) ? groupimp : taskimp;

	rcu_read_lock();
	cur = ACCESS_ONCE(dst_rq->curr);
	if (cur->pid == 0) /* idle */
		cur = NULL;

	/*
	 * "imp" is the fault differential for the source task between the
	 * source and destination node. Calculate the total differential for
	 * the source task and potential destination task. The more negative
	 * the value is, the more remote accesses would be expected to be
	 * incurred if the tasks were swapped.
	 */
	if (cur) {
		/* Skip this swap candidate if cannot move to the source cpu */
		if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
			goto unlock;

		/*
		 * If dst and source tasks are in the same NUMA group, or not
		 * in any group then look only at task weights.
		 */
		if (cur->numa_group == env->p->numa_group) {
			imp = taskimp + task_weight(cur, env->src_nid) -
			      task_weight(cur, env->dst_nid);
			/*
			 * Add some hysteresis to prevent swapping the
			 * tasks within a group over tiny differences.
			 */
			if (cur->numa_group)
				imp -= imp/16;
		} else {
			/*
			 * Compare the group weights. If a task is all by
			 * itself (not part of a group), use the task weight
			 * instead.
			 */
			if (env->p->numa_group)
				imp = groupimp;
			else
				imp = taskimp;

			if (cur->numa_group)
				imp += group_weight(cur, env->src_nid) -
				       group_weight(cur, env->dst_nid);
			else
				imp += task_weight(cur, env->src_nid) -
				       task_weight(cur, env->dst_nid);
		}
	}

	if (imp < env->best_imp)
		goto unlock;

	if (!cur) {
		/* Is there capacity at our destination? */
		if (env->src_stats.has_capacity &&
		    !env->dst_stats.has_capacity)
			goto unlock;

		goto balance;
	}

	/* Balance doesn't matter much if we're running a task per cpu */
	if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
		goto assign;

	/*
	 * In the overloaded case, try and keep the load balanced.
	 */
balance:
	dst_load = env->dst_stats.load;
	src_load = env->src_stats.load;

	/* XXX missing power terms */
	load = task_h_load(env->p);
	dst_load += load;
	src_load -= load;

	if (cur) {
		load = task_h_load(cur);
		dst_load -= load;
		src_load += load;
	}

	/* make src_load the smaller */
	if (dst_load < src_load)
		swap(dst_load, src_load);

	if (src_load * env->imbalance_pct < dst_load * 100)
		goto unlock;

assign:
	task_numa_assign(env, cur, imp);
unlock:
	rcu_read_unlock();
}
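
/*
 * Illustrative example: with an effective imbalance_pct of 112, a move or
 * swap is rejected when it would leave the busier side more than ~12%
 * above the other, e.g. src_load = 1000 against dst_load = 1150 fails the
 * 1000 * 112 < 1150 * 100 check above and the current placement is kept.
 */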

static void task_numa_find_cpu(struct task_numa_env *env,
				long taskimp, long groupimp)
{
	int cpu;

	for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
		/* Skip this CPU if the source task cannot migrate */
		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
			continue;

		env->dst_cpu = cpu;
		task_numa_compare(env, taskimp, groupimp);
	}
}

static int task_numa_migrate(struct task_struct *p)
{
	struct task_numa_env env = {
		.p = p,

		.src_cpu = task_cpu(p),
		.src_nid = task_node(p),

		.imbalance_pct = 112,

		.best_task = NULL,
		.best_imp = 0,
		.best_cpu = -1
	};
	struct sched_domain *sd;
	unsigned long taskweight, groupweight;
	int nid, ret;
	long taskimp, groupimp;

	/*
	 * Pick the lowest SD_NUMA domain, as that would have the smallest
	 * imbalance and would be the first to start moving tasks about.
	 *
	 * And we want to avoid any moving of tasks about, as that would create
	 * random movement of tasks -- counter the numa conditions we're trying
	 * to satisfy here.
	 */
	rcu_read_lock();
	sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
	if (sd)
		env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
	rcu_read_unlock();

	/*
	 * Cpusets can break the scheduler domain tree into smaller
	 * balance domains, some of which do not cross NUMA boundaries.
	 * Tasks that are "trapped" in such domains cannot be migrated
	 * elsewhere, so there is no point in (re)trying.
	 */
	if (unlikely(!sd)) {
		p->numa_preferred_nid = cpu_to_node(task_cpu(p));
		return -EINVAL;
	}

	taskweight = task_weight(p, env.src_nid);
	groupweight = group_weight(p, env.src_nid);
	update_numa_stats(&env.src_stats, env.src_nid);
	env.dst_nid = p->numa_preferred_nid;
	taskimp = task_weight(p, env.dst_nid) - taskweight;
	groupimp = group_weight(p, env.dst_nid) - groupweight;
	update_numa_stats(&env.dst_stats, env.dst_nid);

	/* If the preferred nid has capacity, try to use it. */
	if (env.dst_stats.has_capacity)
		task_numa_find_cpu(&env, taskimp, groupimp);

	/* No space available on the preferred nid. Look elsewhere. */
	if (env.best_cpu == -1) {
		for_each_online_node(nid) {
			if (nid == env.src_nid || nid == p->numa_preferred_nid)
				continue;

			/* Only consider nodes where both task and groups benefit */
			taskimp = task_weight(p, nid) - taskweight;
			groupimp = group_weight(p, nid) - groupweight;
			if (taskimp < 0 && groupimp < 0)
				continue;

			env.dst_nid = nid;
			update_numa_stats(&env.dst_stats, env.dst_nid);
			task_numa_find_cpu(&env, taskimp, groupimp);
		}
	}

	/* No better CPU than the current one was found. */
	if (env.best_cpu == -1)
		return -EAGAIN;

	sched_setnuma(p, env.dst_nid);

	/*
	 * Reset the scan period if the task is being rescheduled on an
	 * alternative node to recheck if the task is now properly placed.
	 */
	p->numa_scan_period = task_scan_min(p);

	if (env.best_task == NULL) {
		int ret = migrate_task_to(p, env.best_cpu);
		return ret;
	}

	ret = migrate_swap(p, env.best_task);
	put_task_struct(env.best_task);
	return ret;
}

/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
	/* This task has no NUMA fault statistics yet */
	if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
		return;

	/* Periodically retry migrating the task to the preferred node */
	p->numa_migrate_retry = jiffies + HZ;

	/* Success if task is already running on preferred CPU */
	if (cpu_to_node(task_cpu(p)) == p->numa_preferred_nid)
		return;

	/* Otherwise, try migrate to a CPU on the preferred node */
	task_numa_migrate(p);
}

/*
 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
 * increments. The more local the fault statistics are, the longer the scan
 * period will be for the next scan window. If the local/remote ratio is
 * below NUMA_PERIOD_THRESHOLD (where the range of the ratio is
 * 1..NUMA_PERIOD_SLOTS) the scan period will decrease.
 */
#define NUMA_PERIOD_SLOTS 10
#define NUMA_PERIOD_THRESHOLD 3

/*
 * Increase the scan period (slow down scanning) if the majority of
 * our memory is already on our local node, or if the majority of
 * the page accesses are shared with other processes.
 * Otherwise, decrease the scan period.
 */
static void update_task_scan_period(struct task_struct *p,
			unsigned long shared, unsigned long private)
{
	unsigned int period_slot;
	int ratio;
	int diff;

	unsigned long remote = p->numa_faults_locality[0];
	unsigned long local = p->numa_faults_locality[1];

	/*
	 * If there were no recorded hinting faults then either the task is
	 * completely idle or all its activity is in areas that are not of
	 * interest to automatic numa balancing, so scan more slowly.
	 */
	if (local + shared == 0) {
		p->numa_scan_period = min(p->numa_scan_period_max,
			p->numa_scan_period << 1);

		p->mm->numa_next_scan = jiffies +
			msecs_to_jiffies(p->numa_scan_period);

		return;
	}

	/*
	 * Prepare to scale scan period relative to the current period.
	 *	 == NUMA_PERIOD_THRESHOLD scan period stays the same
	 *	 <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
	 *	 >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
	 */
	period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
	ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
	if (ratio >= NUMA_PERIOD_THRESHOLD) {
		int slot = ratio - NUMA_PERIOD_THRESHOLD;
		if (!slot)
			slot = 1;
		diff = slot * period_slot;
	} else {
		diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;

		/*
		 * Scale scan rate increases based on sharing. There is an
		 * inverse relationship between the degree of sharing and
		 * the adjustment made to the scanning period. Broadly
		 * speaking the intent is that there is little point
		 * scanning faster if shared accesses dominate as it may
		 * simply bounce migrations uselessly
		 */
		period_slot = DIV_ROUND_UP(diff, NUMA_PERIOD_SLOTS);
		ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
		diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
	}

	p->numa_scan_period = clamp(p->numa_scan_period + diff,
			task_scan_min(p), task_scan_max(p));
	memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}
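
/*
 * Illustrative example: with a 1000ms scan period, period_slot is 100ms.
 * If 80% of the recorded faults were local (ratio 8 >= threshold 3) the
 * period grows by (8 - 3) * 100ms = 500ms; if only 10% were local
 * (ratio 1) it shrinks by (3 - 1) * 100ms = 200ms, scaled down further by
 * the private/shared fault ratio, before being clamped between
 * task_scan_min() and task_scan_max().
 */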

static void task_numa_placement(struct task_struct *p)
{
	int seq, nid, max_nid = -1, max_group_nid = -1;
	unsigned long max_faults = 0, max_group_faults = 0;
	unsigned long fault_types[2] = { 0, 0 };
	spinlock_t *group_lock = NULL;

	seq = ACCESS_ONCE(p->mm->numa_scan_seq);
	if (p->numa_scan_seq == seq)
		return;
	p->numa_scan_seq = seq;
	p->numa_scan_period_max = task_scan_max(p);

	/* If the task is part of a group prevent parallel updates to group stats */
	if (p->numa_group) {
		group_lock = &p->numa_group->lock;
		spin_lock(group_lock);
	}

	/* Find the node with the highest number of faults */
	for_each_online_node(nid) {
		unsigned long faults = 0, group_faults = 0;
		int priv, i;

		for (priv = 0; priv < 2; priv++) {
			long diff;

			i = task_faults_idx(nid, priv);
			diff = -p->numa_faults[i];

			/* Decay existing window, copy faults since last scan */
			p->numa_faults[i] >>= 1;
			p->numa_faults[i] += p->numa_faults_buffer[i];
			fault_types[priv] += p->numa_faults_buffer[i];
			p->numa_faults_buffer[i] = 0;

			faults += p->numa_faults[i];
			diff += p->numa_faults[i];
			p->total_numa_faults += diff;
			if (p->numa_group) {
				/* safe because we can only change our own group */
				p->numa_group->faults[i] += diff;
				p->numa_group->total_faults += diff;
				group_faults += p->numa_group->faults[i];
			}
		}

		if (faults > max_faults) {
			max_faults = faults;
			max_nid = nid;
		}

		if (group_faults > max_group_faults) {
			max_group_faults = group_faults;
			max_group_nid = nid;
		}
	}

	update_task_scan_period(p, fault_types[0], fault_types[1]);

	if (p->numa_group) {
		/*
		 * If the preferred task and group nids are different,
		 * iterate over the nodes again to find the best place.
		 */
		if (max_nid != max_group_nid) {
			unsigned long weight, max_weight = 0;

			for_each_online_node(nid) {
				weight = task_weight(p, nid) + group_weight(p, nid);
				if (weight > max_weight) {
					max_weight = weight;
					max_nid = nid;
				}
			}
		}

		spin_unlock(group_lock);
	}

	/* Preferred node as the node with the most faults */
	if (max_faults && max_nid != p->numa_preferred_nid) {
		/* Update the preferred nid and migrate task if possible */
		sched_setnuma(p, max_nid);
		numa_migrate_preferred(p);
	}
}
1464
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001465static inline int get_numa_group(struct numa_group *grp)
1466{
1467 return atomic_inc_not_zero(&grp->refcount);
1468}
1469
1470static inline void put_numa_group(struct numa_group *grp)
1471{
1472 if (atomic_dec_and_test(&grp->refcount))
1473 kfree_rcu(grp, rcu);
1474}
1475
Mel Gorman3e6a9412013-10-07 11:29:35 +01001476static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1477 int *priv)
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001478{
1479 struct numa_group *grp, *my_grp;
1480 struct task_struct *tsk;
1481 bool join = false;
1482 int cpu = cpupid_to_cpu(cpupid);
1483 int i;
1484
1485 if (unlikely(!p->numa_group)) {
1486 unsigned int size = sizeof(struct numa_group) +
Mel Gorman989348b2013-10-07 11:29:40 +01001487 2*nr_node_ids*sizeof(unsigned long);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001488
1489 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1490 if (!grp)
1491 return;
1492
1493 atomic_set(&grp->refcount, 1);
1494 spin_lock_init(&grp->lock);
1495 INIT_LIST_HEAD(&grp->task_list);
Mel Gormane29cf082013-10-07 11:29:22 +01001496 grp->gid = p->pid;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001497
1498 for (i = 0; i < 2*nr_node_ids; i++)
Mel Gorman989348b2013-10-07 11:29:40 +01001499 grp->faults[i] = p->numa_faults[i];
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001500
Mel Gorman989348b2013-10-07 11:29:40 +01001501 grp->total_faults = p->total_numa_faults;
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001502
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001503 list_add(&p->numa_entry, &grp->task_list);
1504 grp->nr_tasks++;
1505 rcu_assign_pointer(p->numa_group, grp);
1506 }
1507
1508 rcu_read_lock();
1509 tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1510
1511 if (!cpupid_match_pid(tsk, cpupid))
Peter Zijlstra33547812013-10-09 10:24:48 +02001512 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001513
1514 grp = rcu_dereference(tsk->numa_group);
1515 if (!grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001516 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001517
1518 my_grp = p->numa_group;
1519 if (grp == my_grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001520 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001521
1522 /*
1523	 * Only join the other group if it's bigger; if we're the bigger group,
1524 * the other task will join us.
1525 */
1526 if (my_grp->nr_tasks > grp->nr_tasks)
Peter Zijlstra33547812013-10-09 10:24:48 +02001527 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001528
1529 /*
1530 * Tie-break on the grp address.
1531 */
1532 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001533 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001534
Rik van Rieldabe1d92013-10-07 11:29:34 +01001535 /* Always join threads in the same process. */
1536 if (tsk->mm == current->mm)
1537 join = true;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001538
Rik van Rieldabe1d92013-10-07 11:29:34 +01001539 /* Simple filter to avoid false positives due to PID collisions */
1540 if (flags & TNF_SHARED)
1541 join = true;
1542
Mel Gorman3e6a9412013-10-07 11:29:35 +01001543 /* Update priv based on whether false sharing was detected */
1544 *priv = !join;
1545
Rik van Rieldabe1d92013-10-07 11:29:34 +01001546 if (join && !get_numa_group(grp))
Peter Zijlstra33547812013-10-09 10:24:48 +02001547 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001548
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001549 rcu_read_unlock();
1550
1551 if (!join)
1552 return;
1553
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001554 double_lock(&my_grp->lock, &grp->lock);
1555
Mel Gorman989348b2013-10-07 11:29:40 +01001556 for (i = 0; i < 2*nr_node_ids; i++) {
1557 my_grp->faults[i] -= p->numa_faults[i];
1558 grp->faults[i] += p->numa_faults[i];
1559 }
1560 my_grp->total_faults -= p->total_numa_faults;
1561 grp->total_faults += p->total_numa_faults;
1562
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001563 list_move(&p->numa_entry, &grp->task_list);
1564 my_grp->nr_tasks--;
1565 grp->nr_tasks++;
1566
1567 spin_unlock(&my_grp->lock);
1568 spin_unlock(&grp->lock);
1569
1570 rcu_assign_pointer(p->numa_group, grp);
1571
1572 put_numa_group(my_grp);
Peter Zijlstra33547812013-10-09 10:24:48 +02001573 return;
1574
1575no_join:
1576 rcu_read_unlock();
1577 return;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001578}
1579
1580void task_numa_free(struct task_struct *p)
1581{
1582 struct numa_group *grp = p->numa_group;
1583 int i;
Rik van Riel82727012013-10-07 11:29:28 +01001584 void *numa_faults = p->numa_faults;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001585
1586 if (grp) {
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001587 spin_lock(&grp->lock);
Mel Gorman989348b2013-10-07 11:29:40 +01001588 for (i = 0; i < 2*nr_node_ids; i++)
1589 grp->faults[i] -= p->numa_faults[i];
1590 grp->total_faults -= p->total_numa_faults;
1591
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001592 list_del(&p->numa_entry);
1593 grp->nr_tasks--;
1594 spin_unlock(&grp->lock);
1595 rcu_assign_pointer(p->numa_group, NULL);
1596 put_numa_group(grp);
1597 }
1598
Rik van Riel82727012013-10-07 11:29:28 +01001599 p->numa_faults = NULL;
1600 p->numa_faults_buffer = NULL;
1601 kfree(numa_faults);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001602}
1603
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001604/*
1605 * Got a PROT_NONE fault for a page on @node.
1606 */
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001607void task_numa_fault(int last_cpupid, int node, int pages, int flags)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001608{
1609 struct task_struct *p = current;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001610 bool migrated = flags & TNF_MIGRATED;
Mel Gormanac8e8952013-10-07 11:29:03 +01001611 int priv;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001612
Dave Kleikamp10e84b92013-07-31 13:53:35 -07001613 if (!numabalancing_enabled)
Mel Gorman1a687c22012-11-22 11:16:36 +00001614 return;
1615
Mel Gorman9ff1d9f2013-10-07 11:29:04 +01001616 /* for example, ksmd faulting in a user's mm */
1617 if (!p->mm)
1618 return;
1619
Rik van Riel82727012013-10-07 11:29:28 +01001620 /* Do not worry about placement if exiting */
1621 if (p->state == TASK_DEAD)
1622 return;
1623
Mel Gormanf809ca92013-10-07 11:28:57 +01001624 /* Allocate buffer to track faults on a per-node basis */
1625 if (unlikely(!p->numa_faults)) {
Mel Gormanac8e8952013-10-07 11:29:03 +01001626 int size = sizeof(*p->numa_faults) * 2 * nr_node_ids;
Mel Gormanf809ca92013-10-07 11:28:57 +01001627
Mel Gorman745d6142013-10-07 11:28:59 +01001628 /* numa_faults and numa_faults_buffer share the allocation */
1629 p->numa_faults = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
Mel Gormanf809ca92013-10-07 11:28:57 +01001630 if (!p->numa_faults)
1631 return;
Mel Gorman745d6142013-10-07 11:28:59 +01001632
1633 BUG_ON(p->numa_faults_buffer);
Mel Gormanac8e8952013-10-07 11:29:03 +01001634 p->numa_faults_buffer = p->numa_faults + (2 * nr_node_ids);
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001635 p->total_numa_faults = 0;
Rik van Riel04bb2f92013-10-07 11:29:36 +01001636 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
Mel Gormanf809ca92013-10-07 11:28:57 +01001637 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001638
Mel Gormanfb003b82012-11-15 09:01:14 +00001639 /*
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001640	 * First accesses are treated as private; otherwise, consider accesses
1641	 * to be private if the accessing pid has not changed.
1642 */
1643 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
1644 priv = 1;
1645 } else {
1646 priv = cpupid_match_pid(p, last_cpupid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001647 if (!priv && !(flags & TNF_NO_GROUP))
Mel Gorman3e6a9412013-10-07 11:29:35 +01001648 task_numa_group(p, last_cpupid, flags, &priv);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001649 }
1650
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001651 task_numa_placement(p);
Mel Gormanf809ca92013-10-07 11:28:57 +01001652
Rik van Riel2739d3e2013-10-07 11:29:41 +01001653 /*
1654	 * Retry the task-to-preferred-node migration periodically, in case
1655	 * it previously failed, or the scheduler moved us.
1656 */
1657 if (time_after(jiffies, p->numa_migrate_retry))
Mel Gorman6b9a7462013-10-07 11:29:11 +01001658 numa_migrate_preferred(p);
1659
Ingo Molnarb32e86b2013-10-07 11:29:30 +01001660 if (migrated)
1661 p->numa_pages_migrated += pages;
1662
Mel Gormanac8e8952013-10-07 11:29:03 +01001663 p->numa_faults_buffer[task_faults_idx(node, priv)] += pages;
Rik van Riel04bb2f92013-10-07 11:29:36 +01001664 p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001665}
1666
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001667static void reset_ptenuma_scan(struct task_struct *p)
1668{
1669 ACCESS_ONCE(p->mm->numa_scan_seq)++;
1670 p->mm->numa_scan_offset = 0;
1671}
1672
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001673/*
1674 * The expensive part of numa migration is done from task_work context.
1675 * Triggered from task_tick_numa().
1676 */
1677void task_numa_work(struct callback_head *work)
1678{
1679 unsigned long migrate, next_scan, now = jiffies;
1680 struct task_struct *p = current;
1681 struct mm_struct *mm = p->mm;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001682 struct vm_area_struct *vma;
Mel Gorman9f406042012-11-14 18:34:32 +00001683 unsigned long start, end;
Mel Gorman598f0ec2013-10-07 11:28:55 +01001684 unsigned long nr_pte_updates = 0;
Mel Gorman9f406042012-11-14 18:34:32 +00001685 long pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001686
1687 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
1688
1689 work->next = work; /* protect against double add */
1690 /*
1691 * Who cares about NUMA placement when they're dying.
1692 *
1693 * NOTE: make sure not to dereference p->mm before this check,
1694 * exit_task_work() happens _after_ exit_mm() so we could be called
1695 * without p->mm even though we still had it when we enqueued this
1696 * work.
1697 */
1698 if (p->flags & PF_EXITING)
1699 return;
1700
Mel Gorman930aa172013-10-07 11:29:37 +01001701 if (!mm->numa_next_scan) {
Mel Gorman7e8d16b2013-10-07 11:28:54 +01001702 mm->numa_next_scan = now +
1703 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
Mel Gormanb8593bf2012-11-21 01:18:23 +00001704 }
1705
1706 /*
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001707	 * Enforce maximal scan/migration frequency.
1708 */
1709 migrate = mm->numa_next_scan;
1710 if (time_before(now, migrate))
1711 return;
1712
Mel Gorman598f0ec2013-10-07 11:28:55 +01001713 if (p->numa_scan_period == 0) {
1714 p->numa_scan_period_max = task_scan_max(p);
1715 p->numa_scan_period = task_scan_min(p);
1716 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001717
Mel Gormanfb003b82012-11-15 09:01:14 +00001718 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001719 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
1720 return;
1721
Mel Gormane14808b2012-11-19 10:59:15 +00001722 /*
Peter Zijlstra19a78d12013-10-07 11:28:51 +01001723 * Delay this task enough that another task of this mm will likely win
1724 * the next time around.
1725 */
1726 p->node_stamp += 2 * TICK_NSEC;
1727
Mel Gorman9f406042012-11-14 18:34:32 +00001728 start = mm->numa_scan_offset;
1729 pages = sysctl_numa_balancing_scan_size;
1730 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
1731 if (!pages)
1732 return;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001733
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001734 down_read(&mm->mmap_sem);
Mel Gorman9f406042012-11-14 18:34:32 +00001735 vma = find_vma(mm, start);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001736 if (!vma) {
1737 reset_ptenuma_scan(p);
Mel Gorman9f406042012-11-14 18:34:32 +00001738 start = 0;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001739 vma = mm->mmap;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001740 }
Mel Gorman9f406042012-11-14 18:34:32 +00001741 for (; vma; vma = vma->vm_next) {
Mel Gormanfc3147242013-10-07 11:29:09 +01001742 if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001743 continue;
1744
Mel Gorman4591ce4f2013-10-07 11:29:13 +01001745 /*
1746 * Shared library pages mapped by multiple processes are not
1747 * migrated as it is expected they are cache replicated. Avoid
1748 * hinting faults in read-only file-backed mappings or the vdso
1749 * as migrating the pages will be of marginal benefit.
1750 */
1751 if (!vma->vm_mm ||
1752 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1753 continue;
1754
Mel Gorman9f406042012-11-14 18:34:32 +00001755 do {
1756 start = max(start, vma->vm_start);
1757 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
1758 end = min(end, vma->vm_end);
Mel Gorman598f0ec2013-10-07 11:28:55 +01001759 nr_pte_updates += change_prot_numa(vma, start, end);
1760
1761 /*
1762 * Scan sysctl_numa_balancing_scan_size but ensure that
1763 * at least one PTE is updated so that unused virtual
1764 * address space is quickly skipped.
1765 */
1766 if (nr_pte_updates)
1767 pages -= (end - start) >> PAGE_SHIFT;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001768
Mel Gorman9f406042012-11-14 18:34:32 +00001769 start = end;
1770 if (pages <= 0)
1771 goto out;
1772 } while (end != vma->vm_end);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001773 }
1774
Mel Gorman9f406042012-11-14 18:34:32 +00001775out:
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001776 /*
Peter Zijlstrac69307d2013-10-07 11:28:41 +01001777 * It is possible to reach the end of the VMA list but the last few
1778	 * VMAs are not guaranteed to be vma_migratable. If they are not, we
1779 * would find the !migratable VMA on the next scan but not reset the
1780 * scanner to the start so check it now.
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001781 */
1782 if (vma)
Mel Gorman9f406042012-11-14 18:34:32 +00001783 mm->numa_scan_offset = start;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001784 else
1785 reset_ptenuma_scan(p);
1786 up_read(&mm->mmap_sem);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001787}
1788
1789/*
1790 * Drive the periodic memory faults.
1791 */
1792void task_tick_numa(struct rq *rq, struct task_struct *curr)
1793{
1794 struct callback_head *work = &curr->numa_work;
1795 u64 period, now;
1796
1797 /*
1798 * We don't care about NUMA placement if we don't have memory.
1799 */
1800 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1801 return;
1802
1803 /*
1804 * Using runtime rather than walltime has the dual advantage that
1805 * we (mostly) drive the selection from busy threads and that the
1806 * task needs to have done some actual work before we bother with
1807 * NUMA placement.
1808 */
1809 now = curr->se.sum_exec_runtime;
1810 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1811
1812 if (now - curr->node_stamp > period) {
Peter Zijlstra4b96a292012-10-25 14:16:47 +02001813 if (!curr->node_stamp)
Mel Gorman598f0ec2013-10-07 11:28:55 +01001814 curr->numa_scan_period = task_scan_min(curr);
Peter Zijlstra19a78d12013-10-07 11:28:51 +01001815 curr->node_stamp += period;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001816
1817 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1818 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1819 task_work_add(curr, work, true);
1820 }
1821 }
1822}
1823#else
1824static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1825{
1826}
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001827
1828static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1829{
1830}
1831
1832static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1833{
1834}
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001835#endif /* CONFIG_NUMA_BALANCING */
1836
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001837static void
1838account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1839{
1840 update_load_add(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001841 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02001842 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001843#ifdef CONFIG_SMP
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001844 if (entity_is_task(se)) {
1845 struct rq *rq = rq_of(cfs_rq);
1846
1847 account_numa_enqueue(rq, task_of(se));
1848 list_add(&se->group_node, &rq->cfs_tasks);
1849 }
Peter Zijlstra367456c2012-02-20 21:49:09 +01001850#endif
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001851 cfs_rq->nr_running++;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001852}
1853
1854static void
1855account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1856{
1857 update_load_sub(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001858 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02001859 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001860 if (entity_is_task(se)) {
1861 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
Bharata B Raob87f1722008-09-25 09:53:54 +05301862 list_del_init(&se->group_node);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001863 }
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001864 cfs_rq->nr_running--;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001865}
1866
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001867#ifdef CONFIG_FAIR_GROUP_SCHED
1868# ifdef CONFIG_SMP
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001869static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1870{
1871 long tg_weight;
1872
1873 /*
1874 * Use this CPU's actual weight instead of the last load_contribution
1875 * to gain a more accurate current total weight. See
1876 * update_cfs_rq_load_contribution().
1877 */
Alex Shibf5b9862013-06-20 10:18:54 +08001878 tg_weight = atomic_long_read(&tg->load_avg);
Paul Turner82958362012-10-04 13:18:31 +02001879 tg_weight -= cfs_rq->tg_load_contrib;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001880 tg_weight += cfs_rq->load.weight;
1881
1882 return tg_weight;
1883}
1884
Paul Turner6d5ab292011-01-21 20:45:01 -08001885static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001886{
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001887 long tg_weight, load, shares;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001888
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001889 tg_weight = calc_tg_weight(tg, cfs_rq);
Paul Turner6d5ab292011-01-21 20:45:01 -08001890 load = cfs_rq->load.weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001891
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001892 shares = (tg->shares * load);
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001893 if (tg_weight)
1894 shares /= tg_weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001895
1896 if (shares < MIN_SHARES)
1897 shares = MIN_SHARES;
1898 if (shares > tg->shares)
1899 shares = tg->shares;
1900
1901 return shares;
1902}
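/*
 * For illustration: the above reduces to
 *
 *	shares = tg->shares * cfs_rq->load.weight / tg_weight
 *
 * clamped to [MIN_SHARES, tg->shares].  E.g. a group with tg->shares == 1024
 * whose runnable weight is split 3:1 across two CPUs ends up with per-CPU
 * group-entity weights of roughly 768 and 256, summing back to the group's
 * configured share.
 */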
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001903# else /* CONFIG_SMP */
Paul Turner6d5ab292011-01-21 20:45:01 -08001904static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001905{
1906 return tg->shares;
1907}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001908# endif /* CONFIG_SMP */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001909static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1910 unsigned long weight)
1911{
Paul Turner19e5eeb2010-12-15 19:10:18 -08001912 if (se->on_rq) {
1913 /* commit outstanding execution time */
1914 if (cfs_rq->curr == se)
1915 update_curr(cfs_rq);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001916 account_entity_dequeue(cfs_rq, se);
Paul Turner19e5eeb2010-12-15 19:10:18 -08001917 }
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001918
1919 update_load_set(&se->load, weight);
1920
1921 if (se->on_rq)
1922 account_entity_enqueue(cfs_rq, se);
1923}
1924
Paul Turner82958362012-10-04 13:18:31 +02001925static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1926
Paul Turner6d5ab292011-01-21 20:45:01 -08001927static void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001928{
1929 struct task_group *tg;
1930 struct sched_entity *se;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001931 long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001932
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001933 tg = cfs_rq->tg;
1934 se = tg->se[cpu_of(rq_of(cfs_rq))];
Paul Turner64660c82011-07-21 09:43:36 -07001935 if (!se || throttled_hierarchy(cfs_rq))
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001936 return;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001937#ifndef CONFIG_SMP
1938 if (likely(se->load.weight == tg->shares))
1939 return;
1940#endif
Paul Turner6d5ab292011-01-21 20:45:01 -08001941 shares = calc_cfs_shares(cfs_rq, tg);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001942
1943 reweight_entity(cfs_rq_of(se), se, shares);
1944}
1945#else /* CONFIG_FAIR_GROUP_SCHED */
Paul Turner6d5ab292011-01-21 20:45:01 -08001946static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001947{
1948}
1949#endif /* CONFIG_FAIR_GROUP_SCHED */
1950
Alex Shi141965c2013-06-26 13:05:39 +08001951#ifdef CONFIG_SMP
Paul Turner9d85f212012-10-04 13:18:29 +02001952/*
Paul Turner5b51f2f2012-10-04 13:18:32 +02001953 * We choose a half-life close to 1 scheduling period.
1954 * Note: The tables below are dependent on this value.
1955 */
1956#define LOAD_AVG_PERIOD 32
1957#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1958#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
1959
1960/* Precomputed fixed inverse multiplies for multiplication by y^n */
1961static const u32 runnable_avg_yN_inv[] = {
1962 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1963 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1964 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1965 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1966 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1967 0x85aac367, 0x82cd8698,
1968};
1969
1970/*
1971 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
1972 * over-estimates when re-combining.
1973 */
1974static const u32 runnable_avg_yN_sum[] = {
1975 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1976 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1977 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1978};
1979
1980/*
Paul Turner9d85f212012-10-04 13:18:29 +02001981 * Approximate:
1982 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
1983 */
1984static __always_inline u64 decay_load(u64 val, u64 n)
1985{
Paul Turner5b51f2f2012-10-04 13:18:32 +02001986 unsigned int local_n;
1987
1988 if (!n)
1989 return val;
1990 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1991 return 0;
1992
1993 /* after bounds checking we can collapse to 32-bit */
1994 local_n = n;
1995
1996 /*
1997 * As y^PERIOD = 1/2, we can combine
1998	 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
1999	 * With a look-up table which covers y^n (n<PERIOD)
2000 *
2001 * To achieve constant time decay_load.
2002 */
2003 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2004 val >>= local_n / LOAD_AVG_PERIOD;
2005 local_n %= LOAD_AVG_PERIOD;
Paul Turner9d85f212012-10-04 13:18:29 +02002006 }
2007
Paul Turner5b51f2f2012-10-04 13:18:32 +02002008 val *= runnable_avg_yN_inv[local_n];
2009 /* We don't use SRR here since we always want to round down. */
2010 return val >> 32;
2011}
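/*
 * For illustration: runnable_avg_yN_inv[n] holds y^n scaled by 2^32, so
 * "val * runnable_avg_yN_inv[n] >> 32" approximates val * y^n in integer
 * math.  E.g. runnable_avg_yN_inv[1] == 0xfa83b2da ~= 0.9785 * 2^32,
 * matching y = 0.5^(1/32), and decay_load(1024, 32) yields ~512 because a
 * full LOAD_AVG_PERIOD halves the contribution.
 */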
2012
2013/*
2014 * For updates fully spanning n periods, the contribution to runnable
2015 * average will be: \Sum 1024*y^k  { 1<=k<=n }
2016 *
2017 * We can compute this reasonably efficiently by combining:
2018 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
2019 */
2020static u32 __compute_runnable_contrib(u64 n)
2021{
2022 u32 contrib = 0;
2023
2024 if (likely(n <= LOAD_AVG_PERIOD))
2025 return runnable_avg_yN_sum[n];
2026 else if (unlikely(n >= LOAD_AVG_MAX_N))
2027 return LOAD_AVG_MAX;
2028
2029 /* Compute \Sum k^n combining precomputed values for k^i, \Sum k^j */
2030 do {
2031 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2032 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2033
2034 n -= LOAD_AVG_PERIOD;
2035 } while (n > LOAD_AVG_PERIOD);
2036
2037 contrib = decay_load(contrib, n);
2038 return contrib + runnable_avg_yN_sum[n];
Paul Turner9d85f212012-10-04 13:18:29 +02002039}
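/*
 * For illustration: for n <= LOAD_AVG_PERIOD the table is used directly,
 * e.g. __compute_runnable_contrib(1) == 1002 ~= 1024*y.  For n == 40, one
 * loop iteration accumulates the 32-period sum (23371), the remaining
 * n == 8 then decays it to ~19652 and adds runnable_avg_yN_sum[8] == 7437,
 * giving ~27089, which matches the closed-form geometric sum to within
 * rounding.
 */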
2040
2041/*
2042 * We can represent the historical contribution to runnable average as the
2043 * coefficients of a geometric series. To do this we sub-divide our runnable
2044 * history into segments of approximately 1ms (1024us); label the segment that
2045 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2046 *
2047 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2048 * p0 p1 p2
2049 * (now) (~1ms ago) (~2ms ago)
2050 *
2051 * Let u_i denote the fraction of p_i that the entity was runnable.
2052 *
2053 * We then designate the fractions u_i as our coefficients, yielding the
2054 * following representation of historical load:
2055 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2056 *
2057 * We choose y based on the width of a reasonable scheduling period, fixing:
2058 * y^32 = 0.5
2059 *
2060 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2061 * approximately half as much as the contribution to load within the last ms
2062 * (u_0).
2063 *
2064 * When a period "rolls over" and we have new u_0`, multiplying the previous
2065 * sum again by y is sufficient to update:
2066 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2067 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2068 */
2069static __always_inline int __update_entity_runnable_avg(u64 now,
2070 struct sched_avg *sa,
2071 int runnable)
2072{
Paul Turner5b51f2f2012-10-04 13:18:32 +02002073 u64 delta, periods;
2074 u32 runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02002075 int delta_w, decayed = 0;
2076
2077 delta = now - sa->last_runnable_update;
2078 /*
2079 * This should only happen when time goes backwards, which it
2080 * unfortunately does during sched clock init when we swap over to TSC.
2081 */
2082 if ((s64)delta < 0) {
2083 sa->last_runnable_update = now;
2084 return 0;
2085 }
2086
2087 /*
2088 * Use 1024ns as the unit of measurement since it's a reasonable
2089 * approximation of 1us and fast to compute.
2090 */
2091 delta >>= 10;
2092 if (!delta)
2093 return 0;
2094 sa->last_runnable_update = now;
2095
2096 /* delta_w is the amount already accumulated against our next period */
2097 delta_w = sa->runnable_avg_period % 1024;
2098 if (delta + delta_w >= 1024) {
2099 /* period roll-over */
2100 decayed = 1;
2101
2102 /*
2103 * Now that we know we're crossing a period boundary, figure
2104 * out how much from delta we need to complete the current
2105 * period and accrue it.
2106 */
2107 delta_w = 1024 - delta_w;
Paul Turner5b51f2f2012-10-04 13:18:32 +02002108 if (runnable)
2109 sa->runnable_avg_sum += delta_w;
2110 sa->runnable_avg_period += delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02002111
Paul Turner5b51f2f2012-10-04 13:18:32 +02002112 delta -= delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02002113
Paul Turner5b51f2f2012-10-04 13:18:32 +02002114 /* Figure out how many additional periods this update spans */
2115 periods = delta / 1024;
2116 delta %= 1024;
2117
2118 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2119 periods + 1);
2120 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
2121 periods + 1);
2122
2123 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2124 runnable_contrib = __compute_runnable_contrib(periods);
2125 if (runnable)
2126 sa->runnable_avg_sum += runnable_contrib;
2127 sa->runnable_avg_period += runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02002128 }
2129
2130 /* Remainder of delta accrued against u_0` */
2131 if (runnable)
2132 sa->runnable_avg_sum += delta;
2133 sa->runnable_avg_period += delta;
2134
2135 return decayed;
2136}
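/*
 * For illustration: suppose ~3584us elapsed while runnable and 300us were
 * already accumulated against the current 1024us period.  The first 724us
 * close that period, both sums are decayed by y^3 (periods + 1),
 * __compute_runnable_contrib(2) == 1982 is added for the two fully spanned
 * periods, and the remaining 812us accrue against the new current period.
 */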
2137
Paul Turner9ee474f2012-10-04 13:18:30 +02002138/* Synchronize an entity's decay with its parenting cfs_rq. */
Paul Turneraff3e492012-10-04 13:18:30 +02002139static inline u64 __synchronize_entity_decay(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02002140{
2141 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2142 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2143
2144 decays -= se->avg.decay_count;
2145 if (!decays)
Paul Turneraff3e492012-10-04 13:18:30 +02002146 return 0;
Paul Turner9ee474f2012-10-04 13:18:30 +02002147
2148 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2149 se->avg.decay_count = 0;
Paul Turneraff3e492012-10-04 13:18:30 +02002150
2151 return decays;
Paul Turner9ee474f2012-10-04 13:18:30 +02002152}
2153
Paul Turnerc566e8e2012-10-04 13:18:30 +02002154#ifdef CONFIG_FAIR_GROUP_SCHED
2155static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2156 int force_update)
2157{
2158 struct task_group *tg = cfs_rq->tg;
Alex Shibf5b9862013-06-20 10:18:54 +08002159 long tg_contrib;
Paul Turnerc566e8e2012-10-04 13:18:30 +02002160
2161 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2162 tg_contrib -= cfs_rq->tg_load_contrib;
2163
Alex Shibf5b9862013-06-20 10:18:54 +08002164 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2165 atomic_long_add(tg_contrib, &tg->load_avg);
Paul Turnerc566e8e2012-10-04 13:18:30 +02002166 cfs_rq->tg_load_contrib += tg_contrib;
2167 }
2168}
Paul Turner8165e142012-10-04 13:18:31 +02002169
Paul Turnerbb17f652012-10-04 13:18:31 +02002170/*
2171 * Aggregate cfs_rq runnable averages into an equivalent task_group
2172 * representation for computing load contributions.
2173 */
2174static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2175 struct cfs_rq *cfs_rq)
2176{
2177 struct task_group *tg = cfs_rq->tg;
2178 long contrib;
2179
2180 /* The fraction of a cpu used by this cfs_rq */
Michal Nazarewicz85b088e2013-11-10 20:42:01 +01002181 contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
Paul Turnerbb17f652012-10-04 13:18:31 +02002182 sa->runnable_avg_period + 1);
2183 contrib -= cfs_rq->tg_runnable_contrib;
2184
2185 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2186 atomic_add(contrib, &tg->runnable_avg);
2187 cfs_rq->tg_runnable_contrib += contrib;
2188 }
2189}
2190
Paul Turner8165e142012-10-04 13:18:31 +02002191static inline void __update_group_entity_contrib(struct sched_entity *se)
2192{
2193 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2194 struct task_group *tg = cfs_rq->tg;
Paul Turnerbb17f652012-10-04 13:18:31 +02002195 int runnable_avg;
2196
Paul Turner8165e142012-10-04 13:18:31 +02002197 u64 contrib;
2198
2199 contrib = cfs_rq->tg_load_contrib * tg->shares;
Alex Shibf5b9862013-06-20 10:18:54 +08002200 se->avg.load_avg_contrib = div_u64(contrib,
2201 atomic_long_read(&tg->load_avg) + 1);
Paul Turnerbb17f652012-10-04 13:18:31 +02002202
2203 /*
2204 * For group entities we need to compute a correction term in the case
2205 * that they are consuming <1 cpu so that we would contribute the same
2206 * load as a task of equal weight.
2207 *
2208	 * Explicitly co-ordinating this measurement would be expensive, but
2209	 * fortunately the sum of each cpu's contribution forms a usable
2210 * lower-bound on the true value.
2211 *
2212	 * Consider the aggregate of 2 contributions.  Either they are disjoint
2213	 * (and the sum represents the true value) or they overlap and we are
2214	 * understating by the aggregate of their overlap.
2215 *
2216 * Extending this to N cpus, for a given overlap, the maximum amount we
2217	 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
2218 * cpus that overlap for this interval and w_i is the interval width.
2219 *
2220	 * On a small machine, the first term is well-bounded, which bounds the
2221	 * total error since w_i is a subset of the period.  Whereas on a
2222	 * larger machine, while this first term can be larger, if w_i is of
2223	 * consequential size it is guaranteed to see n_i*w_i quickly converge to
2224	 * our upper bound of 1 cpu.
2225 */
2226 runnable_avg = atomic_read(&tg->runnable_avg);
2227 if (runnable_avg < NICE_0_LOAD) {
2228 se->avg.load_avg_contrib *= runnable_avg;
2229 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2230 }
Paul Turner8165e142012-10-04 13:18:31 +02002231}
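/*
 * For illustration: the correction above scales the group entity's
 * contribution by runnable_avg / NICE_0_LOAD whenever the group runs less
 * than one full CPU's worth of time.  A group that is runnable only ~25%
 * of the time therefore contributes roughly a quarter of its nominal load,
 * competing like a mostly idle task rather than a fully busy one.
 */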
Paul Turnerc566e8e2012-10-04 13:18:30 +02002232#else
2233static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2234 int force_update) {}
Paul Turnerbb17f652012-10-04 13:18:31 +02002235static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2236 struct cfs_rq *cfs_rq) {}
Paul Turner8165e142012-10-04 13:18:31 +02002237static inline void __update_group_entity_contrib(struct sched_entity *se) {}
Paul Turnerc566e8e2012-10-04 13:18:30 +02002238#endif
2239
Paul Turner8165e142012-10-04 13:18:31 +02002240static inline void __update_task_entity_contrib(struct sched_entity *se)
2241{
2242 u32 contrib;
2243
2244 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2245 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2246 contrib /= (se->avg.runnable_avg_period + 1);
2247 se->avg.load_avg_contrib = scale_load(contrib);
2248}
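/*
 * For illustration: a task's contribution is its weight scaled by the
 * fraction of (decayed) time it was runnable, so a nice-0 task of weight
 * 1024 that was runnable about half the time ends up with a
 * load_avg_contrib of roughly 512.
 */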
2249
Paul Turner2dac7542012-10-04 13:18:30 +02002250/* Compute the current contribution to load_avg by se, return any delta */
2251static long __update_entity_load_avg_contrib(struct sched_entity *se)
2252{
2253 long old_contrib = se->avg.load_avg_contrib;
2254
Paul Turner8165e142012-10-04 13:18:31 +02002255 if (entity_is_task(se)) {
2256 __update_task_entity_contrib(se);
2257 } else {
Paul Turnerbb17f652012-10-04 13:18:31 +02002258 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
Paul Turner8165e142012-10-04 13:18:31 +02002259 __update_group_entity_contrib(se);
2260 }
Paul Turner2dac7542012-10-04 13:18:30 +02002261
2262 return se->avg.load_avg_contrib - old_contrib;
2263}
2264
Paul Turner9ee474f2012-10-04 13:18:30 +02002265static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2266 long load_contrib)
2267{
2268 if (likely(load_contrib < cfs_rq->blocked_load_avg))
2269 cfs_rq->blocked_load_avg -= load_contrib;
2270 else
2271 cfs_rq->blocked_load_avg = 0;
2272}
2273
Paul Turnerf1b17282012-10-04 13:18:31 +02002274static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2275
Paul Turner9d85f212012-10-04 13:18:29 +02002276/* Update a sched_entity's runnable average */
Paul Turner9ee474f2012-10-04 13:18:30 +02002277static inline void update_entity_load_avg(struct sched_entity *se,
2278 int update_cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02002279{
Paul Turner2dac7542012-10-04 13:18:30 +02002280 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2281 long contrib_delta;
Paul Turnerf1b17282012-10-04 13:18:31 +02002282 u64 now;
Paul Turner2dac7542012-10-04 13:18:30 +02002283
Paul Turnerf1b17282012-10-04 13:18:31 +02002284 /*
2285 * For a group entity we need to use their owned cfs_rq_clock_task() in
2286 * case they are the parent of a throttled hierarchy.
2287 */
2288 if (entity_is_task(se))
2289 now = cfs_rq_clock_task(cfs_rq);
2290 else
2291 now = cfs_rq_clock_task(group_cfs_rq(se));
2292
2293 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
Paul Turner2dac7542012-10-04 13:18:30 +02002294 return;
2295
2296 contrib_delta = __update_entity_load_avg_contrib(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02002297
2298 if (!update_cfs_rq)
2299 return;
2300
Paul Turner2dac7542012-10-04 13:18:30 +02002301 if (se->on_rq)
2302 cfs_rq->runnable_load_avg += contrib_delta;
Paul Turner9ee474f2012-10-04 13:18:30 +02002303 else
2304 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2305}
2306
2307/*
2308 * Decay the load contributed by all blocked children and account this so that
2309 * their contribution may be appropriately discounted when they wake up.
2310 */
Paul Turneraff3e492012-10-04 13:18:30 +02002311static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02002312{
Paul Turnerf1b17282012-10-04 13:18:31 +02002313 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02002314 u64 decays;
2315
2316 decays = now - cfs_rq->last_decay;
Paul Turneraff3e492012-10-04 13:18:30 +02002317 if (!decays && !force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02002318 return;
2319
Alex Shi25099402013-06-20 10:18:55 +08002320 if (atomic_long_read(&cfs_rq->removed_load)) {
2321 unsigned long removed_load;
2322 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
Paul Turneraff3e492012-10-04 13:18:30 +02002323 subtract_blocked_load_contrib(cfs_rq, removed_load);
2324 }
Paul Turner9ee474f2012-10-04 13:18:30 +02002325
Paul Turneraff3e492012-10-04 13:18:30 +02002326 if (decays) {
2327 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2328 decays);
2329 atomic64_add(decays, &cfs_rq->decay_counter);
2330 cfs_rq->last_decay = now;
2331 }
Paul Turnerc566e8e2012-10-04 13:18:30 +02002332
2333 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
Paul Turner9d85f212012-10-04 13:18:29 +02002334}
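/*
 * For illustration: "cfs_rq_clock_task(cfs_rq) >> 20" expresses the clock
 * in ~1ms (2^20 ns) units, so "now - cfs_rq->last_decay" directly counts
 * elapsed decay periods; the blocked load is then aged for all of them in
 * one decay_load() call instead of once per tick.
 */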
Ben Segall18bf2802012-10-04 12:51:20 +02002335
2336static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2337{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002338 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
Paul Turnerbb17f652012-10-04 13:18:31 +02002339 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
Ben Segall18bf2802012-10-04 12:51:20 +02002340}
Paul Turner2dac7542012-10-04 13:18:30 +02002341
2342/* Add the load generated by se into cfs_rq's child load-average */
2343static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002344 struct sched_entity *se,
2345 int wakeup)
Paul Turner2dac7542012-10-04 13:18:30 +02002346{
Paul Turneraff3e492012-10-04 13:18:30 +02002347 /*
2348	 * We track migrations using entity decay_count <= 0; on a wake-up
2349 * migration we use a negative decay count to track the remote decays
2350 * accumulated while sleeping.
Alex Shia75cdaa2013-06-20 10:18:47 +08002351 *
2352 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
2353 * are seen by enqueue_entity_load_avg() as a migration with an already
2354 * constructed load_avg_contrib.
Paul Turneraff3e492012-10-04 13:18:30 +02002355 */
2356 if (unlikely(se->avg.decay_count <= 0)) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002357 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
Paul Turneraff3e492012-10-04 13:18:30 +02002358 if (se->avg.decay_count) {
2359 /*
2360 * In a wake-up migration we have to approximate the
2361 * time sleeping. This is because we can't synchronize
2362 * clock_task between the two cpus, and it is not
2363 * guaranteed to be read-safe. Instead, we can
2364 * approximate this using our carried decays, which are
2365 * explicitly atomically readable.
2366 */
2367 se->avg.last_runnable_update -= (-se->avg.decay_count)
2368 << 20;
2369 update_entity_load_avg(se, 0);
2370 /* Indicate that we're now synchronized and on-rq */
2371 se->avg.decay_count = 0;
2372 }
Paul Turner9ee474f2012-10-04 13:18:30 +02002373 wakeup = 0;
2374 } else {
Alex Shi282cf492013-06-20 10:18:48 +08002375 /*
2376 * Task re-woke on same cpu (or else migrate_task_rq_fair()
2377 * would have made count negative); we must be careful to avoid
2378 * double-accounting blocked time after synchronizing decays.
2379 */
2380 se->avg.last_runnable_update += __synchronize_entity_decay(se)
2381 << 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02002382 }
2383
Paul Turneraff3e492012-10-04 13:18:30 +02002384 /* migrated tasks did not contribute to our blocked load */
2385 if (wakeup) {
Paul Turner9ee474f2012-10-04 13:18:30 +02002386 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turneraff3e492012-10-04 13:18:30 +02002387 update_entity_load_avg(se, 0);
2388 }
Paul Turner9ee474f2012-10-04 13:18:30 +02002389
Paul Turner2dac7542012-10-04 13:18:30 +02002390 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02002391 /* we force update consideration on load-balancer moves */
2392 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
Paul Turner2dac7542012-10-04 13:18:30 +02002393}
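/*
 * For illustration: the "<< 20" above converts a count of decay periods
 * back into nanoseconds, since each decay period is 1024us ~= 2^20 ns.  On
 * a wake-up migration the (negative) decay_count therefore stands in for
 * sleep time that could not be measured against the remote CPU's clock.
 */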
2394
Paul Turner9ee474f2012-10-04 13:18:30 +02002395/*
2396 * Remove se's load from this cfs_rq child load-average; if the entity is
2397 * transitioning to a blocked state we track its projected decay using
2398 * blocked_load_avg.
2399 */
Paul Turner2dac7542012-10-04 13:18:30 +02002400static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002401 struct sched_entity *se,
2402 int sleep)
Paul Turner2dac7542012-10-04 13:18:30 +02002403{
Paul Turner9ee474f2012-10-04 13:18:30 +02002404 update_entity_load_avg(se, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002405 /* we force update consideration on load-balancer moves */
2406 update_cfs_rq_blocked_load(cfs_rq, !sleep);
Paul Turner9ee474f2012-10-04 13:18:30 +02002407
Paul Turner2dac7542012-10-04 13:18:30 +02002408 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
Paul Turner9ee474f2012-10-04 13:18:30 +02002409 if (sleep) {
2410 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2411 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2412 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
Paul Turner2dac7542012-10-04 13:18:30 +02002413}
Vincent Guittot642dbc32013-04-18 18:34:26 +02002414
2415/*
2416 * Update the rq's load with the elapsed running time before entering
2417 * idle. If the last scheduled task is not a CFS task, idle_enter will
2418 * be the only way to update the runnable statistic.
2419 */
2420void idle_enter_fair(struct rq *this_rq)
2421{
2422 update_rq_runnable_avg(this_rq, 1);
2423}
2424
2425/*
2426 * Update the rq's load with the elapsed idle time before a task is
2427 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
2428 * be the only way to update the runnable statistic.
2429 */
2430void idle_exit_fair(struct rq *this_rq)
2431{
2432 update_rq_runnable_avg(this_rq, 0);
2433}
2434
Paul Turner9d85f212012-10-04 13:18:29 +02002435#else
Paul Turner9ee474f2012-10-04 13:18:30 +02002436static inline void update_entity_load_avg(struct sched_entity *se,
2437 int update_cfs_rq) {}
Ben Segall18bf2802012-10-04 12:51:20 +02002438static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Paul Turner2dac7542012-10-04 13:18:30 +02002439static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002440 struct sched_entity *se,
2441 int wakeup) {}
Paul Turner2dac7542012-10-04 13:18:30 +02002442static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002443 struct sched_entity *se,
2444 int sleep) {}
Paul Turneraff3e492012-10-04 13:18:30 +02002445static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2446 int force_update) {}
Paul Turner9d85f212012-10-04 13:18:29 +02002447#endif
2448
Ingo Molnar2396af62007-08-09 11:16:48 +02002449static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002450{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002451#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02002452 struct task_struct *tsk = NULL;
2453
2454 if (entity_is_task(se))
2455 tsk = task_of(se);
2456
Lucas De Marchi41acab82010-03-10 23:37:45 -03002457 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002458 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002459
2460 if ((s64)delta < 0)
2461 delta = 0;
2462
Lucas De Marchi41acab82010-03-10 23:37:45 -03002463 if (unlikely(delta > se->statistics.sleep_max))
2464 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002465
Peter Zijlstra8c79a042012-01-30 14:51:37 +01002466 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03002467 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01002468
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002469 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02002470 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002471 trace_sched_stat_sleep(tsk, delta);
2472 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002473 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03002474 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002475 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002476
2477 if ((s64)delta < 0)
2478 delta = 0;
2479
Lucas De Marchi41acab82010-03-10 23:37:45 -03002480 if (unlikely(delta > se->statistics.block_max))
2481 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002482
Peter Zijlstra8c79a042012-01-30 14:51:37 +01002483 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03002484 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02002485
Peter Zijlstrae4143142009-07-23 20:13:26 +02002486 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07002487 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002488 se->statistics.iowait_sum += delta;
2489 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002490 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07002491 }
2492
Andrew Vaginb781a602011-11-28 12:03:35 +03002493 trace_sched_stat_blocked(tsk, delta);
2494
Peter Zijlstrae4143142009-07-23 20:13:26 +02002495 /*
2496 * Blocking time is in units of nanosecs, so shift by
2497 * 20 to get a milliseconds-range estimation of the
2498 * amount of time that the task spent sleeping:
2499 */
2500 if (unlikely(prof_on == SLEEP_PROFILING)) {
2501 profile_hits(SLEEP_PROFILING,
2502 (void *)get_wchan(tsk),
2503 delta >> 20);
2504 }
2505 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02002506 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002507 }
2508#endif
2509}
2510
Peter Zijlstraddc97292007-10-15 17:00:10 +02002511static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2512{
2513#ifdef CONFIG_SCHED_DEBUG
2514 s64 d = se->vruntime - cfs_rq->min_vruntime;
2515
2516 if (d < 0)
2517 d = -d;
2518
2519 if (d > 3*sysctl_sched_latency)
2520 schedstat_inc(cfs_rq, nr_spread_over);
2521#endif
2522}
2523
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002524static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002525place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2526{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02002527 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02002528
Peter Zijlstra2cb86002007-11-09 22:39:37 +01002529 /*
2530	 * The 'current' period is already promised to the current tasks;
2531	 * however, the extra weight of the new task will slow them down a
2532	 * little, so place the new task so that it fits in the slot that
2533 * stays open at the end.
2534 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02002535 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02002536 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002537
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002538 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01002539 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002540 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02002541
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002542 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002543 * Halve their sleep time's effect, to allow
2544 * for a gentler effect of sleepers:
2545 */
2546 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2547 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02002548
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002549 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002550 }
2551
Mike Galbraithb5d9d732009-09-08 11:12:28 +02002552 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05302553 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002554}
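/*
 * For illustration: with START_DEBIT a newly forked task is placed one
 * vslice ahead of min_vruntime, so it cannot immediately preempt existing
 * tasks, while a waking sleeper is credited up to one latency period
 * (halved under GENTLE_FAIR_SLEEPERS, e.g. ~3ms for a 6ms latency) behind
 * min_vruntime, clamped so a task never ends up with less vruntime than it
 * already had.
 */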
2555
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002556static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2557
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002558static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002559enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002560{
2561 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002562 * Update the normalized vruntime before updating min_vruntime
Kamalesh Babulal0fc576d2013-06-27 11:24:18 +05302563 * through calling update_curr().
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002564 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002565 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002566 se->vruntime += cfs_rq->min_vruntime;
2567
2568 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002569 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002570 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002571 update_curr(cfs_rq);
Paul Turnerf269ae02012-10-04 13:18:31 +02002572 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002573 account_entity_enqueue(cfs_rq, se);
2574 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002575
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002576 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002577 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02002578 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02002579 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002580
Ingo Molnard2417e52007-08-09 11:16:47 +02002581 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02002582 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002583 if (se != cfs_rq->curr)
2584 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002585 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08002586
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002587 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08002588 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002589 check_enqueue_throttle(cfs_rq);
2590 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002591}
2592
Rik van Riel2c13c9192011-02-01 09:48:37 -05002593static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01002594{
Rik van Riel2c13c9192011-02-01 09:48:37 -05002595 for_each_sched_entity(se) {
2596 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2597 if (cfs_rq->last == se)
2598 cfs_rq->last = NULL;
2599 else
2600 break;
2601 }
2602}
Peter Zijlstra2002c692008-11-11 11:52:33 +01002603
Rik van Riel2c13c9192011-02-01 09:48:37 -05002604static void __clear_buddies_next(struct sched_entity *se)
2605{
2606 for_each_sched_entity(se) {
2607 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2608 if (cfs_rq->next == se)
2609 cfs_rq->next = NULL;
2610 else
2611 break;
2612 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01002613}
2614
Rik van Rielac53db52011-02-01 09:51:03 -05002615static void __clear_buddies_skip(struct sched_entity *se)
2616{
2617 for_each_sched_entity(se) {
2618 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2619 if (cfs_rq->skip == se)
2620 cfs_rq->skip = NULL;
2621 else
2622 break;
2623 }
2624}
2625
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01002626static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2627{
Rik van Riel2c13c9192011-02-01 09:48:37 -05002628 if (cfs_rq->last == se)
2629 __clear_buddies_last(se);
2630
2631 if (cfs_rq->next == se)
2632 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05002633
2634 if (cfs_rq->skip == se)
2635 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01002636}
2637
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002638static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07002639
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002640static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002641dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002642{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002643 /*
2644 * Update run-time statistics of the 'current'.
2645 */
2646 update_curr(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002647 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002648
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02002649 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002650 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02002651#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002652 if (entity_is_task(se)) {
2653 struct task_struct *tsk = task_of(se);
2654
2655 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002656 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002657 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002658 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002659 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02002660#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02002661 }
2662
Peter Zijlstra2002c692008-11-11 11:52:33 +01002663 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01002664
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002665 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002666 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002667 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002668 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002669
2670 /*
2671 * Normalize the entity after updating the min_vruntime because the
2672 * update can refer to the ->curr item and we need to reflect this
2673 * movement in our normalized position.
2674 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002675 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002676 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07002677
Paul Turnerd8b49862011-07-21 09:43:41 -07002678 /* return excess runtime on last dequeue */
2679 return_cfs_rq_runtime(cfs_rq);
2680
Peter Zijlstra1e876232011-05-17 16:21:10 -07002681 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002682 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002683}
2684
2685/*
2686 * Preempt the current task with a newly woken task if needed:
2687 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02002688static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002689check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002690{
Peter Zijlstra11697832007-09-05 14:32:49 +02002691 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002692 struct sched_entity *se;
2693 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02002694
Peter Zijlstra6d0f0ebd2007-10-15 17:00:05 +02002695 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02002696 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002697 if (delta_exec > ideal_runtime) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002698 resched_task(rq_of(cfs_rq)->curr);
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002699 /*
2700 * The current task ran long enough, ensure it doesn't get
2701 * re-elected due to buddy favours.
2702 */
2703 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002704 return;
2705 }
2706
2707 /*
2708 * Ensure that a task that missed wakeup preemption by a
2709 * narrow margin doesn't have to wait for a full slice.
2710 * This also mitigates buddy induced latencies under load.
2711 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002712 if (delta_exec < sysctl_sched_min_granularity)
2713 return;
2714
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002715 se = __pick_first_entity(cfs_rq);
2716 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02002717
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002718 if (delta < 0)
2719 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01002720
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002721 if (delta > ideal_runtime)
2722 resched_task(rq_of(cfs_rq)->curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002723}
2724
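/*
 * Make @se the running entity: 'current' is not kept in the rbtree, so take
 * it out if it was queued, start its exec statistics window and snapshot
 * sum_exec_runtime so check_preempt_tick() can measure the slice it consumes.
 */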
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002725static void
Ingo Molnar8494f412007-08-09 11:16:48 +02002726set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002727{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002728 /* 'current' is not kept within the tree. */
2729 if (se->on_rq) {
2730 /*
2731		 * Any task has to be enqueued before it gets to execute on
2732 * a CPU. So account for the time it spent waiting on the
2733 * runqueue.
2734 */
2735 update_stats_wait_end(cfs_rq, se);
2736 __dequeue_entity(cfs_rq, se);
2737 }
2738
Ingo Molnar79303e92007-08-09 11:16:47 +02002739 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02002740 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02002741#ifdef CONFIG_SCHEDSTATS
2742 /*
2743 * Track our maximum slice length, if the CPU's load is at
2744	 * least twice that of our own weight (i.e. don't track it
2745 * when there are only lesser-weight tasks around):
2746 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02002747 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002748 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02002749 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2750 }
2751#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02002752 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002753}
2754
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02002755static int
2756wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2757
Rik van Rielac53db52011-02-01 09:51:03 -05002758/*
2759 * Pick the next process, keeping these things in mind, in this order:
2760 * 1) keep things fair between processes/task groups
2761 * 2) pick the "next" process, since someone really wants that to run
2762 * 3) pick the "last" process, for cache locality
2763 * 4) do not run the "skip" process, if something else is available
2764 */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01002765static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002766{
Rik van Rielac53db52011-02-01 09:51:03 -05002767 struct sched_entity *se = __pick_first_entity(cfs_rq);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002768 struct sched_entity *left = se;
Peter Zijlstraf4b67552008-11-04 21:25:07 +01002769
Rik van Rielac53db52011-02-01 09:51:03 -05002770 /*
2771 * Avoid running the skip buddy, if running something else can
2772 * be done without getting too unfair.
2773 */
2774 if (cfs_rq->skip == se) {
2775 struct sched_entity *second = __pick_next_entity(se);
2776 if (second && wakeup_preempt_entity(second, left) < 1)
2777 se = second;
2778 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002779
Mike Galbraithf685cea2009-10-23 23:09:22 +02002780 /*
2781 * Prefer last buddy, try to return the CPU to a preempted task.
2782 */
2783 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2784 se = cfs_rq->last;
2785
Rik van Rielac53db52011-02-01 09:51:03 -05002786 /*
2787 * Someone really wants this to run. If it's not unfair, run it.
2788 */
2789 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2790 se = cfs_rq->next;
2791
Mike Galbraithf685cea2009-10-23 23:09:22 +02002792 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01002793
2794 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002795}
2796
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002797static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2798
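/*
 * Put the previously running entity back: account any runtime it consumed
 * since the last update via update_curr(), give the bandwidth code a chance
 * to throttle, and re-insert the entity into the rbtree if it is still
 * runnable; cfs_rq->curr is cleared either way.
 */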
Ingo Molnarab6cde22007-08-09 11:16:48 +02002799static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002800{
2801 /*
2802 * If still on the runqueue then deactivate_task()
2803 * was not called and update_curr() has to be done:
2804 */
2805 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002806 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002807
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002808 /* throttle cfs_rqs exceeding runtime */
2809 check_cfs_rq_runtime(cfs_rq);
2810
Peter Zijlstraddc97292007-10-15 17:00:10 +02002811 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002812 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02002813 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002814 /* Put 'current' back into the tree. */
2815 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02002816 /* in !on_rq case, update occurred at dequeue */
Paul Turner9ee474f2012-10-04 13:18:30 +02002817 update_entity_load_avg(prev, 1);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002818 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02002819 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002820}
2821
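/*
 * Scheduler-tick maintenance for the running entity: refresh its runtime
 * accounting and load averages and, when more than one entity is runnable,
 * check whether the current one has used up its slice.
 */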
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002822static void
2823entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002824{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002825 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002826 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002827 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002828 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002829
Paul Turner43365bd2010-12-15 19:10:17 -08002830 /*
Paul Turner9d85f212012-10-04 13:18:29 +02002831 * Ensure that runnable average is periodically updated.
2832 */
Paul Turner9ee474f2012-10-04 13:18:30 +02002833 update_entity_load_avg(curr, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002834 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstrabf0bd942013-07-26 23:48:42 +02002835 update_cfs_shares(cfs_rq);
Paul Turner9d85f212012-10-04 13:18:29 +02002836
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002837#ifdef CONFIG_SCHED_HRTICK
2838 /*
2839 * queued ticks are scheduled to match the slice, so don't bother
2840 * validating it and just reschedule.
2841 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07002842 if (queued) {
2843 resched_task(rq_of(cfs_rq)->curr);
2844 return;
2845 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002846 /*
2847 * don't let the period tick interfere with the hrtick preemption
2848 */
2849 if (!sched_feat(DOUBLE_TICK) &&
2850 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2851 return;
2852#endif
2853
Yong Zhang2c2efae2011-07-29 16:20:33 +08002854 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002855 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002856}
2857
Paul Turnerab84d312011-07-21 09:43:28 -07002858
2859/**************************************************
2860 * CFS bandwidth control machinery
2861 */
2862
2863#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02002864
2865#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01002866static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02002867
2868static inline bool cfs_bandwidth_used(void)
2869{
Ingo Molnarc5905af2012-02-24 08:31:31 +01002870 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002871}
2872
Ben Segall1ee14e62013-10-16 11:16:12 -07002873void cfs_bandwidth_usage_inc(void)
Peter Zijlstra029632f2011-10-25 10:00:11 +02002874{
Ben Segall1ee14e62013-10-16 11:16:12 -07002875 static_key_slow_inc(&__cfs_bandwidth_used);
2876}
2877
2878void cfs_bandwidth_usage_dec(void)
2879{
2880 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002881}
2882#else /* HAVE_JUMP_LABEL */
2883static bool cfs_bandwidth_used(void)
2884{
2885 return true;
2886}
2887
Ben Segall1ee14e62013-10-16 11:16:12 -07002888void cfs_bandwidth_usage_inc(void) {}
2889void cfs_bandwidth_usage_dec(void) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002890#endif /* HAVE_JUMP_LABEL */
2891
Paul Turnerab84d312011-07-21 09:43:28 -07002892/*
2893 * default period for cfs group bandwidth.
2894 * default: 0.1s, units: nanoseconds
2895 */
2896static inline u64 default_cfs_period(void)
2897{
2898 return 100000000ULL;
2899}
Paul Turnerec12cb72011-07-21 09:43:30 -07002900
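/*
 * Amount of runtime a cfs_rq pulls from the global pool in one go; the
 * sysctl is in microseconds (5ms by default) and is converted to
 * nanoseconds here.  For example, with quota = 50ms and period = 100ms, a
 * busy cfs_rq keeps pulling 5ms slices until the 50ms available in that
 * period is used up.
 */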
2901static inline u64 sched_cfs_bandwidth_slice(void)
2902{
2903 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2904}
2905
Paul Turnera9cf55b2011-07-21 09:43:32 -07002906/*
2907 * Replenish runtime according to assigned quota and update expiration time.
2908 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2909 * additional synchronization around rq->lock.
2910 *
2911 * requires cfs_b->lock
2912 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02002913void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07002914{
2915 u64 now;
2916
2917 if (cfs_b->quota == RUNTIME_INF)
2918 return;
2919
2920 now = sched_clock_cpu(smp_processor_id());
2921 cfs_b->runtime = cfs_b->quota;
2922 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2923}
2924
Peter Zijlstra029632f2011-10-25 10:00:11 +02002925static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2926{
2927 return &tg->cfs_bandwidth;
2928}
2929
Paul Turnerf1b17282012-10-04 13:18:31 +02002930/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
2931static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2932{
2933 if (unlikely(cfs_rq->throttle_count))
2934 return cfs_rq->throttled_clock_task;
2935
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002936 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02002937}
2938
Paul Turner85dac902011-07-21 09:43:33 -07002939/* returns 0 on failure to allocate runtime */
2940static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07002941{
2942 struct task_group *tg = cfs_rq->tg;
2943 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002944 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002945
2946 /* note: this is a positive sum as runtime_remaining <= 0 */
2947 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2948
2949 raw_spin_lock(&cfs_b->lock);
2950 if (cfs_b->quota == RUNTIME_INF)
2951 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07002952 else {
Paul Turnera9cf55b2011-07-21 09:43:32 -07002953 /*
2954 * If the bandwidth pool has become inactive, then at least one
2955 * period must have elapsed since the last consumption.
2956		 * Refresh the global state and ensure the bandwidth timer becomes
2957 * active.
2958 */
2959 if (!cfs_b->timer_active) {
2960 __refill_cfs_bandwidth_runtime(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07002961 __start_cfs_bandwidth(cfs_b);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002962 }
Paul Turner58088ad2011-07-21 09:43:31 -07002963
2964 if (cfs_b->runtime > 0) {
2965 amount = min(cfs_b->runtime, min_amount);
2966 cfs_b->runtime -= amount;
2967 cfs_b->idle = 0;
2968 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002969 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07002970 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002971 raw_spin_unlock(&cfs_b->lock);
2972
2973 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002974 /*
2975 * we may have advanced our local expiration to account for allowed
2976 * spread between our sched_clock and the one on which runtime was
2977 * issued.
2978 */
2979 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2980 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07002981
2982 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002983}
2984
2985/*
2986 * Note: This depends on the synchronization provided by sched_clock and the
2987 * fact that rq->clock snapshots this value.
2988 */
2989static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2990{
2991 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002992
2993 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002994 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07002995 return;
2996
2997 if (cfs_rq->runtime_remaining < 0)
2998 return;
2999
3000 /*
3001 * If the local deadline has passed we have to consider the
3002 * possibility that our sched_clock is 'fast' and the global deadline
3003 * has not truly expired.
3004 *
3005	 * Fortunately we can determine whether this is the case by checking
3006 * whether the global deadline has advanced.
3007 */
3008
3009 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
3010 /* extend local deadline, drift is bounded above by 2 ticks */
3011 cfs_rq->runtime_expires += TICK_NSEC;
3012 } else {
3013 /* global deadline is ahead, expiration has passed */
3014 cfs_rq->runtime_remaining = 0;
3015 }
Paul Turnerec12cb72011-07-21 09:43:30 -07003016}
3017
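/*
 * Charge @delta_exec against the local runtime pool; if the pool runs dry
 * and nothing more can be borrowed from the global pool, force a resched so
 * the hierarchy can be throttled from put_prev_entity().
 */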
3018static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
3019 unsigned long delta_exec)
3020{
Paul Turnera9cf55b2011-07-21 09:43:32 -07003021 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07003022 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003023 expire_cfs_rq_runtime(cfs_rq);
3024
3025 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07003026 return;
3027
Paul Turner85dac902011-07-21 09:43:33 -07003028 /*
3029 * if we're unable to extend our runtime we resched so that the active
3030 * hierarchy can be throttled
3031 */
3032 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3033 resched_task(rq_of(cfs_rq)->curr);
Paul Turnerec12cb72011-07-21 09:43:30 -07003034}
3035
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003036static __always_inline
3037void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07003038{
Paul Turner56f570e2011-11-07 20:26:33 -08003039 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07003040 return;
3041
3042 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3043}
3044
Paul Turner85dac902011-07-21 09:43:33 -07003045static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3046{
Paul Turner56f570e2011-11-07 20:26:33 -08003047 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07003048}
3049
Paul Turner64660c82011-07-21 09:43:36 -07003050/* check whether cfs_rq, or any parent, is throttled */
3051static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3052{
Paul Turner56f570e2011-11-07 20:26:33 -08003053 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07003054}
3055
3056/*
3057 * Ensure that neither of the group entities corresponding to src_cpu nor
3058 * dest_cpu is a member of a throttled hierarchy when performing group
3059 * load-balance operations.
3060 */
3061static inline int throttled_lb_pair(struct task_group *tg,
3062 int src_cpu, int dest_cpu)
3063{
3064 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3065
3066 src_cfs_rq = tg->cfs_rq[src_cpu];
3067 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3068
3069 return throttled_hierarchy(src_cfs_rq) ||
3070 throttled_hierarchy(dest_cfs_rq);
3071}
3072
3073/* updated child weight may affect parent so we have to do this bottom up */
3074static int tg_unthrottle_up(struct task_group *tg, void *data)
3075{
3076 struct rq *rq = data;
3077 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3078
3079 cfs_rq->throttle_count--;
3080#ifdef CONFIG_SMP
3081 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02003082 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003083 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02003084 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07003085 }
3086#endif
3087
3088 return 0;
3089}
3090
3091static int tg_throttle_down(struct task_group *tg, void *data)
3092{
3093 struct rq *rq = data;
3094 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3095
Paul Turner82958362012-10-04 13:18:31 +02003096 /* group is entering throttled state, stop time */
3097 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003098 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07003099 cfs_rq->throttle_count++;
3100
3101 return 0;
3102}
3103
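/*
 * Throttle @cfs_rq: dequeue its group entity from each ancestor runqueue
 * (stopping once an ancestor still has other load), adjust the hierarchical
 * task counts, mark the cfs_rq throttled and put it on the bandwidth pool's
 * throttled list so the period timer will replenish and unthrottle it.
 */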
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003104static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07003105{
3106 struct rq *rq = rq_of(cfs_rq);
3107 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3108 struct sched_entity *se;
3109 long task_delta, dequeue = 1;
3110
3111 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3112
Paul Turnerf1b17282012-10-04 13:18:31 +02003113 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07003114 rcu_read_lock();
3115 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3116 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07003117
3118 task_delta = cfs_rq->h_nr_running;
3119 for_each_sched_entity(se) {
3120 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3121 /* throttled entity or throttle-on-deactivate */
3122 if (!se->on_rq)
3123 break;
3124
3125 if (dequeue)
3126 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3127 qcfs_rq->h_nr_running -= task_delta;
3128
3129 if (qcfs_rq->load.weight)
3130 dequeue = 0;
3131 }
3132
3133 if (!se)
3134 rq->nr_running -= task_delta;
3135
3136 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003137 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07003138 raw_spin_lock(&cfs_b->lock);
3139 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
Ben Segallf9f9ffc2013-10-16 11:16:32 -07003140 if (!cfs_b->timer_active)
3141 __start_cfs_bandwidth(cfs_b);
Paul Turner85dac902011-07-21 09:43:33 -07003142 raw_spin_unlock(&cfs_b->lock);
3143}
3144
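/*
 * Reverse of throttle_cfs_rq(): account the time spent throttled,
 * re-enqueue the group entity on each ancestor that had dropped it, restore
 * the task counts and, if the CPU was sitting idle, kick it so the newly
 * runnable load gets to execute.
 */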
Peter Zijlstra029632f2011-10-25 10:00:11 +02003145void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07003146{
3147 struct rq *rq = rq_of(cfs_rq);
3148 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3149 struct sched_entity *se;
3150 int enqueue = 1;
3151 long task_delta;
3152
Michael Wang22b958d2013-06-04 14:23:39 +08003153 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07003154
3155 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02003156
3157 update_rq_clock(rq);
3158
Paul Turner671fd9d2011-07-21 09:43:34 -07003159 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003160 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07003161 list_del_rcu(&cfs_rq->throttled_list);
3162 raw_spin_unlock(&cfs_b->lock);
3163
Paul Turner64660c82011-07-21 09:43:36 -07003164 /* update hierarchical throttle state */
3165 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3166
Paul Turner671fd9d2011-07-21 09:43:34 -07003167 if (!cfs_rq->load.weight)
3168 return;
3169
3170 task_delta = cfs_rq->h_nr_running;
3171 for_each_sched_entity(se) {
3172 if (se->on_rq)
3173 enqueue = 0;
3174
3175 cfs_rq = cfs_rq_of(se);
3176 if (enqueue)
3177 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3178 cfs_rq->h_nr_running += task_delta;
3179
3180 if (cfs_rq_throttled(cfs_rq))
3181 break;
3182 }
3183
3184 if (!se)
3185 rq->nr_running += task_delta;
3186
3187 /* determine whether we need to wake up potentially idle cpu */
3188 if (rq->curr == rq->idle && rq->cfs.nr_running)
3189 resched_task(rq->curr);
3190}
3191
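/*
 * Hand out just enough of @remaining runtime to each throttled cfs_rq to
 * bring its deficit back above zero and unthrottle it; returns whatever
 * runtime is left over.
 */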
3192static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3193 u64 remaining, u64 expires)
3194{
3195 struct cfs_rq *cfs_rq;
3196 u64 runtime = remaining;
3197
3198 rcu_read_lock();
3199 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3200 throttled_list) {
3201 struct rq *rq = rq_of(cfs_rq);
3202
3203 raw_spin_lock(&rq->lock);
3204 if (!cfs_rq_throttled(cfs_rq))
3205 goto next;
3206
3207 runtime = -cfs_rq->runtime_remaining + 1;
3208 if (runtime > remaining)
3209 runtime = remaining;
3210 remaining -= runtime;
3211
3212 cfs_rq->runtime_remaining += runtime;
3213 cfs_rq->runtime_expires = expires;
3214
3215 /* we check whether we're throttled above */
3216 if (cfs_rq->runtime_remaining > 0)
3217 unthrottle_cfs_rq(cfs_rq);
3218
3219next:
3220 raw_spin_unlock(&rq->lock);
3221
3222 if (!remaining)
3223 break;
3224 }
3225 rcu_read_unlock();
3226
3227 return remaining;
3228}
3229
Paul Turner58088ad2011-07-21 09:43:31 -07003230/*
3231 * Responsible for refilling a task_group's bandwidth and unthrottling its
3232 * cfs_rqs as appropriate. If there has been no activity within the last
3233 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3234 * used to track this state.
3235 */
3236static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3237{
Paul Turner671fd9d2011-07-21 09:43:34 -07003238 u64 runtime, runtime_expires;
3239 int idle = 1, throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07003240
3241 raw_spin_lock(&cfs_b->lock);
3242 /* no need to continue the timer with no bandwidth constraint */
3243 if (cfs_b->quota == RUNTIME_INF)
3244 goto out_unlock;
3245
Paul Turner671fd9d2011-07-21 09:43:34 -07003246 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3247 /* idle depends on !throttled (for the case of a large deficit) */
3248 idle = cfs_b->idle && !throttled;
Nikhil Raoe8da1b12011-07-21 09:43:40 -07003249 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07003250
Paul Turnera9cf55b2011-07-21 09:43:32 -07003251 /* if we're going inactive then everything else can be deferred */
3252 if (idle)
3253 goto out_unlock;
3254
Ben Segall927b54f2013-10-16 11:16:22 -07003255 /*
3256 * if we have relooped after returning idle once, we need to update our
3257	 * status as actually running, so that other CPUs calling
3258	 * __start_cfs_bandwidth() will stop trying to cancel us.
3259 */
3260 cfs_b->timer_active = 1;
3261
Paul Turnera9cf55b2011-07-21 09:43:32 -07003262 __refill_cfs_bandwidth_runtime(cfs_b);
3263
Paul Turner671fd9d2011-07-21 09:43:34 -07003264 if (!throttled) {
3265 /* mark as potentially idle for the upcoming period */
3266 cfs_b->idle = 1;
3267 goto out_unlock;
3268 }
Paul Turner58088ad2011-07-21 09:43:31 -07003269
Nikhil Raoe8da1b12011-07-21 09:43:40 -07003270 /* account preceding periods in which throttling occurred */
3271 cfs_b->nr_throttled += overrun;
3272
Paul Turner671fd9d2011-07-21 09:43:34 -07003273 /*
3274 * There are throttled entities so we must first use the new bandwidth
3275 * to unthrottle them before making it generally available. This
3276 * ensures that all existing debts will be paid before a new cfs_rq is
3277 * allowed to run.
3278 */
3279 runtime = cfs_b->runtime;
3280 runtime_expires = cfs_b->runtime_expires;
3281 cfs_b->runtime = 0;
3282
3283 /*
3284 * This check is repeated as we are holding onto the new bandwidth
3285 * while we unthrottle. This can potentially race with an unthrottled
3286 * group trying to acquire new bandwidth from the global pool.
3287 */
3288 while (throttled && runtime > 0) {
3289 raw_spin_unlock(&cfs_b->lock);
3290 /* we can't nest cfs_b->lock while distributing bandwidth */
3291 runtime = distribute_cfs_runtime(cfs_b, runtime,
3292 runtime_expires);
3293 raw_spin_lock(&cfs_b->lock);
3294
3295 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3296 }
3297
3298 /* return (any) remaining runtime */
3299 cfs_b->runtime = runtime;
3300 /*
3301 * While we are ensured activity in the period following an
3302 * unthrottle, this also covers the case in which the new bandwidth is
3303 * insufficient to cover the existing bandwidth deficit. (Forcing the
3304 * timer to remain active while there are any throttled entities.)
3305 */
3306 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07003307out_unlock:
3308 if (idle)
3309 cfs_b->timer_active = 0;
3310 raw_spin_unlock(&cfs_b->lock);
3311
3312 return idle;
3313}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003314
Paul Turnerd8b49862011-07-21 09:43:41 -07003315/* a cfs_rq won't donate quota below this amount */
3316static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3317/* minimum remaining period time to redistribute slack quota */
3318static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3319/* how long we wait to gather additional slack before distributing */
3320static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3321
Ben Segalldb06e782013-10-16 11:16:17 -07003322/*
3323 * Are we near the end of the current quota period?
3324 *
3325 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
3326 * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
3327 * migrate_hrtimers, base is never cleared, so we are fine.
3328 */
Paul Turnerd8b49862011-07-21 09:43:41 -07003329static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3330{
3331 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3332 u64 remaining;
3333
3334 /* if the call-back is running a quota refresh is already occurring */
3335 if (hrtimer_callback_running(refresh_timer))
3336 return 1;
3337
3338 /* is a quota refresh about to occur? */
3339 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3340 if (remaining < min_expire)
3341 return 1;
3342
3343 return 0;
3344}
3345
3346static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3347{
3348 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3349
3350 /* if there's a quota refresh soon don't bother with slack */
3351 if (runtime_refresh_within(cfs_b, min_left))
3352 return;
3353
3354 start_bandwidth_timer(&cfs_b->slack_timer,
3355 ns_to_ktime(cfs_bandwidth_slack_period));
3356}
3357
3358/* we know any runtime found here is valid as update_curr() precedes return */
3359static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3360{
3361 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3362 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3363
3364 if (slack_runtime <= 0)
3365 return;
3366
3367 raw_spin_lock(&cfs_b->lock);
3368 if (cfs_b->quota != RUNTIME_INF &&
3369 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3370 cfs_b->runtime += slack_runtime;
3371
3372 /* we are under rq->lock, defer unthrottling using a timer */
3373 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3374 !list_empty(&cfs_b->throttled_cfs_rq))
3375 start_cfs_slack_bandwidth(cfs_b);
3376 }
3377 raw_spin_unlock(&cfs_b->lock);
3378
3379 /* even if it's not valid for return we don't want to try again */
3380 cfs_rq->runtime_remaining -= slack_runtime;
3381}
3382
3383static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3384{
Paul Turner56f570e2011-11-07 20:26:33 -08003385 if (!cfs_bandwidth_used())
3386 return;
3387
Paul Turnerfccfdc62011-11-07 20:26:34 -08003388 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07003389 return;
3390
3391 __return_cfs_rq_runtime(cfs_rq);
3392}
3393
3394/*
3395 * This is done with a timer (instead of inline with bandwidth return) since
3396 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3397 */
3398static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3399{
3400 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3401 u64 expires;
3402
3403 /* confirm we're still not at a refresh boundary */
Paul Turnerd8b49862011-07-21 09:43:41 -07003404 raw_spin_lock(&cfs_b->lock);
Ben Segalldb06e782013-10-16 11:16:17 -07003405 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3406 raw_spin_unlock(&cfs_b->lock);
3407 return;
3408 }
3409
Paul Turnerd8b49862011-07-21 09:43:41 -07003410 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
3411 runtime = cfs_b->runtime;
3412 cfs_b->runtime = 0;
3413 }
3414 expires = cfs_b->runtime_expires;
3415 raw_spin_unlock(&cfs_b->lock);
3416
3417 if (!runtime)
3418 return;
3419
3420 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3421
3422 raw_spin_lock(&cfs_b->lock);
3423 if (expires == cfs_b->runtime_expires)
3424 cfs_b->runtime = runtime;
3425 raw_spin_unlock(&cfs_b->lock);
3426}
3427
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003428/*
3429 * When a group wakes up we want to make sure that its quota is not already
3430 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3431 * runtime, as update_curr() throttling cannot trigger until it's on-rq.
3432 */
3433static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3434{
Paul Turner56f570e2011-11-07 20:26:33 -08003435 if (!cfs_bandwidth_used())
3436 return;
3437
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003438 /* an active group must be handled by the update_curr()->put() path */
3439 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3440 return;
3441
3442 /* ensure the group is not already throttled */
3443 if (cfs_rq_throttled(cfs_rq))
3444 return;
3445
3446 /* update runtime allocation */
3447 account_cfs_rq_runtime(cfs_rq, 0);
3448 if (cfs_rq->runtime_remaining <= 0)
3449 throttle_cfs_rq(cfs_rq);
3450}
3451
3452/* conditionally throttle active cfs_rq's from put_prev_entity() */
3453static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3454{
Paul Turner56f570e2011-11-07 20:26:33 -08003455 if (!cfs_bandwidth_used())
3456 return;
3457
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003458 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3459 return;
3460
3461 /*
3462 * it's possible for a throttled entity to be forced into a running
3463	 * state (e.g. set_curr_task); in this case we're finished.
3464 */
3465 if (cfs_rq_throttled(cfs_rq))
3466 return;
3467
3468 throttle_cfs_rq(cfs_rq);
3469}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003470
Peter Zijlstra029632f2011-10-25 10:00:11 +02003471static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3472{
3473 struct cfs_bandwidth *cfs_b =
3474 container_of(timer, struct cfs_bandwidth, slack_timer);
3475 do_sched_cfs_slack_timer(cfs_b);
3476
3477 return HRTIMER_NORESTART;
3478}
3479
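/*
 * Period timer callback: advance the timer by whole periods and run the
 * quota refresh / unthrottle work once per overrun; the timer is stopped
 * (NORESTART) once do_sched_cfs_period_timer() reports the pool idle.
 */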
3480static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3481{
3482 struct cfs_bandwidth *cfs_b =
3483 container_of(timer, struct cfs_bandwidth, period_timer);
3484 ktime_t now;
3485 int overrun;
3486 int idle = 0;
3487
3488 for (;;) {
3489 now = hrtimer_cb_get_time(timer);
3490 overrun = hrtimer_forward(timer, now, cfs_b->period);
3491
3492 if (!overrun)
3493 break;
3494
3495 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3496 }
3497
3498 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3499}
3500
3501void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3502{
3503 raw_spin_lock_init(&cfs_b->lock);
3504 cfs_b->runtime = 0;
3505 cfs_b->quota = RUNTIME_INF;
3506 cfs_b->period = ns_to_ktime(default_cfs_period());
3507
3508 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3509 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3510 cfs_b->period_timer.function = sched_cfs_period_timer;
3511 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3512 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3513}
3514
3515static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3516{
3517 cfs_rq->runtime_enabled = 0;
3518 INIT_LIST_HEAD(&cfs_rq->throttled_list);
3519}
3520
3521/* requires cfs_b->lock, may release to reprogram timer */
3522void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3523{
3524 /*
3525 * The timer may be active because we're trying to set a new bandwidth
3526 * period or because we're racing with the tear-down path
3527 * (timer_active==0 becomes visible before the hrtimer call-back
3528 * terminates). In either case we ensure that it's re-programmed
3529 */
Ben Segall927b54f2013-10-16 11:16:22 -07003530 while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
3531 hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
3532 /* bounce the lock to allow do_sched_cfs_period_timer to run */
Peter Zijlstra029632f2011-10-25 10:00:11 +02003533 raw_spin_unlock(&cfs_b->lock);
Ben Segall927b54f2013-10-16 11:16:22 -07003534 cpu_relax();
Peter Zijlstra029632f2011-10-25 10:00:11 +02003535 raw_spin_lock(&cfs_b->lock);
3536 /* if someone else restarted the timer then we're done */
3537 if (cfs_b->timer_active)
3538 return;
3539 }
3540
3541 cfs_b->timer_active = 1;
3542 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3543}
3544
3545static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3546{
3547 hrtimer_cancel(&cfs_b->period_timer);
3548 hrtimer_cancel(&cfs_b->slack_timer);
3549}
3550
Arnd Bergmann38dc3342013-01-25 14:14:22 +00003551static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003552{
3553 struct cfs_rq *cfs_rq;
3554
3555 for_each_leaf_cfs_rq(rq, cfs_rq) {
3556 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3557
3558 if (!cfs_rq->runtime_enabled)
3559 continue;
3560
3561 /*
3562 * clock_task is not advancing so we just need to make sure
3563 * there's some valid quota amount
3564 */
3565 cfs_rq->runtime_remaining = cfs_b->quota;
3566 if (cfs_rq_throttled(cfs_rq))
3567 unthrottle_cfs_rq(cfs_rq);
3568 }
3569}
3570
3571#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02003572static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3573{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003574 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02003575}
3576
3577static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
3578 unsigned long delta_exec) {}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003579static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3580static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003581static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07003582
3583static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3584{
3585 return 0;
3586}
Paul Turner64660c82011-07-21 09:43:36 -07003587
3588static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3589{
3590 return 0;
3591}
3592
3593static inline int throttled_lb_pair(struct task_group *tg,
3594 int src_cpu, int dest_cpu)
3595{
3596 return 0;
3597}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003598
3599void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3600
3601#ifdef CONFIG_FAIR_GROUP_SCHED
3602static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07003603#endif
3604
Peter Zijlstra029632f2011-10-25 10:00:11 +02003605static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3606{
3607 return NULL;
3608}
3609static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07003610static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003611
3612#endif /* CONFIG_CFS_BANDWIDTH */
3613
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003614/**************************************************
3615 * CFS operations on tasks:
3616 */
3617
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003618#ifdef CONFIG_SCHED_HRTICK
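/*
 * Arm the high-resolution tick to fire when @p's current slice is expected
 * to end, so preemption does not have to wait for the next regular tick; if
 * the slice is already overrun and @p is running, reschedule right away.
 * Only used when more than one task is queued on the cfs_rq.
 */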
3619static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
3620{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003621 struct sched_entity *se = &p->se;
3622 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3623
3624 WARN_ON(task_rq(p) != rq);
3625
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003626 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003627 u64 slice = sched_slice(cfs_rq, se);
3628 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
3629 s64 delta = slice - ran;
3630
3631 if (delta < 0) {
3632 if (rq->curr == p)
3633 resched_task(p);
3634 return;
3635 }
3636
3637 /*
3638		 * Don't schedule slices shorter than 10000ns; that just
3639 * doesn't make sense. Rely on vruntime for fairness.
3640 */
Peter Zijlstra31656512008-07-18 18:01:23 +02003641 if (rq->curr != p)
Peter Zijlstra157124c2008-07-28 11:53:11 +02003642 delta = max_t(s64, 10000LL, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003643
Peter Zijlstra31656512008-07-18 18:01:23 +02003644 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003645 }
3646}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003647
3648/*
3649 * called from enqueue/dequeue and updates the hrtick when the
3650 * current task is from our class and nr_running is low enough
3651 * to matter.
3652 */
3653static void hrtick_update(struct rq *rq)
3654{
3655 struct task_struct *curr = rq->curr;
3656
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003657 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003658 return;
3659
3660 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3661 hrtick_start_fair(rq, curr);
3662}
Dhaval Giani55e12e52008-06-24 23:39:43 +05303663#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003664static inline void
3665hrtick_start_fair(struct rq *rq, struct task_struct *p)
3666{
3667}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003668
3669static inline void hrtick_update(struct rq *rq)
3670{
3671}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003672#endif
3673
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003674/*
3675 * The enqueue_task method is called before nr_running is
3676 * increased. Here we update the fair scheduling stats and
3677 * then put the task into the rbtree:
3678 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00003679static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003680enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003681{
3682 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003683 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003684
3685 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003686 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003687 break;
3688 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003689 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07003690
3691 /*
3692 * end evaluation on encountering a throttled cfs_rq
3693 *
3694 * note: in the case of encountering a throttled cfs_rq we will
3695 * post the final h_nr_running increment below.
3696 */
3697 if (cfs_rq_throttled(cfs_rq))
3698 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07003699 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07003700
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003701 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003702 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003703
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003704 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08003705 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07003706 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003707
Paul Turner85dac902011-07-21 09:43:33 -07003708 if (cfs_rq_throttled(cfs_rq))
3709 break;
3710
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003711 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003712 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003713 }
3714
Ben Segall18bf2802012-10-04 12:51:20 +02003715 if (!se) {
3716 update_rq_runnable_avg(rq, rq->nr_running);
Paul Turner85dac902011-07-21 09:43:33 -07003717 inc_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02003718 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003719 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003720}
3721
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003722static void set_next_buddy(struct sched_entity *se);
3723
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003724/*
3725 * The dequeue_task method is called before nr_running is
3726 * decreased. We remove the task from the rbtree and
3727 * update the fair scheduling stats:
3728 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003729static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003730{
3731 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003732 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003733 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003734
3735 for_each_sched_entity(se) {
3736 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003737 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07003738
3739 /*
3740 * end evaluation on encountering a throttled cfs_rq
3741 *
3742 * note: in the case of encountering a throttled cfs_rq we will
3743 * post the final h_nr_running decrement below.
3744 */
3745 if (cfs_rq_throttled(cfs_rq))
3746 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07003747 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003748
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003749 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003750 if (cfs_rq->load.weight) {
3751 /*
3752 * Bias pick_next to pick a task from this cfs_rq, as
3753 * p is sleeping when it is within its sched_slice.
3754 */
3755 if (task_sleep && parent_entity(se))
3756 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07003757
3758 /* avoid re-evaluating load for this entity */
3759 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003760 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003761 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003762 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003763 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003764
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003765 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08003766 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07003767 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003768
Paul Turner85dac902011-07-21 09:43:33 -07003769 if (cfs_rq_throttled(cfs_rq))
3770 break;
3771
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003772 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003773 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003774 }
3775
Ben Segall18bf2802012-10-04 12:51:20 +02003776 if (!se) {
Paul Turner85dac902011-07-21 09:43:33 -07003777 dec_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02003778 update_rq_runnable_avg(rq, 1);
3779 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003780 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003781}
3782
Gregory Haskinse7693a32008-01-25 21:08:09 +01003783#ifdef CONFIG_SMP
Peter Zijlstra029632f2011-10-25 10:00:11 +02003784/* Used instead of source_load when we know the type == 0 */
3785static unsigned long weighted_cpuload(const int cpu)
3786{
Alex Shib92486c2013-06-20 10:18:50 +08003787 return cpu_rq(cpu)->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003788}
3789
3790/*
3791 * Return a low guess at the load of a migration-source cpu weighted
3792 * according to the scheduling class and "nice" value.
3793 *
3794 * We want to under-estimate the load of migration sources, to
3795 * balance conservatively.
3796 */
3797static unsigned long source_load(int cpu, int type)
3798{
3799 struct rq *rq = cpu_rq(cpu);
3800 unsigned long total = weighted_cpuload(cpu);
3801
3802 if (type == 0 || !sched_feat(LB_BIAS))
3803 return total;
3804
3805 return min(rq->cpu_load[type-1], total);
3806}
3807
3808/*
3809 * Return a high guess at the load of a migration-target cpu weighted
3810 * according to the scheduling class and "nice" value.
3811 */
3812static unsigned long target_load(int cpu, int type)
3813{
3814 struct rq *rq = cpu_rq(cpu);
3815 unsigned long total = weighted_cpuload(cpu);
3816
3817 if (type == 0 || !sched_feat(LB_BIAS))
3818 return total;
3819
3820 return max(rq->cpu_load[type-1], total);
3821}
3822
3823static unsigned long power_of(int cpu)
3824{
3825 return cpu_rq(cpu)->cpu_power;
3826}
3827
3828static unsigned long cpu_avg_load_per_task(int cpu)
3829{
3830 struct rq *rq = cpu_rq(cpu);
3831 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Alex Shib92486c2013-06-20 10:18:50 +08003832 unsigned long load_avg = rq->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003833
3834 if (nr_running)
Alex Shib92486c2013-06-20 10:18:50 +08003835 return load_avg / nr_running;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003836
3837 return 0;
3838}
3839
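/*
 * Count how often the current task wakes a task other than the one it woke
 * last ("wakee flips"), decaying the count roughly once per second;
 * wake_wide() uses this as a cheap estimate of the waker/wakee fan-out.
 */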
Michael Wang62470412013-07-04 12:55:51 +08003840static void record_wakee(struct task_struct *p)
3841{
3842 /*
3843	 * Rough decay (wiping) for cost saving; don't worry
3844	 * about the boundary, a really active task won't care
3845	 * about the loss.
3846 */
3847 if (jiffies > current->wakee_flip_decay_ts + HZ) {
3848 current->wakee_flips = 0;
3849 current->wakee_flip_decay_ts = jiffies;
3850 }
3851
3852 if (current->last_wakee != p) {
3853 current->last_wakee = p;
3854 current->wakee_flips++;
3855 }
3856}
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003857
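/*
 * Wakeup-time renormalization: subtract the cfs_rq's min_vruntime (read
 * with a retry loop on 32-bit, where the 64-bit value cannot be loaded
 * atomically) so @p's vruntime is CPU-independent until it is enqueued
 * again, and note the wakee for the wake_wide() heuristic.
 */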
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003858static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003859{
3860 struct sched_entity *se = &p->se;
3861 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003862 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003863
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003864#ifndef CONFIG_64BIT
3865 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003866
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003867 do {
3868 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3869 smp_rmb();
3870 min_vruntime = cfs_rq->min_vruntime;
3871 } while (min_vruntime != min_vruntime_copy);
3872#else
3873 min_vruntime = cfs_rq->min_vruntime;
3874#endif
3875
3876 se->vruntime -= min_vruntime;
Michael Wang62470412013-07-04 12:55:51 +08003877 record_wakee(p);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003878}
3879
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003880#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003881/*
3882 * effective_load() calculates the load change as seen from the root_task_group
3883 *
3884 * Adding load to a group doesn't make a group heavier, but can cause movement
3885 * of group shares between cpus. Assuming the shares were perfectly aligned one
3886 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003887 *
3888 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3889 * on this @cpu and results in a total addition (subtraction) of @wg to the
3890 * total group weight.
3891 *
3892 * Given a runqueue weight distribution (rw_i) we can compute a shares
3893 * distribution (s_i) using:
3894 *
3895 * s_i = rw_i / \Sum rw_j (1)
3896 *
3897 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3898 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3899 * shares distribution (s_i):
3900 *
3901 * rw_i = { 2, 4, 1, 0 }
3902 * s_i = { 2/7, 4/7, 1/7, 0 }
3903 *
3904 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3905 * task used to run on and the CPU the waker is running on); we need to
3906 * compute the effect of waking a task on either CPU and, in case of a sync
3907 * wakeup, compute the effect of the current task going to sleep.
3908 *
3909 * So for a change of @wl to the local @cpu with an overall group weight change
3910 * of @wg we can compute the new shares distribution (s'_i) using:
3911 *
3912 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3913 *
3914 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3915 * differences in waking a task to CPU 0. The additional task changes the
3916 * weight and shares distributions like:
3917 *
3918 * rw'_i = { 3, 4, 1, 0 }
3919 * s'_i = { 3/8, 4/8, 1/8, 0 }
3920 *
3921 * We can then compute the difference in effective weight by using:
3922 *
3923 * dw_i = S * (s'_i - s_i) (3)
3924 *
3925 * Where 'S' is the group weight as seen by its parent.
3926 *
3927 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3928 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3929 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003930 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003931static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003932{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003933 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003934
Mel Gorman58d081b2013-10-07 11:29:10 +01003935 if (!tg->parent || !wl) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003936 return wl;
3937
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003938 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003939 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003940
Paul Turner977dda72011-01-14 17:57:50 -08003941 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003942
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003943 /*
3944 * W = @wg + \Sum rw_j
3945 */
3946 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003947
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003948 /*
3949 * w = rw_i + @wl
3950 */
3951 w = se->my_q->load.weight + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003952
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003953 /*
3954 * wl = S * s'_i; see (2)
3955 */
3956 if (W > 0 && w < W)
3957 wl = (w * tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08003958 else
3959 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003960
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003961 /*
3962 * Per the above, wl is the new se->load.weight value; since
3963 * those are clipped to [MIN_SHARES, ...) do so now. See
3964 * calc_cfs_shares().
3965 */
Paul Turner977dda72011-01-14 17:57:50 -08003966 if (wl < MIN_SHARES)
3967 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003968
3969 /*
3970 * wl = dw_i = S * (s'_i - s_i); see (3)
3971 */
Paul Turner977dda72011-01-14 17:57:50 -08003972 wl -= se->load.weight;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003973
3974 /*
3975 * Recursively apply this logic to all parent groups to compute
3976 * the final effective load change on the root group. Since
3977 * only the @tg group gets extra weight, all parent groups can
3978 * only redistribute existing shares. @wl is the shift in shares
3979 * resulting from this level per the above.
3980 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003981 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003982 }
3983
3984 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003985}
3986#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003987
Mel Gorman58d081b2013-10-07 11:29:10 +01003988static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003989{
Peter Zijlstra83378262008-06-27 13:41:37 +02003990 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003991}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003992
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003993#endif
3994
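/*
 * Report a wakeup as "wide" when the wakee has flipped between more
 * partners than the LLC size and the waker flips at least that many times
 * more often still; wake_affine() then declines to pull the wakee onto the
 * waker's CPU.
 */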
Michael Wang62470412013-07-04 12:55:51 +08003995static int wake_wide(struct task_struct *p)
3996{
Peter Zijlstra7d9ffa82013-07-04 12:56:46 +08003997 int factor = this_cpu_read(sd_llc_size);
Michael Wang62470412013-07-04 12:55:51 +08003998
3999 /*
4000	 * Yeah, it's the switching frequency: a high flip count can mean many wakees
4001	 * or rapid switching. The LLC-size factor here automatically adjusts the
4002	 * looseness of the heuristic, so a bigger node will lead to more pulling.
4003 */
4004 if (p->wakee_flips > factor) {
4005 /*
4006		 * The wakee is somewhat hot and needs a certain amount of CPU
4007		 * resources, so if the waker is far hotter, prefer to leave
4008		 * it alone.
4009 */
4010 if (current->wakee_flips > (factor * p->wakee_flips))
4011 return 1;
4012 }
4013
4014 return 0;
4015}
4016
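/*
 * Decide whether it pays to run the woken task on the waker's CPU: compare
 * the effective loads of this_cpu and prev_cpu, factoring in group shares
 * and, for sync wakeups, the waker that is about to sleep, and accept the
 * affine wakeup when this_cpu is not left noticeably worse off.
 */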
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004017static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004018{
Paul Turnere37b6a72011-01-21 20:44:59 -08004019 s64 this_load, load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004020 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004021 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004022 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02004023 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004024 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004025
Michael Wang62470412013-07-04 12:55:51 +08004026 /*
4027	 * If we wake multiple tasks, be careful not to bounce
4028 * ourselves around too much.
4029 */
4030 if (wake_wide(p))
4031 return 0;
4032
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004033 idx = sd->wake_idx;
4034 this_cpu = smp_processor_id();
4035 prev_cpu = task_cpu(p);
4036 load = source_load(prev_cpu, idx);
4037 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004038
4039 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004040 * If sync wakeup then subtract the (maximum possible)
4041 * effect of the currently running task from the load
4042 * of the current CPU:
4043 */
Peter Zijlstra83378262008-06-27 13:41:37 +02004044 if (sync) {
4045 tg = task_group(current);
4046 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004047
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004048 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02004049 load += effective_load(tg, prev_cpu, 0, -weight);
4050 }
4051
4052 tg = task_group(p);
4053 weight = p->se.load.weight;
4054
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02004055 /*
4056 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004057 * due to the sync cause above having dropped this_load to 0, we'll
4058 * always have an imbalance, but there's really nothing you can do
4059 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02004060 *
4061 * Otherwise check if either cpus are near enough in load to allow this
4062 * task to be woken on this_cpu.
4063 */
Paul Turnere37b6a72011-01-21 20:44:59 -08004064 if (this_load > 0) {
4065 s64 this_eff_load, prev_eff_load;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004066
4067 this_eff_load = 100;
4068 this_eff_load *= power_of(prev_cpu);
4069 this_eff_load *= this_load +
4070 effective_load(tg, this_cpu, weight, weight);
4071
4072 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4073 prev_eff_load *= power_of(this_cpu);
4074 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4075
4076 balanced = this_eff_load <= prev_eff_load;
4077 } else
4078 balanced = true;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004079
4080 /*
4081 * If the currently running task will sleep within
4082 * a reasonable amount of time then attract this newly
4083 * woken task:
4084 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02004085 if (sync && balanced)
4086 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004087
Lucas De Marchi41acab82010-03-10 23:37:45 -03004088 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004089 tl_per_task = cpu_avg_load_per_task(this_cpu);
4090
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004091 if (balanced ||
4092 (this_load <= load &&
4093 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004094 /*
4095 * This domain has SD_WAKE_AFFINE and
4096 * p is cache cold in this domain, and
4097 * there is no bad imbalance.
4098 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004099 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03004100 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004101
4102 return 1;
4103 }
4104 return 0;
4105}
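
/*
 * Worked example (illustrative only): with sd->imbalance_pct = 125 the
 * prev_cpu side above is weighted by 100 + (125 - 100) / 2 = 112 while the
 * waking cpu's side is weighted by 100.  Assuming equal cpu power and the
 * plain (non-group) form of effective_load(), the wakeup counts as
 * balanced while
 *
 *	100 * (this_load + weight) <= 112 * load
 *
 * i.e. pulling the task next to the waker is allowed only as long as this
 * cpu, with the task's weight added, stays within roughly 12% of prev_cpu.
 */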
4106
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004107/*
4108 * find_idlest_group finds and returns the least busy CPU group within the
4109 * domain.
4110 */
4111static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02004112find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02004113 int this_cpu, int load_idx)
Gregory Haskinse7693a32008-01-25 21:08:09 +01004114{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07004115 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004116 unsigned long min_load = ULONG_MAX, this_load = 0;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004117 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004118
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004119 do {
4120 unsigned long load, avg_load;
4121 int local_group;
4122 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004123
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004124 /* Skip over this group if it has no CPUs allowed */
4125 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004126 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004127 continue;
4128
4129 local_group = cpumask_test_cpu(this_cpu,
4130 sched_group_cpus(group));
4131
4132 /* Tally up the load of all CPUs in the group */
4133 avg_load = 0;
4134
4135 for_each_cpu(i, sched_group_cpus(group)) {
4136 /* Bias balancing toward cpus of our domain */
4137 if (local_group)
4138 load = source_load(i, load_idx);
4139 else
4140 load = target_load(i, load_idx);
4141
4142 avg_load += load;
4143 }
4144
4145 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004146 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004147
4148 if (local_group) {
4149 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004150 } else if (avg_load < min_load) {
4151 min_load = avg_load;
4152 idlest = group;
4153 }
4154 } while (group = group->next, group != sd->groups);
4155
4156 if (!idlest || 100*this_load < imbalance*min_load)
4157 return NULL;
4158 return idlest;
4159}
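
/*
 * Worked example (illustrative only): with sd->imbalance_pct = 125 the
 * threshold above is imbalance = 100 + (125 - 100) / 2 = 112.  Group loads
 * are normalized by group power (avg_load * SCHED_POWER_SCALE / power), and
 * a remote group is only returned when 100 * this_load >= 112 * min_load,
 * i.e. the local group is at least ~12% busier than the idlest group found.
 */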
4160
4161/*
4162 * find_idlest_cpu - find the idlest cpu among the cpus in group.
4163 */
4164static int
4165find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4166{
4167 unsigned long load, min_load = ULONG_MAX;
4168 int idlest = -1;
4169 int i;
4170
4171 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004172 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004173 load = weighted_cpuload(i);
4174
4175 if (load < min_load || (load == min_load && i == this_cpu)) {
4176 min_load = load;
4177 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004178 }
4179 }
4180
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004181 return idlest;
4182}
Gregory Haskinse7693a32008-01-25 21:08:09 +01004183
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004184/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004185 * Try and locate an idle CPU in the sched_domain.
4186 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004187static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004188{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004189 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07004190 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004191 int i = task_cpu(p);
4192
4193 if (idle_cpu(target))
4194 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004195
4196 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004197 * If the previous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004198 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004199 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4200 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004201
4202 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07004203 * Otherwise, iterate the domains and find an eligible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004204 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01004205 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08004206 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07004207 sg = sd->groups;
4208 do {
4209 if (!cpumask_intersects(sched_group_cpus(sg),
4210 tsk_cpus_allowed(p)))
4211 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02004212
Linus Torvalds37407ea2012-09-16 12:29:43 -07004213 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004214 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07004215 goto next;
4216 }
4217
4218 target = cpumask_first_and(sched_group_cpus(sg),
4219 tsk_cpus_allowed(p));
4220 goto done;
4221next:
4222 sg = sg->next;
4223 } while (sg != sd->groups);
4224 }
4225done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004226 return target;
4227}
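
/*
 * Illustrative sketch of the walk above (not from the original source): on
 * a 4-core/8-thread package sharing one LLC, sd_llc for the target spans
 * cpus 0-7.  Walking down the domain levels, each sched_group at the core
 * level is a pair of hw threads; the first group whose cpus are all idle,
 * are allowed by p's affinity and do not include the target itself wins,
 * and its first allowed cpu becomes the new target.
 */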
4228
4229/*
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004230 * select_task_rq_fair: pick a run-queue for a waking, forking or exec'ing
4231 * task, considering only domains that have the requested sd_flag set. In
4232 * practice this is SD_BALANCE_WAKE, SD_BALANCE_FORK and SD_BALANCE_EXEC.
4233 *
4234 * Balance, ie. select the least loaded group.
4235 *
4236 * Returns the target CPU number, or the same CPU if no balancing is needed.
4237 *
4238 * preempt must be disabled.
4239 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01004240static int
Peter Zijlstraac66f542013-10-07 11:29:16 +01004241select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004242{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02004243 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004244 int cpu = smp_processor_id();
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004245 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004246 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02004247 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004248
Peter Zijlstra29baa742012-04-23 12:11:21 +02004249 if (p->nr_cpus_allowed == 1)
Mike Galbraith76854c72011-11-22 15:18:24 +01004250 return prev_cpu;
4251
Peter Zijlstra0763a662009-09-14 19:37:39 +02004252 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004253 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004254 want_affine = 1;
4255 new_cpu = prev_cpu;
4256 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01004257
Peter Zijlstradce840a2011-04-07 14:09:50 +02004258 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004259 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f428882009-12-16 18:04:34 +01004260 if (!(tmp->flags & SD_LOAD_BALANCE))
4261 continue;
4262
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004263 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004264 * If both cpu and prev_cpu are part of this domain,
4265 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01004266 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004267 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4268 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4269 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08004270 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004271 }
4272
Alex Shif03542a2012-07-26 08:55:34 +08004273 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02004274 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004275 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004276
Mike Galbraith8b911ac2010-03-11 17:17:16 +01004277 if (affine_sd) {
Alex Shif03542a2012-07-26 08:55:34 +08004278 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
Peter Zijlstradce840a2011-04-07 14:09:50 +02004279 prev_cpu = cpu;
4280
4281 new_cpu = select_idle_sibling(p, prev_cpu);
4282 goto unlock;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01004283 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02004284
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004285 while (sd) {
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02004286 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004287 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004288 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004289
Peter Zijlstra0763a662009-09-14 19:37:39 +02004290 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004291 sd = sd->child;
4292 continue;
4293 }
4294
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02004295 if (sd_flag & SD_BALANCE_WAKE)
4296 load_idx = sd->wake_idx;
4297
4298 group = find_idlest_group(sd, p, cpu, load_idx);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004299 if (!group) {
4300 sd = sd->child;
4301 continue;
4302 }
4303
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02004304 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004305 if (new_cpu == -1 || new_cpu == cpu) {
4306 /* Now try balancing at a lower domain level of cpu */
4307 sd = sd->child;
4308 continue;
4309 }
4310
4311 /* Now try balancing at a lower domain level of new_cpu */
4312 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004313 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004314 sd = NULL;
4315 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004316 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004317 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02004318 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004319 sd = tmp;
4320 }
4321 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01004322 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02004323unlock:
4324 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01004325
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004326 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004327}
Paul Turner0a74bef2012-10-04 13:18:30 +02004328
4329/*
4330 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4331 * cfs_rq_of(p) references at time of call are still valid and identify the
4332 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
4333 * other assumptions, including the state of rq->lock, should be made.
4334 */
4335static void
4336migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4337{
Paul Turneraff3e492012-10-04 13:18:30 +02004338 struct sched_entity *se = &p->se;
4339 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4340
4341 /*
4342 * Load tracking: accumulate removed load so that it can be processed
4343 * when we next update owning cfs_rq under rq->lock. Tasks contribute
4344 * to blocked load iff they have a positive decay-count. It can never
4345 * be negative here since on-rq tasks have decay-count == 0.
4346 */
4347 if (se->avg.decay_count) {
4348 se->avg.decay_count = -__synchronize_entity_decay(se);
Alex Shi25099402013-06-20 10:18:55 +08004349 atomic_long_add(se->avg.load_avg_contrib,
4350 &cfs_rq->removed_load);
Paul Turneraff3e492012-10-04 13:18:30 +02004351 }
Paul Turner0a74bef2012-10-04 13:18:30 +02004352}
Gregory Haskinse7693a32008-01-25 21:08:09 +01004353#endif /* CONFIG_SMP */
4354
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004355static unsigned long
4356wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004357{
4358 unsigned long gran = sysctl_sched_wakeup_granularity;
4359
4360 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004361 * Since it's curr that is running now, convert the gran from real-time
4362	 * to virtual-time in its units.
Mike Galbraith13814d42010-03-11 17:17:04 +01004363 *
4364 * By using 'se' instead of 'curr' we penalize light tasks, so
4365 * they get preempted easier. That is, if 'se' < 'curr' then
4366 * the resulting gran will be larger, therefore penalizing the
4367 * lighter, if otoh 'se' > 'curr' then the resulting gran will
4368 * be smaller, again penalizing the lighter task.
4369 *
4370 * This is especially important for buddies when the leftmost
4371 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004372 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08004373 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004374}
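
/*
 * Worked example (illustrative, assuming calc_delta_fair() scales by
 * NICE_0_LOAD / se->load.weight as it does for vruntime accounting): with a
 * 1ms wakeup granularity a nice-0 wakee sees a virtual granularity of 1ms,
 * while a wakee with half the nice-0 weight sees 2ms - curr's vruntime must
 * exceed the wakee's by more than 2ms of virtual time before
 * wakeup_preempt_entity() below lets the wakee preempt.
 */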
4375
4376/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02004377 * Should 'se' preempt 'curr'.
4378 *
4379 * |s1
4380 * |s2
4381 * |s3
4382 * g
4383 * |<--->|c
4384 *
4385 * w(c, s1) = -1
4386 * w(c, s2) = 0
4387 * w(c, s3) = 1
4388 *
4389 */
4390static int
4391wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4392{
4393 s64 gran, vdiff = curr->vruntime - se->vruntime;
4394
4395 if (vdiff <= 0)
4396 return -1;
4397
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004398 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02004399 if (vdiff > gran)
4400 return 1;
4401
4402 return 0;
4403}
4404
Peter Zijlstra02479092008-11-04 21:25:10 +01004405static void set_last_buddy(struct sched_entity *se)
4406{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004407 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4408 return;
4409
4410 for_each_sched_entity(se)
4411 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01004412}
4413
4414static void set_next_buddy(struct sched_entity *se)
4415{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004416 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4417 return;
4418
4419 for_each_sched_entity(se)
4420 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01004421}
4422
Rik van Rielac53db52011-02-01 09:51:03 -05004423static void set_skip_buddy(struct sched_entity *se)
4424{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004425 for_each_sched_entity(se)
4426 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05004427}
4428
Peter Zijlstra464b7522008-10-24 11:06:15 +02004429/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004430 * Preempt the current task with a newly woken task if needed:
4431 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02004432static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004433{
4434 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02004435 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01004436 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02004437 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004438 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01004439
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01004440 if (unlikely(se == pse))
4441 return;
4442
Paul Turner5238cdd2011-07-21 09:43:37 -07004443 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004444 * This is possible from callers such as move_task(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07004445 * unconditionally check_preempt_curr() after an enqueue (which may have
4446	 * led to a throttle). This both saves work and prevents false
4447 * next-buddy nomination below.
4448 */
4449 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4450 return;
4451
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004452 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02004453 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004454 next_buddy_marked = 1;
4455 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02004456
Bharata B Raoaec0a512008-08-28 14:42:49 +05304457 /*
4458 * We can come here with TIF_NEED_RESCHED already set from new task
4459 * wake up path.
Paul Turner5238cdd2011-07-21 09:43:37 -07004460 *
4461 * Note: this also catches the edge-case of curr being in a throttled
4462 * group (e.g. via set_curr_task), since update_curr() (in the
4463 * enqueue of curr) will have resulted in resched being set. This
4464 * prevents us from potentially nominating it as a false LAST_BUDDY
4465 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05304466 */
4467 if (test_tsk_need_resched(curr))
4468 return;
4469
Darren Harta2f5c9a2011-02-22 13:04:33 -08004470 /* Idle tasks are by definition preempted by non-idle tasks. */
4471 if (unlikely(curr->policy == SCHED_IDLE) &&
4472 likely(p->policy != SCHED_IDLE))
4473 goto preempt;
4474
Ingo Molnar91c234b2007-10-15 17:00:18 +02004475 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08004476 * Batch and idle tasks do not preempt non-idle tasks (their preemption
4477 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02004478 */
Ingo Molnar8ed92e512012-10-14 14:28:50 +02004479 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02004480 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004481
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004482 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07004483 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004484 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004485 if (wakeup_preempt_entity(se, pse) == 1) {
4486 /*
4487 * Bias pick_next to pick the sched entity that is
4488 * triggering this preemption.
4489 */
4490 if (!next_buddy_marked)
4491 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004492 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004493 }
Jupyung Leea65ac742009-11-17 18:51:40 +09004494
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004495 return;
4496
4497preempt:
4498 resched_task(curr);
4499 /*
4500 * Only set the backward buddy when the current task is still
4501 * on the rq. This can happen when a wakeup gets interleaved
4502 * with schedule on the ->pre_schedule() or idle_balance()
4503	 * point, either of which can drop the rq lock.
4504 *
4505 * Also, during early boot the idle thread is in the fair class,
4506	 * for obvious reasons it's a bad idea to schedule back to it.
4507 */
4508 if (unlikely(!se->on_rq || curr == rq->idle))
4509 return;
4510
4511 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4512 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004513}
4514
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004515static struct task_struct *pick_next_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004516{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004517 struct task_struct *p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004518 struct cfs_rq *cfs_rq = &rq->cfs;
4519 struct sched_entity *se;
4520
Tim Blechmann36ace272009-11-24 11:55:45 +01004521 if (!cfs_rq->nr_running)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004522 return NULL;
4523
4524 do {
Ingo Molnar9948f4b2007-08-09 11:16:48 +02004525 se = pick_next_entity(cfs_rq);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01004526 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004527 cfs_rq = group_cfs_rq(se);
4528 } while (cfs_rq);
4529
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004530 p = task_of(se);
Mike Galbraithb39e66e2011-11-22 15:20:07 +01004531 if (hrtick_enabled(rq))
4532 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004533
4534 return p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004535}
4536
4537/*
4538 * Account for a descheduled task:
4539 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02004540static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004541{
4542 struct sched_entity *se = &prev->se;
4543 struct cfs_rq *cfs_rq;
4544
4545 for_each_sched_entity(se) {
4546 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02004547 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004548 }
4549}
4550
Rik van Rielac53db52011-02-01 09:51:03 -05004551/*
4552 * sched_yield() is very simple
4553 *
4554 * The magic of dealing with the ->skip buddy is in pick_next_entity.
4555 */
4556static void yield_task_fair(struct rq *rq)
4557{
4558 struct task_struct *curr = rq->curr;
4559 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4560 struct sched_entity *se = &curr->se;
4561
4562 /*
4563 * Are we the only task in the tree?
4564 */
4565 if (unlikely(rq->nr_running == 1))
4566 return;
4567
4568 clear_buddies(cfs_rq, se);
4569
4570 if (curr->policy != SCHED_BATCH) {
4571 update_rq_clock(rq);
4572 /*
4573 * Update run-time statistics of the 'current'.
4574 */
4575 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01004576 /*
4577 * Tell update_rq_clock() that we've just updated,
4578 * so we don't do microscopic update in schedule()
4579 * and double the fastpath cost.
4580 */
4581 rq->skip_clock_update = 1;
Rik van Rielac53db52011-02-01 09:51:03 -05004582 }
4583
4584 set_skip_buddy(se);
4585}
4586
Mike Galbraithd95f4122011-02-01 09:50:51 -05004587static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
4588{
4589 struct sched_entity *se = &p->se;
4590
Paul Turner5238cdd2011-07-21 09:43:37 -07004591 /* throttled hierarchies are not runnable */
4592 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05004593 return false;
4594
4595 /* Tell the scheduler that we'd really like pse to run next. */
4596 set_next_buddy(se);
4597
Mike Galbraithd95f4122011-02-01 09:50:51 -05004598 yield_task_fair(rq);
4599
4600 return true;
4601}
4602
Peter Williams681f3e62007-10-24 18:23:51 +02004603#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004604/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02004605 * Fair scheduling class load-balancing methods.
4606 *
4607 * BASICS
4608 *
4609 * The purpose of load-balancing is to achieve the same basic fairness the
4610 * per-cpu scheduler provides, namely provide a proportional amount of compute
4611 * time to each task. This is expressed in the following equation:
4612 *
4613 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
4614 *
4615 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
4616 * W_i,0 is defined as:
4617 *
4618 * W_i,0 = \Sum_j w_i,j (2)
4619 *
4620 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
4621 * is derived from the nice value as per prio_to_weight[].
4622 *
4623 * The weight average is an exponential decay average of the instantaneous
4624 * weight:
4625 *
4626 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
4627 *
4628 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
4629 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
4630 * can also include other factors [XXX].
4631 *
4632 * To achieve this balance we define a measure of imbalance which follows
4633 * directly from (1):
4634 *
4635 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
4636 *
4637 * We then move tasks around to minimize the imbalance. In the continuous
4638 * function space it is obvious this converges; in the discrete case we get
4639 * a few fun cases generally called infeasible weight scenarios.
4640 *
4641 * [XXX expand on:
4642 * - infeasible weights;
4643 * - local vs global optima in the discrete case. ]
4644 *
4645 *
4646 * SCHED DOMAINS
4647 *
4648 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
4649 * for all i,j solution, we create a tree of cpus that follows the hardware
4650 * topology where each level pairs two lower groups (or better). This results
4651 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
4652 * tree to only the first of the previous level and we decrease the frequency
4653 * of load-balance at each level inv. proportional to the number of cpus in
4654 * the groups.
4655 *
4656 * This yields:
4657 *
4658 * log_2 n 1 n
4659 * \Sum { --- * --- * 2^i } = O(n) (5)
4660 * i = 0 2^i 2^i
4661 * `- size of each group
4662 * | | `- number of cpus doing load-balance
4663 * | `- freq
4664 * `- sum over all levels
4665 *
4666 * Coupled with a limit on how many tasks we can migrate every balance pass,
4667 * this makes (5) the runtime complexity of the balancer.
4668 *
4669 * An important property here is that each CPU is still (indirectly) connected
4670 * to every other cpu in at most O(log n) steps:
4671 *
4672 * The adjacency matrix of the resulting graph is given by:
4673 *
4674 * log_2 n
4675 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
4676 * k = 0
4677 *
4678 * And you'll find that:
4679 *
4680 * A^(log_2 n)_i,j != 0 for all i,j (7)
4681 *
4682 * Showing there's indeed a path between every cpu in at most O(log n) steps.
4683 * The task movement gives a factor of O(m), giving a convergence complexity
4684 * of:
4685 *
4686 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
4687 *
4688 *
4689 * WORK CONSERVING
4690 *
4691 * In order to avoid CPUs going idle while there's still work to do, new idle
4692 * balancing is more aggressive and has the newly idle cpu iterate up the domain
4693 * tree itself instead of relying on other CPUs to bring it work.
4694 *
4695 * This adds some complexity to both (5) and (8) but it reduces the total idle
4696 * time.
4697 *
4698 * [XXX more?]
4699 *
4700 *
4701 * CGROUPS
4702 *
4703 * Cgroups make a horror show out of (2), instead of a simple sum we get:
4704 *
4705 * s_k,i
4706 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
4707 * S_k
4708 *
4709 * Where
4710 *
4711 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
4712 *
4713 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
4714 *
4715 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
4716 * property.
4717 *
4718 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
4719 * rewrite all of this once again.]
4720 */
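
/*
 * Worked example for (1)-(4) (illustrative only): take two cpus of equal
 * power P with W_1 = 2048 (two nice-0 tasks) and W_2 = 1024 (one nice-0
 * task), so avg(W/P) = 1536/P and, per (4),
 *
 *	imb_1,2 = max{1536/P, 2048/P} - min{1536/P, 1024/P} = 1024/P
 *
 * Evening this out would mean shifting 512/P, i.e. half a nice-0 task;
 * since tasks are indivisible, migrating a whole task merely mirrors the
 * imbalance onto the other cpu - one of the infeasible weight scenarios
 * mentioned above.
 */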
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004721
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09004722static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4723
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01004724enum fbq_type { regular, remote, all };
4725
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004726#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01004727#define LBF_NEED_BREAK 0x02
Peter Zijlstra62633222013-08-19 12:41:09 +02004728#define LBF_DST_PINNED 0x04
4729#define LBF_SOME_PINNED 0x08
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004730
4731struct lb_env {
4732 struct sched_domain *sd;
4733
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004734 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05304735 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004736
4737 int dst_cpu;
4738 struct rq *dst_rq;
4739
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304740 struct cpumask *dst_grpmask;
4741 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004742 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004743 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08004744 /* The set of CPUs under consideration for load-balancing */
4745 struct cpumask *cpus;
4746
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004747 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004748
4749 unsigned int loop;
4750 unsigned int loop_break;
4751 unsigned int loop_max;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01004752
4753 enum fbq_type fbq_type;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004754};
4755
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004756/*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004757 * move_task - move a task from one runqueue to another runqueue.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004758 * Both runqueues must be locked.
4759 */
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004760static void move_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004761{
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004762 deactivate_task(env->src_rq, p, 0);
4763 set_task_cpu(p, env->dst_cpu);
4764 activate_task(env->dst_rq, p, 0);
4765 check_preempt_curr(env->dst_rq, p, 0);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004766}
4767
4768/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02004769 * Is this task likely cache-hot:
4770 */
4771static int
4772task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4773{
4774 s64 delta;
4775
4776 if (p->sched_class != &fair_sched_class)
4777 return 0;
4778
4779 if (unlikely(p->policy == SCHED_IDLE))
4780 return 0;
4781
4782 /*
4783 * Buddy candidates are cache hot:
4784 */
4785 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4786 (&p->se == cfs_rq_of(&p->se)->next ||
4787 &p->se == cfs_rq_of(&p->se)->last))
4788 return 1;
4789
4790 if (sysctl_sched_migration_cost == -1)
4791 return 1;
4792 if (sysctl_sched_migration_cost == 0)
4793 return 0;
4794
4795 delta = now - p->se.exec_start;
4796
4797 return delta < (s64)sysctl_sched_migration_cost;
4798}
4799
Mel Gorman3a7053b2013-10-07 11:29:00 +01004800#ifdef CONFIG_NUMA_BALANCING
4801/* Returns true if the destination node has incurred more faults */
4802static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
4803{
4804 int src_nid, dst_nid;
4805
4806 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
4807 !(env->sd->flags & SD_NUMA)) {
4808 return false;
4809 }
4810
4811 src_nid = cpu_to_node(env->src_cpu);
4812 dst_nid = cpu_to_node(env->dst_cpu);
4813
Mel Gorman83e1d2c2013-10-07 11:29:27 +01004814 if (src_nid == dst_nid)
Mel Gorman3a7053b2013-10-07 11:29:00 +01004815 return false;
4816
Mel Gorman83e1d2c2013-10-07 11:29:27 +01004817 /* Always encourage migration to the preferred node. */
4818 if (dst_nid == p->numa_preferred_nid)
4819 return true;
4820
Rik van Riel887c2902013-10-07 11:29:31 +01004821 /* If both task and group weight improve, this move is a winner. */
4822 if (task_weight(p, dst_nid) > task_weight(p, src_nid) &&
4823 group_weight(p, dst_nid) > group_weight(p, src_nid))
Mel Gorman3a7053b2013-10-07 11:29:00 +01004824 return true;
4825
4826 return false;
4827}
Mel Gorman7a0f3082013-10-07 11:29:01 +01004828
4829
4830static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
4831{
4832 int src_nid, dst_nid;
4833
4834 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
4835 return false;
4836
4837 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
4838 return false;
4839
4840 src_nid = cpu_to_node(env->src_cpu);
4841 dst_nid = cpu_to_node(env->dst_cpu);
4842
Mel Gorman83e1d2c2013-10-07 11:29:27 +01004843 if (src_nid == dst_nid)
Mel Gorman7a0f3082013-10-07 11:29:01 +01004844 return false;
4845
Mel Gorman83e1d2c2013-10-07 11:29:27 +01004846 /* Migrating away from the preferred node is always bad. */
4847 if (src_nid == p->numa_preferred_nid)
4848 return true;
4849
Rik van Riel887c2902013-10-07 11:29:31 +01004850 /* If either task or group weight get worse, don't do it. */
4851 if (task_weight(p, dst_nid) < task_weight(p, src_nid) ||
4852 group_weight(p, dst_nid) < group_weight(p, src_nid))
Mel Gorman7a0f3082013-10-07 11:29:01 +01004853 return true;
4854
4855 return false;
4856}
4857
Mel Gorman3a7053b2013-10-07 11:29:00 +01004858#else
4859static inline bool migrate_improves_locality(struct task_struct *p,
4860 struct lb_env *env)
4861{
4862 return false;
4863}
Mel Gorman7a0f3082013-10-07 11:29:01 +01004864
4865static inline bool migrate_degrades_locality(struct task_struct *p,
4866 struct lb_env *env)
4867{
4868 return false;
4869}
Mel Gorman3a7053b2013-10-07 11:29:00 +01004870#endif
4871
Peter Zijlstra029632f2011-10-25 10:00:11 +02004872/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004873 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
4874 */
4875static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004876int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004877{
4878 int tsk_cache_hot = 0;
4879 /*
4880 * We do not migrate tasks that are:
Joonsoo Kimd3198082013-04-23 17:27:40 +09004881 * 1) throttled_lb_pair, or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004882 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09004883 * 3) running (obviously), or
4884 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004885 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09004886 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
4887 return 0;
4888
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004889 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004890 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304891
Lucas De Marchi41acab82010-03-10 23:37:45 -03004892 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304893
Peter Zijlstra62633222013-08-19 12:41:09 +02004894 env->flags |= LBF_SOME_PINNED;
4895
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304896 /*
4897 * Remember if this task can be migrated to any other cpu in
4898 * our sched_group. We may want to revisit it if we couldn't
4899 * meet load balance goals by pulling other tasks on src_cpu.
4900 *
4901 * Also avoid computing new_dst_cpu if we have already computed
4902 * one in current iteration.
4903 */
Peter Zijlstra62633222013-08-19 12:41:09 +02004904 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304905 return 0;
4906
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004907 /* Prevent to re-select dst_cpu via env's cpus */
4908 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
4909 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
Peter Zijlstra62633222013-08-19 12:41:09 +02004910 env->flags |= LBF_DST_PINNED;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004911 env->new_dst_cpu = cpu;
4912 break;
4913 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304914 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004915
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004916 return 0;
4917 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304918
4919	/* Record that we found at least one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004920 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004921
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004922 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03004923 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004924 return 0;
4925 }
4926
4927 /*
4928 * Aggressive migration if:
Mel Gorman3a7053b2013-10-07 11:29:00 +01004929 * 1) destination numa is preferred
4930 * 2) task is cache cold, or
4931 * 3) too many balance attempts have failed.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004932 */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004933 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
Mel Gorman7a0f3082013-10-07 11:29:01 +01004934 if (!tsk_cache_hot)
4935 tsk_cache_hot = migrate_degrades_locality(p, env);
Mel Gorman3a7053b2013-10-07 11:29:00 +01004936
4937 if (migrate_improves_locality(p, env)) {
4938#ifdef CONFIG_SCHEDSTATS
4939 if (tsk_cache_hot) {
4940 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
4941 schedstat_inc(p, se.statistics.nr_forced_migrations);
4942 }
4943#endif
4944 return 1;
4945 }
4946
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004947 if (!tsk_cache_hot ||
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004948 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004949
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004950 if (tsk_cache_hot) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004951 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03004952 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004953 }
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004954
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004955 return 1;
4956 }
4957
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004958 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4959 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004960}
4961
Peter Zijlstra897c3952009-12-17 17:45:42 +01004962/*
4963 * move_one_task tries to move exactly one task from busiest to this_rq, as
4964 * part of active balancing operations within "domain".
4965 * Returns 1 if successful and 0 otherwise.
4966 *
4967 * Called with both runqueues locked.
4968 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004969static int move_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01004970{
4971 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004972
Peter Zijlstra367456c2012-02-20 21:49:09 +01004973 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01004974 if (!can_migrate_task(p, env))
4975 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004976
Peter Zijlstra367456c2012-02-20 21:49:09 +01004977 move_task(p, env);
4978 /*
4979 * Right now, this is only the second place move_task()
4980 * is called, so we can safely collect move_task()
4981 * stats here rather than inside move_task().
4982 */
4983 schedstat_inc(env->sd, lb_gained[env->idle]);
4984 return 1;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004985 }
Peter Zijlstra897c3952009-12-17 17:45:42 +01004986 return 0;
4987}
4988
Peter Zijlstraeb953082012-04-17 13:38:40 +02004989static const unsigned int sched_nr_migrate_break = 32;
4990
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004991/*
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004992 * move_tasks tries to move up to imbalance weighted load from busiest to
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004993 * this_rq, as part of a balancing operation within domain "sd".
4994 * Returns 1 if successful and 0 otherwise.
4995 *
4996 * Called with both runqueues locked.
4997 */
4998static int move_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004999{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005000 struct list_head *tasks = &env->src_rq->cfs_tasks;
5001 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01005002 unsigned long load;
5003 int pulled = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005004
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005005 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005006 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005007
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005008 while (!list_empty(tasks)) {
5009 p = list_first_entry(tasks, struct task_struct, se.group_node);
5010
Peter Zijlstra367456c2012-02-20 21:49:09 +01005011 env->loop++;
5012 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005013 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01005014 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005015
5016 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01005017 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02005018 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005019 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01005020 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02005021 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005022
Joonsoo Kimd3198082013-04-23 17:27:40 +09005023 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01005024 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005025
Peter Zijlstra367456c2012-02-20 21:49:09 +01005026 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005027
Peter Zijlstraeb953082012-04-17 13:38:40 +02005028 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01005029 goto next;
5030
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005031 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01005032 goto next;
5033
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005034 move_task(p, env);
Peter Zijlstraee00e662009-12-17 17:25:20 +01005035 pulled++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005036 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005037
5038#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01005039 /*
5040 * NEWIDLE balancing is a source of latency, so preemptible
5041 * kernels will stop after the first task is pulled to minimize
5042 * the critical section.
5043 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005044 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01005045 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005046#endif
5047
Peter Zijlstraee00e662009-12-17 17:25:20 +01005048 /*
5049 * We only want to steal up to the prescribed amount of
5050 * weighted load.
5051 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005052 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01005053 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005054
Peter Zijlstra367456c2012-02-20 21:49:09 +01005055 continue;
5056next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005057 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005058 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005059
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005060 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005061 * Right now, this is one of only two places move_task() is called,
5062 * so we can safely collect move_task() stats here rather than
5063 * inside move_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005064 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005065 schedstat_add(env->sd, lb_gained[env->idle], pulled);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005066
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005067 return pulled;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005068}
5069
Peter Zijlstra230059de2009-12-17 17:47:12 +01005070#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005071/*
5072 * update tg->load_weight by folding this cpu's load_avg
5073 */
Paul Turner48a16752012-10-04 13:18:31 +02005074static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005075{
Paul Turner48a16752012-10-04 13:18:31 +02005076 struct sched_entity *se = tg->se[cpu];
5077 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005078
Paul Turner48a16752012-10-04 13:18:31 +02005079 /* throttled entities do not contribute to load */
5080 if (throttled_hierarchy(cfs_rq))
5081 return;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005082
Paul Turneraff3e492012-10-04 13:18:30 +02005083 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005084
Paul Turner82958362012-10-04 13:18:31 +02005085 if (se) {
5086 update_entity_load_avg(se, 1);
5087 /*
5088 * We pivot on our runnable average having decayed to zero for
5089 * list removal. This generally implies that all our children
5090 * have also been removed (modulo rounding error or bandwidth
5091 * control); however, such cases are rare and we can fix these
5092 * at enqueue.
5093 *
5094 * TODO: fix up out-of-order children on enqueue.
5095 */
5096 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
5097 list_del_leaf_cfs_rq(cfs_rq);
5098 } else {
Paul Turner48a16752012-10-04 13:18:31 +02005099 struct rq *rq = rq_of(cfs_rq);
Paul Turner82958362012-10-04 13:18:31 +02005100 update_rq_runnable_avg(rq, rq->nr_running);
5101 }
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005102}
5103
Paul Turner48a16752012-10-04 13:18:31 +02005104static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005105{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005106 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02005107 struct cfs_rq *cfs_rq;
5108 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005109
Paul Turner48a16752012-10-04 13:18:31 +02005110 raw_spin_lock_irqsave(&rq->lock, flags);
5111 update_rq_clock(rq);
Peter Zijlstra9763b672011-07-13 13:09:25 +02005112 /*
5113 * Iterates the task_group tree in a bottom up fashion, see
5114 * list_add_leaf_cfs_rq() for details.
5115 */
Paul Turner64660c82011-07-21 09:43:36 -07005116 for_each_leaf_cfs_rq(rq, cfs_rq) {
Paul Turner48a16752012-10-04 13:18:31 +02005117 /*
5118 * Note: We may want to consider periodically releasing
5119		 * rq->lock around these updates so that creating many task
5120 * groups does not result in continually extending hold time.
5121 */
5122 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
Paul Turner64660c82011-07-21 09:43:36 -07005123 }
Paul Turner48a16752012-10-04 13:18:31 +02005124
5125 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005126}
5127
Peter Zijlstra9763b672011-07-13 13:09:25 +02005128/*
Vladimir Davydov68520792013-07-15 17:49:19 +04005129 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
Peter Zijlstra9763b672011-07-13 13:09:25 +02005130 * This needs to be done in a top-down fashion because the load of a child
5131 * group is a fraction of its parent's load.
5132 */
Vladimir Davydov68520792013-07-15 17:49:19 +04005133static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
Peter Zijlstra9763b672011-07-13 13:09:25 +02005134{
Vladimir Davydov68520792013-07-15 17:49:19 +04005135 struct rq *rq = rq_of(cfs_rq);
5136 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005137 unsigned long now = jiffies;
Vladimir Davydov68520792013-07-15 17:49:19 +04005138 unsigned long load;
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005139
Vladimir Davydov68520792013-07-15 17:49:19 +04005140 if (cfs_rq->last_h_load_update == now)
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005141 return;
5142
Vladimir Davydov68520792013-07-15 17:49:19 +04005143 cfs_rq->h_load_next = NULL;
5144 for_each_sched_entity(se) {
5145 cfs_rq = cfs_rq_of(se);
5146 cfs_rq->h_load_next = se;
5147 if (cfs_rq->last_h_load_update == now)
5148 break;
5149 }
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005150
Vladimir Davydov68520792013-07-15 17:49:19 +04005151 if (!se) {
Vladimir Davydov7e3115e2013-09-14 19:39:46 +04005152 cfs_rq->h_load = cfs_rq->runnable_load_avg;
Vladimir Davydov68520792013-07-15 17:49:19 +04005153 cfs_rq->last_h_load_update = now;
5154 }
5155
5156 while ((se = cfs_rq->h_load_next) != NULL) {
5157 load = cfs_rq->h_load;
5158 load = div64_ul(load * se->avg.load_avg_contrib,
5159 cfs_rq->runnable_load_avg + 1);
5160 cfs_rq = group_cfs_rq(se);
5161 cfs_rq->h_load = load;
5162 cfs_rq->last_h_load_update = now;
5163 }
Peter Zijlstra9763b672011-07-13 13:09:25 +02005164}
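
/*
 * Worked example (illustrative only): suppose the root cfs_rq has
 * runnable_load_avg = 2048 and a child group entity contributes
 * load_avg_contrib = 1024 to it.  The child cfs_rq then gets
 *
 *	h_load = 2048 * 1024 / (2048 + 1) ~= 1023
 *
 * and a task in that group with load_avg_contrib = 512, on a child
 * runnable_load_avg of 1024, reports task_h_load() ~= 1023 * 512 / 1025
 * ~= 511 - roughly a quarter of the root's load, as expected.
 */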
5165
Peter Zijlstra367456c2012-02-20 21:49:09 +01005166static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01005167{
Peter Zijlstra367456c2012-02-20 21:49:09 +01005168 struct cfs_rq *cfs_rq = task_cfs_rq(p);
Peter Zijlstra230059de2009-12-17 17:47:12 +01005169
Vladimir Davydov68520792013-07-15 17:49:19 +04005170 update_cfs_rq_h_load(cfs_rq);
Alex Shia003a252013-06-20 10:18:51 +08005171 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
5172 cfs_rq->runnable_load_avg + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01005173}
5174#else
Paul Turner48a16752012-10-04 13:18:31 +02005175static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005176{
5177}
5178
Peter Zijlstra367456c2012-02-20 21:49:09 +01005179static unsigned long task_h_load(struct task_struct *p)
5180{
Alex Shia003a252013-06-20 10:18:51 +08005181 return p->se.avg.load_avg_contrib;
Peter Zijlstra230059de2009-12-17 17:47:12 +01005182}
5183#endif
5184
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005185/********** Helpers for find_busiest_group ************************/
5186/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005187 * sg_lb_stats - stats of a sched_group required for load_balancing
5188 */
5189struct sg_lb_stats {
5190	unsigned long avg_load; /* Avg load across the CPUs of the group */
5191 unsigned long group_load; /* Total load over the CPUs of the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005192 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005193 unsigned long load_per_task;
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005194 unsigned long group_power;
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005195 unsigned int sum_nr_running; /* Nr tasks running in the group */
5196 unsigned int group_capacity;
5197 unsigned int idle_cpus;
5198 unsigned int group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005199 int group_imb; /* Is there an imbalance in the group ? */
Nikhil Raofab47622010-10-15 13:12:29 -07005200 int group_has_capacity; /* Is there extra capacity in the group? */
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005201#ifdef CONFIG_NUMA_BALANCING
5202 unsigned int nr_numa_running;
5203 unsigned int nr_preferred_running;
5204#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005205};
5206
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005207/*
5208 * sd_lb_stats - Structure to store the statistics of a sched_domain
5209 * during load balancing.
5210 */
5211struct sd_lb_stats {
5212 struct sched_group *busiest; /* Busiest group in this sd */
5213 struct sched_group *local; /* Local group in this sd */
5214 unsigned long total_load; /* Total load of all groups in sd */
5215 unsigned long total_pwr; /* Total power of all groups in sd */
5216 unsigned long avg_load; /* Average load across all groups in sd */
5217
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005218 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005219 struct sg_lb_stats local_stat; /* Statistics of the local group */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005220};
5221
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005222static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5223{
5224 /*
5225 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
5226 * local_stat because update_sg_lb_stats() does a full clear/assignment.
5227 * We must however clear busiest_stat::avg_load because
5228 * update_sd_pick_busiest() reads this before assignment.
5229 */
5230 *sds = (struct sd_lb_stats){
5231 .busiest = NULL,
5232 .local = NULL,
5233 .total_load = 0UL,
5234 .total_pwr = 0UL,
5235 .busiest_stat = {
5236 .avg_load = 0UL,
5237 },
5238 };
5239}
5240
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005241/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005242 * get_sd_load_idx - Obtain the load index for a given sched domain.
5243 * @sd: The sched_domain whose load_idx is to be obtained.
Kamalesh Babulaled1b7732013-10-13 23:06:15 +05305244 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
Yacine Belkadie69f6182013-07-12 20:45:47 +02005245 *
5246 * Return: The load index.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005247 */
5248static inline int get_sd_load_idx(struct sched_domain *sd,
5249 enum cpu_idle_type idle)
5250{
5251 int load_idx;
5252
5253 switch (idle) {
5254 case CPU_NOT_IDLE:
5255 load_idx = sd->busy_idx;
5256 break;
5257
5258 case CPU_NEWLY_IDLE:
5259 load_idx = sd->newidle_idx;
5260 break;
5261 default:
5262 load_idx = sd->idle_idx;
5263 break;
5264 }
5265
5266 return load_idx;
5267}
5268
Li Zefan15f803c2013-03-05 16:07:11 +08005269static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005270{
Nikhil Rao1399fa72011-05-18 10:09:39 -07005271 return SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005272}
5273
5274unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
5275{
5276 return default_scale_freq_power(sd, cpu);
5277}
5278
Li Zefan15f803c2013-03-05 16:07:11 +08005279static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005280{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02005281 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005282 unsigned long smt_gain = sd->smt_gain;
5283
5284 smt_gain /= weight;
5285
5286 return smt_gain;
5287}
5288
5289unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
5290{
5291 return default_scale_smt_power(sd, cpu);
5292}
5293
Li Zefan15f803c2013-03-05 16:07:11 +08005294static unsigned long scale_rt_power(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005295{
5296 struct rq *rq = cpu_rq(cpu);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005297 u64 total, available, age_stamp, avg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005298
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005299 /*
5300 * Since we're reading these variables without serialization make sure
5301 * we read them once before doing sanity checks on them.
5302 */
5303 age_stamp = ACCESS_ONCE(rq->age_stamp);
5304 avg = ACCESS_ONCE(rq->rt_avg);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07005305
Frederic Weisbecker78becc22013-04-12 01:51:02 +02005306 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005307
5308 if (unlikely(total < avg)) {
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07005309 /* Ensures that power won't end up being negative */
5310 available = 0;
5311 } else {
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005312 available = total - avg;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07005313 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005314
Nikhil Rao1399fa72011-05-18 10:09:39 -07005315 if (unlikely((s64)total < SCHED_POWER_SCALE))
5316 total = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005317
Nikhil Rao1399fa72011-05-18 10:09:39 -07005318 total >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005319
5320 return div_u64(available, total);
5321}
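/*
 * Worked example of the arithmetic above (illustrative only, not part of the
 * scheduler; the numbers are assumed): if sched_avg_period() plus the clock
 * delta gives total = 1,000,000 and rq->rt_avg = 250,000, then:
 *
 *   available  = 1,000,000 - 250,000        = 750,000
 *   total    >>= SCHED_POWER_SHIFT (10)     -> 976
 *   return     = div_u64(750,000, 976)      = 768
 *
 * i.e. a CPU that spent ~25% of the averaging period on RT/IRQ work is left
 * with roughly 75% of SCHED_POWER_SCALE (1024) for CFS tasks.
 */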
5322
5323static void update_cpu_power(struct sched_domain *sd, int cpu)
5324{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02005325 unsigned long weight = sd->span_weight;
Nikhil Rao1399fa72011-05-18 10:09:39 -07005326 unsigned long power = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005327 struct sched_group *sdg = sd->groups;
5328
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005329 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
5330 if (sched_feat(ARCH_POWER))
5331 power *= arch_scale_smt_power(sd, cpu);
5332 else
5333 power *= default_scale_smt_power(sd, cpu);
5334
Nikhil Rao1399fa72011-05-18 10:09:39 -07005335 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005336 }
5337
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005338 sdg->sgp->power_orig = power;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005339
5340 if (sched_feat(ARCH_POWER))
5341 power *= arch_scale_freq_power(sd, cpu);
5342 else
5343 power *= default_scale_freq_power(sd, cpu);
5344
Nikhil Rao1399fa72011-05-18 10:09:39 -07005345 power >>= SCHED_POWER_SHIFT;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005346
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005347 power *= scale_rt_power(cpu);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005348 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005349
5350 if (!power)
5351 power = 1;
5352
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02005353 cpu_rq(cpu)->cpu_power = power;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005354 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005355}
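/*
 * Worked example for update_cpu_power() (illustrative sketch with assumed
 * numbers, including the typical SMT-2 smt_gain of 1178 and no arch
 * overrides): for one hardware thread of an SMT-2 sibling domain:
 *
 *   power  = SCHED_POWER_SCALE                       = 1024
 *   power *= smt scale (1178 / 2 = 589)   -> (1024 * 589) >> 10 = 589
 *   power *= freq scale (1024)            -> (589 * 1024) >> 10 = 589
 *   power *= scale_rt_power() (say 768)   -> (589 *  768) >> 10 = 441
 *
 * so the runqueue and group end up advertising ~441 power units out of the
 * nominal 1024 for that thread.
 */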
5356
Peter Zijlstra029632f2011-10-25 10:00:11 +02005357void update_group_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005358{
5359 struct sched_domain *child = sd->child;
5360 struct sched_group *group, *sdg = sd->groups;
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005361 unsigned long power, power_orig;
Vincent Guittot4ec44122011-12-12 20:21:08 +01005362 unsigned long interval;
5363
5364 interval = msecs_to_jiffies(sd->balance_interval);
5365 interval = clamp(interval, 1UL, max_load_balance_interval);
5366 sdg->sgp->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005367
5368 if (!child) {
5369 update_cpu_power(sd, cpu);
5370 return;
5371 }
5372
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005373 power_orig = power = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005374
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005375 if (child->flags & SD_OVERLAP) {
5376 /*
5377 * SD_OVERLAP domains cannot assume that child groups
5378 * span the current group.
5379 */
5380
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005381 for_each_cpu(cpu, sched_group_cpus(sdg)) {
5382 struct sched_group *sg = cpu_rq(cpu)->sd->groups;
5383
5384 power_orig += sg->sgp->power_orig;
5385 power += sg->sgp->power;
5386 }
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005387 } else {
5388 /*
5389 * !SD_OVERLAP domains can assume that child groups
5390 * span the current group.
5391 */
5392
5393 group = child->groups;
5394 do {
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005395 power_orig += group->sgp->power_orig;
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005396 power += group->sgp->power;
5397 group = group->next;
5398 } while (group != child->groups);
5399 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005400
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005401 sdg->sgp->power_orig = power_orig;
5402 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005403}
5404
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005405/*
 5406 * Try and fix up capacity for tiny siblings; this is needed when
5407 * things like SD_ASYM_PACKING need f_b_g to select another sibling
5408 * which on its own isn't powerful enough.
5409 *
5410 * See update_sd_pick_busiest() and check_asym_packing().
5411 */
5412static inline int
5413fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
5414{
5415 /*
Nikhil Rao1399fa72011-05-18 10:09:39 -07005416 * Only siblings can have significantly less than SCHED_POWER_SCALE
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005417 */
Peter Zijlstraa6c75f22011-04-07 14:09:52 +02005418 if (!(sd->flags & SD_SHARE_CPUPOWER))
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005419 return 0;
5420
5421 /*
5422 * If ~90% of the cpu_power is still there, we're good.
5423 */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005424 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005425 return 1;
5426
5427 return 0;
5428}
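/*
 * A quick numeric reading of the ~90% test above (illustrative, assumed
 * numbers): power * 32 > power_orig * 29 holds exactly when
 * power / power_orig > 29/32 ~= 0.906. So an SMT sibling with
 * power_orig = 589 still counts as a full capacity unit while its current
 * power stays above ~534, and is only discounted once RT/IRQ pressure pushes
 * it below that.
 */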
5429
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005430/*
5431 * Group imbalance indicates (and tries to solve) the problem where balancing
5432 * groups is inadequate due to tsk_cpus_allowed() constraints.
5433 *
5434 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
5435 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
5436 * Something like:
5437 *
5438 * { 0 1 2 3 } { 4 5 6 7 }
5439 * * * * *
5440 *
5441 * If we were to balance group-wise we'd place two tasks in the first group and
5442 * two tasks in the second group. Clearly this is undesired as it will overload
5443 * cpu 3 and leave one of the cpus in the second group unused.
5444 *
5445 * The current solution to this issue is detecting the skew in the first group
Peter Zijlstra62633222013-08-19 12:41:09 +02005446 * by noticing the lower domain failed to reach balance and had difficulty
5447 * moving tasks due to affinity constraints.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005448 *
 5449 * When this is so detected, this group becomes a candidate for busiest; see
Kamalesh Babulaled1b7732013-10-13 23:06:15 +05305450 * update_sd_pick_busiest(). And calculate_imbalance() and
Peter Zijlstra62633222013-08-19 12:41:09 +02005451 * find_busiest_group() avoid some of the usual balance conditions to allow it
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005452 * to create an effective group imbalance.
5453 *
5454 * This is a somewhat tricky proposition since the next run might not find the
5455 * group imbalance and decide the groups need to be balanced again. A most
5456 * subtle and fragile situation.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005457 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005458
Peter Zijlstra62633222013-08-19 12:41:09 +02005459static inline int sg_imbalanced(struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005460{
Peter Zijlstra62633222013-08-19 12:41:09 +02005461 return group->sgp->imbalance;
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005462}
5463
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005464/*
5465 * Compute the group capacity.
5466 *
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005467 * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
5468 * first dividing out the smt factor and computing the actual number of cores
 5469 * and then capping the power-based capacity with that core count.
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005470 */
5471static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
5472{
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005473 unsigned int capacity, smt, cpus;
5474 unsigned int power, power_orig;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005475
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005476 power = group->sgp->power;
5477 power_orig = group->sgp->power_orig;
5478 cpus = group->group_weight;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005479
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005480 /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
5481 smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
5482 capacity = cpus / smt; /* cores */
5483
5484 capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005485 if (!capacity)
5486 capacity = fix_small_capacity(env->sd, group);
5487
5488 return capacity;
5489}
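/*
 * Worked example of the phantom-core correction above (illustrative only,
 * assumed numbers): a group of 8 SMT-2 threads whose 4 cores each report
 * power_orig = 1178 gives power_orig = 4712 for the group. The naive
 * DIV_ROUND_CLOSEST(4712, SCHED_POWER_SCALE) = 5 would invent a fifth,
 * phantom core. With the smt factor divided out first:
 *
 *   smt      = DIV_ROUND_UP(1024 * 8, 4712) = 2
 *   capacity = 8 / 2                         = 4 cores
 *   capacity = min(4, DIV_ROUND_CLOSEST(power, 1024))
 *
 * so the group is capped at its 4 real cores.
 */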
5490
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005491/**
5492 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
5493 * @env: The load balancing environment.
5494 * @group: sched_group whose statistics are to be updated.
5495 * @load_idx: Load index of sched_domain of this_cpu for load calc.
5496 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005497 * @sgs: variable to hold the statistics for this group.
5498 */
5499static inline void update_sg_lb_stats(struct lb_env *env,
5500 struct sched_group *group, int load_idx,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005501 int local_group, struct sg_lb_stats *sgs)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005502{
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005503 unsigned long nr_running;
5504 unsigned long load;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005505 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005506
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005507 memset(sgs, 0, sizeof(*sgs));
5508
Michael Wangb94031302012-07-12 16:10:13 +08005509 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005510 struct rq *rq = cpu_rq(i);
5511
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02005512 nr_running = rq->nr_running;
5513
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005514 /* Bias balancing toward cpus of our domain */
Peter Zijlstra62633222013-08-19 12:41:09 +02005515 if (local_group)
Peter Zijlstra04f733b2012-05-11 00:12:02 +02005516 load = target_load(i, load_idx);
Peter Zijlstra62633222013-08-19 12:41:09 +02005517 else
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005518 load = source_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005519
5520 sgs->group_load += load;
Peter Zijlstrae44bc5c2012-05-11 00:22:12 +02005521 sgs->sum_nr_running += nr_running;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005522#ifdef CONFIG_NUMA_BALANCING
5523 sgs->nr_numa_running += rq->nr_numa_running;
5524 sgs->nr_preferred_running += rq->nr_preferred_running;
5525#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005526 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005527 if (idle_cpu(i))
5528 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005529 }
5530
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005531 /* Adjust by relative CPU power of the group */
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005532 sgs->group_power = group->sgp->power;
5533 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005534
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005535 if (sgs->sum_nr_running)
Peter Zijlstra38d0f772013-08-15 19:47:56 +02005536 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005537
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005538 sgs->group_weight = group->group_weight;
Nikhil Raofab47622010-10-15 13:12:29 -07005539
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005540 sgs->group_imb = sg_imbalanced(group);
5541 sgs->group_capacity = sg_capacity(env, group);
5542
Nikhil Raofab47622010-10-15 13:12:29 -07005543 if (sgs->group_capacity > sgs->sum_nr_running)
5544 sgs->group_has_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005545}
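/*
 * Illustrative example of the per-group averages computed above (assumed
 * numbers): a group with group_load = 2048, group_power = 1024 and
 * sum_nr_running = 4 ends up with
 *
 *   avg_load      = (2048 * SCHED_POWER_SCALE) / 1024 = 2048
 *   load_per_task = sum_weighted_load / 4
 *
 * i.e. avg_load is load normalized to one unit of cpu power, which is what
 * update_sd_pick_busiest() and calculate_imbalance() compare across groups
 * of different power.
 */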
5546
5547/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10005548 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07005549 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005550 * @sds: sched_domain statistics
5551 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10005552 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10005553 *
5554 * Determine if @sg is a busier group than the previously selected
5555 * busiest group.
Yacine Belkadie69f6182013-07-12 20:45:47 +02005556 *
5557 * Return: %true if @sg is a busier group than the previously selected
5558 * busiest group. %false otherwise.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005559 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005560static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10005561 struct sd_lb_stats *sds,
5562 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005563 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005564{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005565 if (sgs->avg_load <= sds->busiest_stat.avg_load)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005566 return false;
5567
5568 if (sgs->sum_nr_running > sgs->group_capacity)
5569 return true;
5570
5571 if (sgs->group_imb)
5572 return true;
5573
5574 /*
5575 * ASYM_PACKING needs to move all the work to the lowest
5576 * numbered CPUs in the group, therefore mark all groups
5577 * higher than ourself as busy.
5578 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005579 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
5580 env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005581 if (!sds->busiest)
5582 return true;
5583
5584 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
5585 return true;
5586 }
5587
5588 return false;
5589}
5590
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005591#ifdef CONFIG_NUMA_BALANCING
5592static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5593{
5594 if (sgs->sum_nr_running > sgs->nr_numa_running)
5595 return regular;
5596 if (sgs->sum_nr_running > sgs->nr_preferred_running)
5597 return remote;
5598 return all;
5599}
5600
5601static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5602{
5603 if (rq->nr_running > rq->nr_numa_running)
5604 return regular;
5605 if (rq->nr_running > rq->nr_preferred_running)
5606 return remote;
5607 return all;
5608}
5609#else
5610static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5611{
5612 return all;
5613}
5614
5615static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5616{
5617 return regular;
5618}
5619#endif /* CONFIG_NUMA_BALANCING */
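/*
 * Illustrative classification example (assumed numbers): a runqueue with
 * nr_running = 4, nr_numa_running = 3 and nr_preferred_running = 1 is
 * 'regular' (it still has a !numa task to migrate); with nr_running = 3,
 * nr_numa_running = 3 and nr_preferred_running = 1 it becomes 'remote'
 * (only misplaced numa tasks are left); with all three equal it is 'all'
 * (nothing but well-placed numa tasks remain).
 */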
5620
Michael Neuling532cb4c2010-06-08 14:57:02 +10005621/**
Hui Kang461819a2011-10-11 23:00:59 -04005622 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07005623 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005624 * @sds: variable to hold the statistics for this sched_domain.
5625 */
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005626static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005627{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005628 struct sched_domain *child = env->sd->child;
5629 struct sched_group *sg = env->sd->groups;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005630 struct sg_lb_stats tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005631 int load_idx, prefer_sibling = 0;
5632
5633 if (child && child->flags & SD_PREFER_SIBLING)
5634 prefer_sibling = 1;
5635
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005636 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005637
5638 do {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005639 struct sg_lb_stats *sgs = &tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005640 int local_group;
5641
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005642 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005643 if (local_group) {
5644 sds->local = sg;
5645 sgs = &sds->local_stat;
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005646
5647 if (env->idle != CPU_NEWLY_IDLE ||
5648 time_after_eq(jiffies, sg->sgp->next_update))
5649 update_group_power(env->sd, env->dst_cpu);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005650 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005651
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005652 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005653
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005654 if (local_group)
5655 goto next_group;
5656
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005657 /*
 5658 * In case the child domain prefers that tasks go to siblings
Michael Neuling532cb4c2010-06-08 14:57:02 +10005659 * first, lower the sg capacity to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07005660 * and move all the excess tasks away. We lower the capacity
5661 * of a group only if the local group has the capacity to fit
5662 * these excess tasks, i.e. nr_running < group_capacity. The
5663 * extra check prevents the case where you always pull from the
5664 * heaviest group when it is already under-utilized (possible
 5665 * when a single large-weight task outweighs the tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005666 */
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005667 if (prefer_sibling && sds->local &&
5668 sds->local_stat.group_has_capacity)
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005669 sgs->group_capacity = min(sgs->group_capacity, 1U);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005670
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005671 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005672 sds->busiest = sg;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005673 sds->busiest_stat = *sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005674 }
5675
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005676next_group:
5677 /* Now, start updating sd_lb_stats */
5678 sds->total_load += sgs->group_load;
5679 sds->total_pwr += sgs->group_power;
5680
Michael Neuling532cb4c2010-06-08 14:57:02 +10005681 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005682 } while (sg != env->sd->groups);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005683
5684 if (env->sd->flags & SD_NUMA)
5685 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
Michael Neuling532cb4c2010-06-08 14:57:02 +10005686}
5687
Michael Neuling532cb4c2010-06-08 14:57:02 +10005688/**
5689 * check_asym_packing - Check to see if the group is packed into the
 5690 * sched domain.
5691 *
 5692 * This is primarily intended to be used at the sibling level. Some
5693 * cores like POWER7 prefer to use lower numbered SMT threads. In the
5694 * case of POWER7, it can move to lower SMT modes only when higher
5695 * threads are idle. When in lower SMT modes, the threads will
5696 * perform better since they share less core resources. Hence when we
5697 * have idle threads, we want them to be the higher ones.
5698 *
5699 * This packing function is run on idle threads. It checks to see if
5700 * the busiest CPU in this domain (core in the P7 case) has a higher
5701 * CPU number than the packing function is being run on. Here we are
 5702 * assuming a lower CPU number is equivalent to a lower SMT thread
 5703 * number.
5704 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02005705 * Return: 1 when packing is required and a task should be moved to
Michael Neulingb6b12292010-06-10 12:06:21 +10005706 * this CPU. The amount of the imbalance is returned in *imbalance.
5707 *
Randy Dunlapcd968912012-06-08 13:18:33 -07005708 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005709 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10005710 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005711static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005712{
5713 int busiest_cpu;
5714
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005715 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10005716 return 0;
5717
5718 if (!sds->busiest)
5719 return 0;
5720
5721 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005722 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005723 return 0;
5724
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005725 env->imbalance = DIV_ROUND_CLOSEST(
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005726 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
5727 SCHED_POWER_SCALE);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005728
Michael Neuling532cb4c2010-06-08 14:57:02 +10005729 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005730}
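/*
 * Worked example of the imbalance computed above (illustrative, assumed
 * numbers): if the busiest (higher-numbered) sibling group reports
 * avg_load = 512 and group_power = 589, the amount asked to be moved is
 *
 *   imbalance = DIV_ROUND_CLOSEST(512 * 589, 1024) = 295
 *
 * i.e. the busiest group's load converted back from the power-normalized
 * scale into plain weighted load, so move_tasks() can drain it completely.
 */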
5731
5732/**
5733 * fix_small_imbalance - Calculate the minor imbalance that exists
5734 * amongst the groups of a sched_domain, during
5735 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07005736 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005737 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005738 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005739static inline
5740void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005741{
5742 unsigned long tmp, pwr_now = 0, pwr_move = 0;
5743 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005744 unsigned long scaled_busy_load_per_task;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005745 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005746
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005747 local = &sds->local_stat;
5748 busiest = &sds->busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005749
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005750 if (!local->sum_nr_running)
5751 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
5752 else if (busiest->load_per_task > local->load_per_task)
5753 imbn = 1;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005754
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005755 scaled_busy_load_per_task =
5756 (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005757 busiest->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005758
Vladimir Davydov3029ede2013-09-15 17:49:14 +04005759 if (busiest->avg_load + scaled_busy_load_per_task >=
5760 local->avg_load + (scaled_busy_load_per_task * imbn)) {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005761 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005762 return;
5763 }
5764
5765 /*
 5766 * OK, we don't have enough imbalance to justify moving tasks;
 5767 * however, we may be able to increase the total CPU power used by
5768 * moving them.
5769 */
5770
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005771 pwr_now += busiest->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005772 min(busiest->load_per_task, busiest->avg_load);
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005773 pwr_now += local->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005774 min(local->load_per_task, local->avg_load);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005775 pwr_now /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005776
5777 /* Amount of load we'd subtract */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005778 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005779 busiest->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005780 if (busiest->avg_load > tmp) {
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005781 pwr_move += busiest->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005782 min(busiest->load_per_task,
5783 busiest->avg_load - tmp);
5784 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005785
5786 /* Amount of load we'd add */
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005787 if (busiest->avg_load * busiest->group_power <
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005788 busiest->load_per_task * SCHED_POWER_SCALE) {
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005789 tmp = (busiest->avg_load * busiest->group_power) /
5790 local->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005791 } else {
5792 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005793 local->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005794 }
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005795 pwr_move += local->group_power *
5796 min(local->load_per_task, local->avg_load + tmp);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005797 pwr_move /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005798
5799 /* Move if we gain throughput */
5800 if (pwr_move > pwr_now)
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005801 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005802}
5803
5804/**
5805 * calculate_imbalance - Calculate the amount of imbalance present within the
5806 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005807 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005808 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005809 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005810static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005811{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005812 unsigned long max_pull, load_above_capacity = ~0UL;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005813 struct sg_lb_stats *local, *busiest;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005814
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005815 local = &sds->local_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005816 busiest = &sds->busiest_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005817
5818 if (busiest->group_imb) {
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005819 /*
5820 * In the group_imb case we cannot rely on group-wide averages
5821 * to ensure cpu-load equilibrium, look at wider averages. XXX
5822 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005823 busiest->load_per_task =
5824 min(busiest->load_per_task, sds->avg_load);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005825 }
5826
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005827 /*
5828 * In the presence of smp nice balancing, certain scenarios can have
 5829 * max load less than avg load (as we skip the groups at or below
 5830 * their cpu_power while calculating max_load).
5831 */
Vladimir Davydovb1885552013-09-15 17:49:13 +04005832 if (busiest->avg_load <= sds->avg_load ||
5833 local->avg_load >= sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005834 env->imbalance = 0;
5835 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005836 }
5837
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005838 if (!busiest->group_imb) {
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005839 /*
5840 * Don't want to pull so many tasks that a group would go idle.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005841 * Except of course for the group_imb case, since then we might
5842 * have to drop below capacity to reach cpu-load equilibrium.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005843 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005844 load_above_capacity =
5845 (busiest->sum_nr_running - busiest->group_capacity);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005846
Nikhil Rao1399fa72011-05-18 10:09:39 -07005847 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005848 load_above_capacity /= busiest->group_power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005849 }
5850
5851 /*
5852 * We're trying to get all the cpus to the average_load, so we don't
5853 * want to push ourselves above the average load, nor do we wish to
5854 * reduce the max loaded cpu below the average load. At the same time,
5855 * we also don't want to reduce the group load below the group capacity
5856 * (so that we can implement power-savings policies etc). Thus we look
5857 * for the minimum possible imbalance.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005858 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005859 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005860
5861 /* How much load to actually move to equalise the imbalance */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005862 env->imbalance = min(
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005863 max_pull * busiest->group_power,
5864 (sds->avg_load - local->avg_load) * local->group_power
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005865 ) / SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005866
5867 /*
 5868 * If *imbalance is less than the average load per runnable task,
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005869 * there is no guarantee that any task will be moved, so we
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005870 * consider bumping its value to force at least one task to be
 5871 * moved.
5872 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005873 if (env->imbalance < busiest->load_per_task)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005874 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005875}
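/*
 * Worked example of the main imbalance formula above (illustrative, assumed
 * numbers): with sds->avg_load = 1024, a busiest group at avg_load = 1536
 * and a local group at avg_load = 512, both with group_power = 1024 and
 * ample load_above_capacity:
 *
 *   max_pull  = min(1536 - 1024, load_above_capacity)        = 512
 *   imbalance = min(512 * 1024, (1024 - 512) * 1024) / 1024  = 512
 *
 * i.e. we pull just enough weighted load to bring the busiest group down to
 * the domain average without pushing the local group above it.
 */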
Nikhil Raofab47622010-10-15 13:12:29 -07005876
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005877/******* find_busiest_group() helpers end here *********************/
5878
5879/**
5880 * find_busiest_group - Returns the busiest group within the sched_domain
5881 * if there is an imbalance. If there isn't an imbalance, and
5882 * the user has opted for power-savings, it returns a group whose
5883 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
5884 * such a group exists.
5885 *
5886 * Also calculates the amount of weighted load which should be moved
5887 * to restore balance.
5888 *
Randy Dunlapcd968912012-06-08 13:18:33 -07005889 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005890 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02005891 * Return: - The busiest group if imbalance exists.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005892 * - If no imbalance and user has opted for power-savings balance,
5893 * return the least loaded group whose CPUs can be
5894 * put to idle by rebalancing its tasks onto our group.
5895 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005896static struct sched_group *find_busiest_group(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005897{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005898 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005899 struct sd_lb_stats sds;
5900
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005901 init_sd_lb_stats(&sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005902
5903 /*
 5904 * Compute the various statistics relevant for load balancing at
5905 * this level.
5906 */
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005907 update_sd_lb_stats(env, &sds);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005908 local = &sds.local_stat;
5909 busiest = &sds.busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005910
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005911 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
5912 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10005913 return sds.busiest;
5914
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005915 /* There is no busy sibling group to pull tasks from */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005916 if (!sds.busiest || busiest->sum_nr_running == 0)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005917 goto out_balanced;
5918
Nikhil Rao1399fa72011-05-18 10:09:39 -07005919 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
Ken Chenb0432d82011-04-07 17:23:22 -07005920
Peter Zijlstra866ab432011-02-21 18:56:47 +01005921 /*
5922 * If the busiest group is imbalanced the below checks don't
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005923 * work because they assume all things are equal, which typically
Peter Zijlstra866ab432011-02-21 18:56:47 +01005924 * isn't true due to cpus_allowed constraints and the like.
5925 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005926 if (busiest->group_imb)
Peter Zijlstra866ab432011-02-21 18:56:47 +01005927 goto force_balance;
5928
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005929 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005930 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
5931 !busiest->group_has_capacity)
Nikhil Raofab47622010-10-15 13:12:29 -07005932 goto force_balance;
5933
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005934 /*
5935 * If the local group is more busy than the selected busiest group
5936 * don't try and pull any tasks.
5937 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005938 if (local->avg_load >= busiest->avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005939 goto out_balanced;
5940
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005941 /*
5942 * Don't pull any tasks if this group is already above the domain
5943 * average load.
5944 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005945 if (local->avg_load >= sds.avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005946 goto out_balanced;
5947
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005948 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005949 /*
 5950 * This cpu is idle. If the busiest group doesn't
 5951 * have more tasks than the number of available cpus and
 5952 * there is no imbalance between this and the busiest group
 5953 * wrt idle cpus, it is balanced.
5954 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005955 if ((local->idle_cpus < busiest->idle_cpus) &&
5956 busiest->sum_nr_running <= busiest->group_weight)
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005957 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01005958 } else {
5959 /*
5960 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
5961 * imbalance_pct to be conservative.
5962 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005963 if (100 * busiest->avg_load <=
5964 env->sd->imbalance_pct * local->avg_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01005965 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005966 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005967
Nikhil Raofab47622010-10-15 13:12:29 -07005968force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005969 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005970 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005971 return sds.busiest;
5972
5973out_balanced:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005974 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005975 return NULL;
5976}
5977
5978/*
5979 * find_busiest_queue - find the busiest runqueue among the cpus in group.
5980 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005981static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08005982 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005983{
5984 struct rq *busiest = NULL, *rq;
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005985 unsigned long busiest_load = 0, busiest_power = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005986 int i;
5987
Peter Zijlstra6906a402013-08-19 15:20:21 +02005988 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005989 unsigned long power, capacity, wl;
5990 enum fbq_type rt;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005991
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005992 rq = cpu_rq(i);
5993 rt = fbq_classify_rq(rq);
5994
5995 /*
5996 * We classify groups/runqueues into three groups:
5997 * - regular: there are !numa tasks
5998 * - remote: there are numa tasks that run on the 'wrong' node
5999 * - all: there is no distinction
6000 *
6001 * In order to avoid migrating ideally placed numa tasks,
6002 * ignore those when there's better options.
6003 *
6004 * If we ignore the actual busiest queue to migrate another
6005 * task, the next balance pass can still reduce the busiest
6006 * queue by moving tasks around inside the node.
6007 *
6008 * If we cannot move enough load due to this classification
6009 * the next pass will adjust the group classification and
6010 * allow migration of more tasks.
6011 *
6012 * Both cases only affect the total convergence complexity.
6013 */
6014 if (rt > env->fbq_type)
6015 continue;
6016
6017 power = power_of(i);
6018 capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006019 if (!capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006020 capacity = fix_small_capacity(env->sd, group);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006021
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006022 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006023
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006024 /*
6025 * When comparing with imbalance, use weighted_cpuload()
6026 * which is not scaled with the cpu power.
6027 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006028 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006029 continue;
6030
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006031 /*
6032 * For the load comparisons with the other cpu's, consider
6033 * the weighted_cpuload() scaled with the cpu power, so that
6034 * the load can be moved away from the cpu that is potentially
6035 * running at a lower capacity.
Joonsoo Kim95a79b82013-08-06 17:36:41 +09006036 *
6037 * Thus we're looking for max(wl_i / power_i), crosswise
6038 * multiplication to rid ourselves of the division works out
6039 * to: wl_i * power_j > wl_j * power_i; where j is our
6040 * previous maximum.
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006041 */
Joonsoo Kim95a79b82013-08-06 17:36:41 +09006042 if (wl * busiest_power > busiest_load * power) {
6043 busiest_load = wl;
6044 busiest_power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006045 busiest = rq;
6046 }
6047 }
6048
6049 return busiest;
6050}
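/*
 * Illustrative example of the cross-multiplied comparison above (assumed
 * numbers): cpu A with wl = 800 and power = 512 (heavily frequency/rt
 * scaled) versus cpu B with wl = 1000 and power = 1024. Comparing
 * wl_A * power_B = 800 * 1024 against wl_B * power_A = 1000 * 512 picks
 * cpu A as busiest: 800 of load on half the power is relatively heavier
 * than 1000 on full power, and we never have to divide.
 */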
6051
6052/*
6053 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
 6054 * it doesn't matter much so long as it is large enough.
6055 */
6056#define MAX_PINNED_INTERVAL 512
6057
6058/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09006059DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006060
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006061static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01006062{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006063 struct sched_domain *sd = env->sd;
6064
6065 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10006066
6067 /*
6068 * ASYM_PACKING needs to force migrate tasks from busy but
6069 * higher numbered CPUs in order to pack all tasks in the
6070 * lowest numbered CPUs.
6071 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006072 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10006073 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01006074 }
6075
6076 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
6077}
6078
Tejun Heo969c7922010-05-06 18:49:21 +02006079static int active_load_balance_cpu_stop(void *data);
6080
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006081static int should_we_balance(struct lb_env *env)
6082{
6083 struct sched_group *sg = env->sd->groups;
6084 struct cpumask *sg_cpus, *sg_mask;
6085 int cpu, balance_cpu = -1;
6086
6087 /*
6088 * In the newly idle case, we will allow all the cpu's
6089 * to do the newly idle load balance.
6090 */
6091 if (env->idle == CPU_NEWLY_IDLE)
6092 return 1;
6093
6094 sg_cpus = sched_group_cpus(sg);
6095 sg_mask = sched_group_mask(sg);
6096 /* Try to find first idle cpu */
6097 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
6098 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
6099 continue;
6100
6101 balance_cpu = cpu;
6102 break;
6103 }
6104
6105 if (balance_cpu == -1)
6106 balance_cpu = group_balance_cpu(sg);
6107
6108 /*
 6109 * The first idle cpu, or else the first cpu (busiest) in this sched group,
 6110 * is eligible for doing load balancing at this and higher domains.
6111 */
Joonsoo Kimb0cff9d2013-09-10 15:54:49 +09006112 return balance_cpu == env->dst_cpu;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006113}
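/*
 * Illustrative example (assumed topology): for a group spanning cpus
 * { 4 5 6 7 } where cpus 5 and 6 are idle, the loop above elects
 * balance_cpu = 5 (first idle cpu); only the caller running on cpu 5 gets a
 * non-zero return and proceeds with the balance, everyone else bails out
 * early. If no cpu in the group is idle, group_balance_cpu(sg) (typically
 * the first eligible cpu of the group) is elected instead.
 */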
6114
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006115/*
6116 * Check this_cpu to ensure it is balanced within domain. Attempt to move
6117 * tasks if there is an imbalance.
6118 */
6119static int load_balance(int this_cpu, struct rq *this_rq,
6120 struct sched_domain *sd, enum cpu_idle_type idle,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006121 int *continue_balancing)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006122{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306123 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra62633222013-08-19 12:41:09 +02006124 struct sched_domain *sd_parent = sd->parent;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006125 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006126 struct rq *busiest;
6127 unsigned long flags;
Joonsoo Kime6252c32013-04-23 17:27:41 +09006128 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006129
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006130 struct lb_env env = {
6131 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01006132 .dst_cpu = this_cpu,
6133 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306134 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006135 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02006136 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08006137 .cpus = cpus,
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006138 .fbq_type = all,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006139 };
6140
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006141 /*
6142 * For NEWLY_IDLE load_balancing, we don't need to consider
6143 * other cpus in our group
6144 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006145 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006146 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006147
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006148 cpumask_copy(cpus, cpu_active_mask);
6149
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006150 schedstat_inc(sd, lb_count[idle]);
6151
6152redo:
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006153 if (!should_we_balance(&env)) {
6154 *continue_balancing = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006155 goto out_balanced;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006156 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006157
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006158 group = find_busiest_group(&env);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006159 if (!group) {
6160 schedstat_inc(sd, lb_nobusyg[idle]);
6161 goto out_balanced;
6162 }
6163
Michael Wangb94031302012-07-12 16:10:13 +08006164 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006165 if (!busiest) {
6166 schedstat_inc(sd, lb_nobusyq[idle]);
6167 goto out_balanced;
6168 }
6169
Michael Wang78feefc2012-08-06 16:41:59 +08006170 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006171
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006172 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006173
6174 ld_moved = 0;
6175 if (busiest->nr_running > 1) {
6176 /*
6177 * Attempt to move tasks. If find_busiest_group has found
6178 * an imbalance but busiest->nr_running <= 1, the group is
6179 * still unbalanced. ld_moved simply stays zero, so it is
6180 * correctly treated as an imbalance.
6181 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006182 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02006183 env.src_cpu = busiest->cpu;
6184 env.src_rq = busiest;
6185 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006186
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01006187more_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006188 local_irq_save(flags);
Michael Wang78feefc2012-08-06 16:41:59 +08006189 double_rq_lock(env.dst_rq, busiest);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306190
6191 /*
6192 * cur_ld_moved - load moved in current iteration
6193 * ld_moved - cumulative load moved across iterations
6194 */
6195 cur_ld_moved = move_tasks(&env);
6196 ld_moved += cur_ld_moved;
Michael Wang78feefc2012-08-06 16:41:59 +08006197 double_rq_unlock(env.dst_rq, busiest);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006198 local_irq_restore(flags);
6199
6200 /*
6201 * some other cpu did the load balance for us.
6202 */
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306203 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
6204 resched_cpu(env.dst_cpu);
6205
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09006206 if (env.flags & LBF_NEED_BREAK) {
6207 env.flags &= ~LBF_NEED_BREAK;
6208 goto more_balance;
6209 }
6210
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306211 /*
6212 * Revisit (affine) tasks on src_cpu that couldn't be moved to
6213 * us and move them to an alternate dst_cpu in our sched_group
6214 * where they can run. The upper limit on how many times we
6215 * iterate on same src_cpu is dependent on number of cpus in our
6216 * sched_group.
6217 *
6218 * This changes load balance semantics a bit on who can move
6219 * load to a given_cpu. In addition to the given_cpu itself
 6220 * (or an ilb_cpu acting on its behalf where given_cpu is
6221 * nohz-idle), we now have balance_cpu in a position to move
6222 * load to given_cpu. In rare situations, this may cause
6223 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
6224 * _independently_ and at _same_ time to move some load to
 6225 * given_cpu) causing excess load to be moved to given_cpu.
6226 * This however should not happen so much in practice and
6227 * moreover subsequent load balance cycles should correct the
6228 * excess load moved.
6229 */
Peter Zijlstra62633222013-08-19 12:41:09 +02006230 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306231
Vladimir Davydov7aff2e32013-09-15 21:30:13 +04006232			/* Prevent re-selecting dst_cpu via env's cpus */
6233 cpumask_clear_cpu(env.dst_cpu, env.cpus);
6234
Michael Wang78feefc2012-08-06 16:41:59 +08006235 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306236 env.dst_cpu = env.new_dst_cpu;
Peter Zijlstra62633222013-08-19 12:41:09 +02006237 env.flags &= ~LBF_DST_PINNED;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306238 env.loop = 0;
6239 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006240
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306241 /*
6242 * Go back to "more_balance" rather than "redo" since we
6243 * need to continue with same src_cpu.
6244 */
6245 goto more_balance;
6246 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006247
Peter Zijlstra62633222013-08-19 12:41:09 +02006248 /*
6249 * We failed to reach balance because of affinity.
6250 */
6251 if (sd_parent) {
6252 int *group_imbalance = &sd_parent->groups->sgp->imbalance;
6253
6254 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
6255 *group_imbalance = 1;
6256 } else if (*group_imbalance)
6257 *group_imbalance = 0;
6258 }
6259
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006260 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006261 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006262 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05306263 if (!cpumask_empty(cpus)) {
6264 env.loop = 0;
6265 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006266 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05306267 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006268 goto out_balanced;
6269 }
6270 }
6271
6272 if (!ld_moved) {
6273 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07006274 /*
6275 * Increment the failure counter only on periodic balance.
6276 * We do not want newidle balance, which can be very
6277 * frequent, pollute the failure counter causing
6278 * excessive cache_hot migrations and active balances.
6279 */
6280 if (idle != CPU_NEWLY_IDLE)
6281 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006282
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006283 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006284 raw_spin_lock_irqsave(&busiest->lock, flags);
6285
Tejun Heo969c7922010-05-06 18:49:21 +02006286 /* don't kick the active_load_balance_cpu_stop,
6287 * if the curr task on busiest cpu can't be
6288 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006289 */
6290 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02006291 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006292 raw_spin_unlock_irqrestore(&busiest->lock,
6293 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006294 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006295 goto out_one_pinned;
6296 }
6297
Tejun Heo969c7922010-05-06 18:49:21 +02006298 /*
6299 * ->active_balance synchronizes accesses to
6300 * ->active_balance_work. Once set, it's cleared
6301 * only after active load balance is finished.
6302 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006303 if (!busiest->active_balance) {
6304 busiest->active_balance = 1;
6305 busiest->push_cpu = this_cpu;
6306 active_balance = 1;
6307 }
6308 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02006309
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006310 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02006311 stop_one_cpu_nowait(cpu_of(busiest),
6312 active_load_balance_cpu_stop, busiest,
6313 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006314 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006315
6316 /*
6317 * We've kicked active balancing, reset the failure
6318 * counter.
6319 */
6320 sd->nr_balance_failed = sd->cache_nice_tries+1;
6321 }
6322 } else
6323 sd->nr_balance_failed = 0;
6324
6325 if (likely(!active_balance)) {
6326 /* We were unbalanced, so reset the balancing interval */
6327 sd->balance_interval = sd->min_interval;
6328 } else {
6329 /*
6330 * If we've begun active balancing, start to back off. This
6331 * case may not be covered by the all_pinned logic if there
6332 * is only 1 task on the busy runqueue (because we don't call
6333 * move_tasks).
6334 */
6335 if (sd->balance_interval < sd->max_interval)
6336 sd->balance_interval *= 2;
6337 }
6338
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006339 goto out;
6340
6341out_balanced:
6342 schedstat_inc(sd, lb_balanced[idle]);
6343
6344 sd->nr_balance_failed = 0;
6345
6346out_one_pinned:
6347 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006348 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02006349 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006350 (sd->balance_interval < sd->max_interval))
6351 sd->balance_interval *= 2;
6352
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08006353 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006354out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006355 return ld_moved;
6356}
6357
6358/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006359 * idle_balance is called by schedule() if this_cpu is about to become
6360 * idle. Attempts to pull tasks from other CPUs.
6361 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006362void idle_balance(int this_cpu, struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006363{
6364 struct sched_domain *sd;
6365 int pulled_task = 0;
6366 unsigned long next_balance = jiffies + HZ;
Jason Low9bd721c2013-09-13 11:26:52 -07006367 u64 curr_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006368
Frederic Weisbecker78becc22013-04-12 01:51:02 +02006369 this_rq->idle_stamp = rq_clock(this_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006370
6371 if (this_rq->avg_idle < sysctl_sched_migration_cost)
6372 return;
6373
Peter Zijlstraf492e122009-12-23 15:29:42 +01006374 /*
6375 * Drop the rq->lock, but keep IRQ/preempt disabled.
6376 */
6377 raw_spin_unlock(&this_rq->lock);
6378
Paul Turner48a16752012-10-04 13:18:31 +02006379 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02006380 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006381 for_each_domain(this_cpu, sd) {
6382 unsigned long interval;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006383 int continue_balancing = 1;
Jason Low9bd721c2013-09-13 11:26:52 -07006384 u64 t0, domain_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006385
6386 if (!(sd->flags & SD_LOAD_BALANCE))
6387 continue;
6388
Jason Low9bd721c2013-09-13 11:26:52 -07006389 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
6390 break;
6391
Peter Zijlstraf492e122009-12-23 15:29:42 +01006392 if (sd->flags & SD_BALANCE_NEWIDLE) {
Jason Low9bd721c2013-09-13 11:26:52 -07006393 t0 = sched_clock_cpu(this_cpu);
6394
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006395 /* If we've pulled tasks over stop searching: */
Peter Zijlstraf492e122009-12-23 15:29:42 +01006396 pulled_task = load_balance(this_cpu, this_rq,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006397 sd, CPU_NEWLY_IDLE,
6398 &continue_balancing);
Jason Low9bd721c2013-09-13 11:26:52 -07006399
6400 domain_cost = sched_clock_cpu(this_cpu) - t0;
6401 if (domain_cost > sd->max_newidle_lb_cost)
6402 sd->max_newidle_lb_cost = domain_cost;
6403
6404 curr_cost += domain_cost;
Peter Zijlstraf492e122009-12-23 15:29:42 +01006405 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006406
6407 interval = msecs_to_jiffies(sd->balance_interval);
6408 if (time_after(next_balance, sd->last_balance + interval))
6409 next_balance = sd->last_balance + interval;
Nikhil Raod5ad1402010-11-17 11:42:04 -08006410 if (pulled_task) {
6411 this_rq->idle_stamp = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006412 break;
Nikhil Raod5ad1402010-11-17 11:42:04 -08006413 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006414 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006415 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01006416
6417 raw_spin_lock(&this_rq->lock);
6418
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006419 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
6420 /*
6421 * We are going idle. next_balance may be set based on
6422 * a busy processor. So reset next_balance.
6423 */
6424 this_rq->next_balance = next_balance;
6425 }
Jason Low9bd721c2013-09-13 11:26:52 -07006426
6427 if (curr_cost > this_rq->max_idle_balance_cost)
6428 this_rq->max_idle_balance_cost = curr_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006429}
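/*
 * Illustrative cost cut-off example for the loop above (assumed numbers):
 * with this_rq->avg_idle = 500,000 ns, a domain whose max_newidle_lb_cost is
 * 120,000 ns is only attempted while the accumulated curr_cost stays at or
 * below 380,000 ns; once curr_cost plus the next domain's worst-case cost
 * would exceed the expected idle time, the remaining (wider, more expensive)
 * domains are skipped, so newidle balancing never costs more than the idle
 * period it is trying to fill.
 */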
6430
6431/*
Tejun Heo969c7922010-05-06 18:49:21 +02006432 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
6433 * running tasks off the busiest CPU onto idle CPUs. It requires at
6434 * least 1 task to be running on each physical CPU where possible, and
6435 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006436 */
Tejun Heo969c7922010-05-06 18:49:21 +02006437static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006438{
Tejun Heo969c7922010-05-06 18:49:21 +02006439 struct rq *busiest_rq = data;
6440 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006441 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02006442 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006443 struct sched_domain *sd;
Tejun Heo969c7922010-05-06 18:49:21 +02006444
6445 raw_spin_lock_irq(&busiest_rq->lock);
6446
6447 /* make sure the requested cpu hasn't gone down in the meantime */
6448 if (unlikely(busiest_cpu != smp_processor_id() ||
6449 !busiest_rq->active_balance))
6450 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006451
6452 /* Is there any task to move? */
6453 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02006454 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006455
6456 /*
 6457	 * This condition is "impossible"; if it occurs
6458 * we need to fix it. Originally reported by
6459 * Bjorn Helgaas on a 128-cpu setup.
6460 */
6461 BUG_ON(busiest_rq == target_rq);
6462
6463 /* move a task from busiest_rq to target_rq */
6464 double_lock_balance(busiest_rq, target_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006465
6466 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02006467 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006468 for_each_domain(target_cpu, sd) {
6469 if ((sd->flags & SD_LOAD_BALANCE) &&
6470 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
6471 break;
6472 }
6473
6474 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006475 struct lb_env env = {
6476 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01006477 .dst_cpu = target_cpu,
6478 .dst_rq = target_rq,
6479 .src_cpu = busiest_rq->cpu,
6480 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006481 .idle = CPU_IDLE,
6482 };
6483
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006484 schedstat_inc(sd, alb_count);
6485
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006486 if (move_one_task(&env))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006487 schedstat_inc(sd, alb_pushed);
6488 else
6489 schedstat_inc(sd, alb_failed);
6490 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006491 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006492 double_unlock_balance(busiest_rq, target_rq);
Tejun Heo969c7922010-05-06 18:49:21 +02006493out_unlock:
6494 busiest_rq->active_balance = 0;
6495 raw_spin_unlock_irq(&busiest_rq->lock);
6496 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006497}
6498
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006499#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006500/*
6501 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006502 * - When one of the busy CPUs notices that an idle rebalance may be
 6503 *   needed, it kicks the idle load balancer, which then does idle
6504 * load balancing for all the idle CPUs.
6505 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006506static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006507 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006508 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006509 unsigned long next_balance; /* in jiffy units */
6510} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006511
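/*
 * Pick the CPU that should run the nohz idle load balance: the first CPU
 * in nohz.idle_cpus_mask, provided it is still idle.  Returns nr_cpu_ids
 * when there is no suitable CPU, which callers treat as "nobody to kick".
 */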
Peter Zijlstra8e7fbcb2012-01-09 11:28:35 +01006512static inline int find_new_ilb(int call_cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006513{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006514 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006515
Suresh Siddha786d6dc72011-12-01 17:07:35 -08006516 if (ilb < nr_cpu_ids && idle_cpu(ilb))
6517 return ilb;
6518
6519 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006520}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006521
6522/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006523 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
6524 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
6525 * CPU (if there is one).
6526 */
6527static void nohz_balancer_kick(int cpu)
6528{
6529 int ilb_cpu;
6530
6531 nohz.next_balance++;
6532
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006533 ilb_cpu = find_new_ilb(cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006534
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006535 if (ilb_cpu >= nr_cpu_ids)
6536 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006537
Suresh Siddhacd490c52011-12-06 11:26:34 -08006538 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08006539 return;
6540 /*
6541 * Use smp_send_reschedule() instead of resched_cpu().
 6542	 * This way we generate a sched IPI on the target cpu, which
 6543	 * is idle, and the softirq performing the nohz idle load balance
 6544	 * will run before that cpu returns from the IPI.
6545 */
6546 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006547 return;
6548}
6549
Alex Shic1cc0172012-09-10 15:10:58 +08006550static inline void nohz_balance_exit_idle(int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08006551{
6552 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
6553 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
6554 atomic_dec(&nohz.nr_cpus);
6555 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6556 }
6557}
6558
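/*
 * set_cpu_sd_state_busy()/set_cpu_sd_state_idle() keep the nr_busy_cpus
 * count of this CPU's sd_busy group in step with whether its tick runs;
 * nohz_kick_needed() reads that count to decide if an idle load balancer
 * must be kicked.
 */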
Suresh Siddha69e1e812011-12-01 17:07:33 -08006559static inline void set_cpu_sd_state_busy(void)
6560{
6561 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306562 int cpu = smp_processor_id();
Suresh Siddha69e1e812011-12-01 17:07:33 -08006563
Suresh Siddha69e1e812011-12-01 17:07:33 -08006564 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306565 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Vincent Guittot25f55d92013-04-23 16:59:02 +02006566
6567 if (!sd || !sd->nohz_idle)
6568 goto unlock;
6569 sd->nohz_idle = 0;
6570
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306571 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02006572unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08006573 rcu_read_unlock();
6574}
6575
6576void set_cpu_sd_state_idle(void)
6577{
6578 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306579 int cpu = smp_processor_id();
Suresh Siddha69e1e812011-12-01 17:07:33 -08006580
Suresh Siddha69e1e812011-12-01 17:07:33 -08006581 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306582 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Vincent Guittot25f55d92013-04-23 16:59:02 +02006583
6584 if (!sd || sd->nohz_idle)
6585 goto unlock;
6586 sd->nohz_idle = 1;
6587
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306588 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02006589unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08006590 rcu_read_unlock();
6591}
6592
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006593/*
Alex Shic1cc0172012-09-10 15:10:58 +08006594 * This routine will record that the cpu is going idle with tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006595 * This info will be used in performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006596 */
Alex Shic1cc0172012-09-10 15:10:58 +08006597void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006598{
Suresh Siddha71325962012-01-19 18:28:57 -08006599 /*
6600 * If this cpu is going down, then nothing needs to be done.
6601 */
6602 if (!cpu_active(cpu))
6603 return;
6604
Alex Shic1cc0172012-09-10 15:10:58 +08006605 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
6606 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006607
Alex Shic1cc0172012-09-10 15:10:58 +08006608 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
6609 atomic_inc(&nohz.nr_cpus);
6610 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006611}
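/*
 * From here on the CPU is visible in nohz.idle_cpus_mask and counted in
 * nohz.nr_cpus, so a busy CPU running nohz_kick_needed() may pick it as
 * the idle load balancer until nohz_balance_exit_idle() undoes the above.
 */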
Suresh Siddha71325962012-01-19 18:28:57 -08006612
Paul Gortmaker0db06282013-06-19 14:53:51 -04006613static int sched_ilb_notifier(struct notifier_block *nfb,
Suresh Siddha71325962012-01-19 18:28:57 -08006614 unsigned long action, void *hcpu)
6615{
6616 switch (action & ~CPU_TASKS_FROZEN) {
6617 case CPU_DYING:
Alex Shic1cc0172012-09-10 15:10:58 +08006618 nohz_balance_exit_idle(smp_processor_id());
Suresh Siddha71325962012-01-19 18:28:57 -08006619 return NOTIFY_OK;
6620 default:
6621 return NOTIFY_DONE;
6622 }
6623}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006624#endif
6625
6626static DEFINE_SPINLOCK(balancing);
6627
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006628/*
6629 * Scale the max load_balance interval with the number of CPUs in the system.
6630 * This trades load-balance latency on larger machines for less cross talk.
6631 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006632void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006633{
6634 max_load_balance_interval = HZ*num_online_cpus()/10;
6635}
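/*
 * Illustrative numbers only: with HZ=1000 and 16 CPUs online this comes
 * to 1600 jiffies, i.e. any single domain's balance interval is clamped
 * to at most about 1.6 seconds.
 */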
6636
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006637/*
6638 * It checks each scheduling domain to see if it is due to be balanced,
6639 * and initiates a balancing operation if so.
6640 *
Libinb9b08532013-04-01 19:14:01 +08006641 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006642 */
6643static void rebalance_domains(int cpu, enum cpu_idle_type idle)
6644{
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006645 int continue_balancing = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006646 struct rq *rq = cpu_rq(cpu);
6647 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02006648 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006649 /* Earliest time when we have to do rebalance again */
6650 unsigned long next_balance = jiffies + 60*HZ;
6651 int update_next_balance = 0;
Jason Lowf48627e2013-09-13 11:26:53 -07006652 int need_serialize, need_decay = 0;
6653 u64 max_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006654
Paul Turner48a16752012-10-04 13:18:31 +02006655 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08006656
Peter Zijlstradce840a2011-04-07 14:09:50 +02006657 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006658 for_each_domain(cpu, sd) {
Jason Lowf48627e2013-09-13 11:26:53 -07006659 /*
6660 * Decay the newidle max times here because this is a regular
6661 * visit to all the domains. Decay ~1% per second.
6662 */
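		/*
		 * 253/256 is roughly 0.988, so each step trims a little over
		 * 1%; at one step per second the stored maximum halves in
		 * about a minute (0.988^60 is close to 0.5).
		 */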
6663 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
6664 sd->max_newidle_lb_cost =
6665 (sd->max_newidle_lb_cost * 253) / 256;
6666 sd->next_decay_max_lb_cost = jiffies + HZ;
6667 need_decay = 1;
6668 }
6669 max_cost += sd->max_newidle_lb_cost;
6670
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006671 if (!(sd->flags & SD_LOAD_BALANCE))
6672 continue;
6673
Jason Lowf48627e2013-09-13 11:26:53 -07006674 /*
6675 * Stop the load balance at this level. There is another
6676 * CPU in our sched group which is doing load balancing more
6677 * actively.
6678 */
6679 if (!continue_balancing) {
6680 if (need_decay)
6681 continue;
6682 break;
6683 }
6684
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006685 interval = sd->balance_interval;
6686 if (idle != CPU_IDLE)
6687 interval *= sd->busy_factor;
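		/*
		 * Illustration (actual values depend on the domain tuning):
		 * a 64 ms base interval with a busy_factor of 32 means a busy
		 * CPU revisits this domain only about every two seconds.
		 */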
6688
6689 /* scale ms to jiffies */
6690 interval = msecs_to_jiffies(interval);
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006691 interval = clamp(interval, 1UL, max_load_balance_interval);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006692
6693 need_serialize = sd->flags & SD_SERIALIZE;
6694
6695 if (need_serialize) {
6696 if (!spin_trylock(&balancing))
6697 goto out;
6698 }
6699
6700 if (time_after_eq(jiffies, sd->last_balance + interval)) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006701 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006702 /*
Peter Zijlstra62633222013-08-19 12:41:09 +02006703 * The LBF_DST_PINNED logic could have changed
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09006704 * env->dst_cpu, so we can't know our idle
6705 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006706 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09006707 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006708 }
6709 sd->last_balance = jiffies;
6710 }
6711 if (need_serialize)
6712 spin_unlock(&balancing);
6713out:
6714 if (time_after(next_balance, sd->last_balance + interval)) {
6715 next_balance = sd->last_balance + interval;
6716 update_next_balance = 1;
6717 }
Jason Lowf48627e2013-09-13 11:26:53 -07006718 }
6719 if (need_decay) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006720 /*
Jason Lowf48627e2013-09-13 11:26:53 -07006721 * Ensure the rq-wide value also decays but keep it at a
6722 * reasonable floor to avoid funnies with rq->avg_idle.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006723 */
Jason Lowf48627e2013-09-13 11:26:53 -07006724 rq->max_idle_balance_cost =
6725 max((u64)sysctl_sched_migration_cost, max_cost);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006726 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006727 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006728
6729 /*
6730 * next_balance will be updated only when there is a need.
 6731	 * When the cpu is attached to a null domain, for example, it will not be
6732 * updated.
6733 */
6734 if (likely(update_next_balance))
6735 rq->next_balance = next_balance;
6736}
6737
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006738#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006739/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006740 * In the CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006741 * rebalancing for all the cpus for whom scheduler ticks are stopped.
6742 */
6743static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
6744{
6745 struct rq *this_rq = cpu_rq(this_cpu);
6746 struct rq *rq;
6747 int balance_cpu;
6748
Suresh Siddha1c792db2011-12-01 17:07:32 -08006749 if (idle != CPU_IDLE ||
6750 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
6751 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006752
6753 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08006754 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006755 continue;
6756
6757 /*
6758 * If this cpu gets work to do, stop the load balancing
 6759	 * work being done for other cpus. The next load-
 6760	 * balancing owner will pick it up.
6761 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08006762 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006763 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006764
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02006765 rq = cpu_rq(balance_cpu);
6766
6767 raw_spin_lock_irq(&rq->lock);
6768 update_rq_clock(rq);
6769 update_idle_cpu_load(rq);
6770 raw_spin_unlock_irq(&rq->lock);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006771
6772 rebalance_domains(balance_cpu, CPU_IDLE);
6773
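		/*
		 * Fold the remote rq's deadline into this_rq so that the
		 * value published via nohz.next_balance below reflects the
		 * earliest rebalance needed across the whole idle set.
		 */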
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006774 if (time_after(this_rq->next_balance, rq->next_balance))
6775 this_rq->next_balance = rq->next_balance;
6776 }
6777 nohz.next_balance = this_rq->next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08006778end:
6779 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006780}
6781
6782/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006783 * Current heuristic for kicking the idle load balancer in the presence
 6784 * of an idle cpu in the system:
6785 * - This rq has more than one task.
6786 * - At any scheduler domain level, this cpu's scheduler group has multiple
 6787 *   busy cpus exceeding the group's power.
 6788 * - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
6789 * domain span are idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006790 */
6791static inline int nohz_kick_needed(struct rq *rq, int cpu)
6792{
6793 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006794 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306795 struct sched_group_power *sgp;
6796 int nr_busy;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006797
Suresh Siddha1c792db2011-12-01 17:07:32 -08006798 if (unlikely(idle_cpu(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006799 return 0;
6800
Suresh Siddha1c792db2011-12-01 17:07:32 -08006801 /*
 6802	 * We may recently have been idle, with the tick running or stopped.
 6803	 * At the first busy tick after returning from idle, update the busy stats.
6804 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08006805 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08006806 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006807
6808 /*
6809 * None are in tickless mode and hence no need for NOHZ idle load
6810 * balancing.
6811 */
6812 if (likely(!atomic_read(&nohz.nr_cpus)))
6813 return 0;
Suresh Siddha1c792db2011-12-01 17:07:32 -08006814
6815 if (time_before(now, nohz.next_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006816 return 0;
6817
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006818 if (rq->nr_running >= 2)
6819 goto need_kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006820
Peter Zijlstra067491b2011-12-07 14:32:08 +01006821 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306822 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006823
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306824 if (sd) {
6825 sgp = sd->groups->sgp;
6826 nr_busy = atomic_read(&sgp->nr_busy_cpus);
6827
6828 if (nr_busy > 1)
Peter Zijlstra067491b2011-12-07 14:32:08 +01006829 goto need_kick_unlock;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006830 }
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306831
6832 sd = rcu_dereference(per_cpu(sd_asym, cpu));
6833
6834 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
6835 sched_domain_span(sd)) < cpu))
6836 goto need_kick_unlock;
6837
Peter Zijlstra067491b2011-12-07 14:32:08 +01006838 rcu_read_unlock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006839 return 0;
Peter Zijlstra067491b2011-12-07 14:32:08 +01006840
6841need_kick_unlock:
6842 rcu_read_unlock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006843need_kick:
6844 return 1;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006845}
6846#else
6847static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
6848#endif
6849
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006850/*
6851 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006852 * Also triggered for nohz idle balancing (with NOHZ_BALANCE_KICK set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006853 */
6854static void run_rebalance_domains(struct softirq_action *h)
6855{
6856 int this_cpu = smp_processor_id();
6857 struct rq *this_rq = cpu_rq(this_cpu);
Suresh Siddha6eb57e02011-10-03 15:09:01 -07006858 enum cpu_idle_type idle = this_rq->idle_balance ?
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006859 CPU_IDLE : CPU_NOT_IDLE;
6860
6861 rebalance_domains(this_cpu, idle);
6862
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006863 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006864 * If this cpu has a pending nohz_balance_kick, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006865 * balancing on behalf of the other idle cpus whose ticks are
6866 * stopped.
6867 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006868 nohz_idle_balance(this_cpu, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006869}
6870
6871static inline int on_null_domain(int cpu)
6872{
Paul E. McKenney90a65012010-02-28 08:32:18 -08006873 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006874}
6875
6876/*
6877 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006878 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006879void trigger_load_balance(struct rq *rq, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006880{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006881 /* Don't need to rebalance while attached to NULL domain */
6882 if (time_after_eq(jiffies, rq->next_balance) &&
6883 likely(!on_null_domain(cpu)))
6884 raise_softirq(SCHED_SOFTIRQ);
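	/*
	 * The SCHED_SOFTIRQ handler is run_rebalance_domains(), registered
	 * in init_sched_fair_class() at the bottom of this file.
	 */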
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006885#ifdef CONFIG_NO_HZ_COMMON
Suresh Siddha1c792db2011-12-01 17:07:32 -08006886 if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006887 nohz_balancer_kick(cpu);
6888#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006889}
6890
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006891static void rq_online_fair(struct rq *rq)
6892{
6893 update_sysctl();
6894}
6895
6896static void rq_offline_fair(struct rq *rq)
6897{
6898 update_sysctl();
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07006899
6900 /* Ensure any throttled groups are reachable by pick_next_task */
6901 unthrottle_offline_cfs_rqs(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006902}
6903
Dhaval Giani55e12e52008-06-24 23:39:43 +05306904#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02006905
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006906/*
6907 * scheduler tick hitting a task of our scheduling class:
6908 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01006909static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006910{
6911 struct cfs_rq *cfs_rq;
6912 struct sched_entity *se = &curr->se;
6913
6914 for_each_sched_entity(se) {
6915 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01006916 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006917 }
Ben Segall18bf2802012-10-04 12:51:20 +02006918
Dave Kleikamp10e84b92013-07-31 13:53:35 -07006919 if (numabalancing_enabled)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02006920 task_tick_numa(rq, curr);
Linus Torvalds3d59eeb2012-12-16 14:33:25 -08006921
Ben Segall18bf2802012-10-04 12:51:20 +02006922 update_rq_runnable_avg(rq, 1);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006923}
6924
6925/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006926 * called on fork with the child task as argument from the parent's context
6927 * - child not yet on the tasklist
6928 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006929 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006930static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006931{
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09006932 struct cfs_rq *cfs_rq;
6933 struct sched_entity *se = &p->se, *curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02006934 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006935 struct rq *rq = this_rq();
6936 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006937
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006938 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006939
Peter Zijlstra861d0342010-08-19 13:31:43 +02006940 update_rq_clock(rq);
6941
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09006942 cfs_rq = task_cfs_rq(current);
6943 curr = cfs_rq->curr;
6944
Daisuke Nishimura6c9a27f2013-09-10 18:16:36 +09006945 /*
6946 * Not only the cpu but also the task_group of the parent might have
 6947	 * been changed after parent->se.{parent,cfs_rq} were copied to
 6948	 * child->se.{parent,cfs_rq}. Call __set_task_cpu() to make the
 6949	 * child's copies point to valid ones.
6950 */
6951 rcu_read_lock();
6952 __set_task_cpu(p, this_cpu);
6953 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006954
Ting Yang7109c442007-08-28 12:53:24 +02006955 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006956
Mike Galbraithb5d9d732009-09-08 11:12:28 +02006957 if (curr)
6958 se->vruntime = curr->vruntime;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02006959 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006960
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006961 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02006962 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02006963 * Upon rescheduling, sched_class::put_prev_task() will place
6964 * 'current' within the tree based on its new key value.
6965 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006966 swap(curr->vruntime, se->vruntime);
Bharata B Raoaec0a512008-08-28 14:42:49 +05306967 resched_task(rq->curr);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006968 }
6969
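	/*
	 * Make the child's vruntime relative to min_vruntime here;
	 * enqueue_entity() adds the destination runqueue's min_vruntime
	 * back when the child is finally queued, possibly on another CPU.
	 */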
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006970 se->vruntime -= cfs_rq->min_vruntime;
6971
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006972 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006973}
6974
Steven Rostedtcb469842008-01-25 21:08:22 +01006975/*
6976 * Priority of the task has changed. Check to see if we preempt
6977 * the current task.
6978 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006979static void
6980prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01006981{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006982 if (!p->se.on_rq)
6983 return;
6984
Steven Rostedtcb469842008-01-25 21:08:22 +01006985 /*
6986 * Reschedule if we are currently running on this runqueue and
6987 * our priority decreased, or if we are not currently running on
6988 * this runqueue and our priority is higher than the current's
6989 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006990 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01006991 if (p->prio > oldprio)
6992 resched_task(rq->curr);
6993 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02006994 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01006995}
6996
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006997static void switched_from_fair(struct rq *rq, struct task_struct *p)
6998{
6999 struct sched_entity *se = &p->se;
7000 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7001
7002 /*
7003 * Ensure the task's vruntime is normalized, so that when its
7004 * switched back to the fair class the enqueue_entity(.flags=0) will
7005 * do the right thing.
7006 *
7007 * If it was on_rq, then the dequeue_entity(.flags=0) will already
7008 * have normalized the vruntime, if it was !on_rq, then only when
7009 * the task is sleeping will it still have non-normalized vruntime.
7010 */
7011 if (!se->on_rq && p->state != TASK_RUNNING) {
7012 /*
7013 * Fix up our vruntime so that the current sleep doesn't
7014 * cause 'unlimited' sleep bonus.
7015 */
7016 place_entity(cfs_rq, se, 0);
7017 se->vruntime -= cfs_rq->min_vruntime;
7018 }
Paul Turner9ee474f2012-10-04 13:18:30 +02007019
Alex Shi141965c2013-06-26 13:05:39 +08007020#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02007021 /*
7022 * Remove our load from contribution when we leave sched_fair
7023 * and ensure we don't carry in an old decay_count if we
7024 * switch back.
7025 */
Kirill Tkhai87e3c8a2013-07-21 04:32:07 +04007026 if (se->avg.decay_count) {
7027 __synchronize_entity_decay(se);
7028 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turner9ee474f2012-10-04 13:18:30 +02007029 }
7030#endif
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007031}
7032
Steven Rostedtcb469842008-01-25 21:08:22 +01007033/*
7034 * We switched to the sched_fair class.
7035 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007036static void switched_to_fair(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01007037{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007038 if (!p->se.on_rq)
7039 return;
7040
Steven Rostedtcb469842008-01-25 21:08:22 +01007041 /*
7042 * We were most likely switched from sched_rt, so
7043 * kick off the schedule if running, otherwise just see
7044 * if we can still preempt the current task.
7045 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007046 if (rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01007047 resched_task(rq->curr);
7048 else
Peter Zijlstra15afe092008-09-20 23:38:02 +02007049 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01007050}
7051
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007052/* Account for a task changing its policy or group.
7053 *
7054 * This routine is mostly called to set cfs_rq->curr field when a task
7055 * migrates between groups/classes.
7056 */
7057static void set_curr_task_fair(struct rq *rq)
7058{
7059 struct sched_entity *se = &rq->curr->se;
7060
Paul Turnerec12cb72011-07-21 09:43:30 -07007061 for_each_sched_entity(se) {
7062 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7063
7064 set_next_entity(cfs_rq, se);
7065 /* ensure bandwidth has been allocated on our new cfs_rq */
7066 account_cfs_rq_runtime(cfs_rq, 0);
7067 }
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007068}
7069
Peter Zijlstra029632f2011-10-25 10:00:11 +02007070void init_cfs_rq(struct cfs_rq *cfs_rq)
7071{
7072 cfs_rq->tasks_timeline = RB_ROOT;
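	/*
	 * Start min_vruntime about 1 ms (1 << 20 ns) short of the u64 wrap
	 * point so that vruntime wrap-around bugs surface soon after boot
	 * rather than after years of uptime.
	 */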
Peter Zijlstra029632f2011-10-25 10:00:11 +02007073 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
7074#ifndef CONFIG_64BIT
7075 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
7076#endif
Alex Shi141965c2013-06-26 13:05:39 +08007077#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02007078 atomic64_set(&cfs_rq->decay_counter, 1);
Alex Shi25099402013-06-20 10:18:55 +08007079 atomic_long_set(&cfs_rq->removed_load, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02007080#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02007081}
7082
Peter Zijlstra810b3812008-02-29 15:21:01 -05007083#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007084static void task_move_group_fair(struct task_struct *p, int on_rq)
Peter Zijlstra810b3812008-02-29 15:21:01 -05007085{
Paul Turneraff3e492012-10-04 13:18:30 +02007086 struct cfs_rq *cfs_rq;
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007087 /*
7088 * If the task was not on the rq at the time of this cgroup movement
7089 * it must have been asleep, sleeping tasks keep their ->vruntime
7090 * absolute on their old rq until wakeup (needed for the fair sleeper
7091 * bonus in place_entity()).
7092 *
7093 * If it was on the rq, we've just 'preempted' it, which does convert
7094 * ->vruntime to a relative base.
7095 *
7096 * Make sure both cases convert their relative position when migrating
7097 * to another cgroup's rq. This does somewhat interfere with the
7098 * fair sleeper stuff for the first placement, but who cares.
7099 */
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09007100 /*
7101 * When !on_rq, vruntime of the task has usually NOT been normalized.
7102 * But there are some cases where it has already been normalized:
7103 *
7104 * - Moving a forked child which is waiting for being woken up by
7105 * wake_up_new_task().
Daisuke Nishimura62af3782011-12-15 14:37:41 +09007106 * - Moving a task which has been woken up by try_to_wake_up() and
7107 * waiting for actually being woken up by sched_ttwu_pending().
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09007108 *
7109 * To prevent boost or penalty in the new cfs_rq caused by delta
7110 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
7111 */
Daisuke Nishimura62af3782011-12-15 14:37:41 +09007112 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09007113 on_rq = 1;
7114
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01007115 if (!on_rq)
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007116 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
7117 set_task_rq(p, task_cpu(p));
Paul Turneraff3e492012-10-04 13:18:30 +02007118 if (!on_rq) {
7119 cfs_rq = cfs_rq_of(&p->se);
7120 p->se.vruntime += cfs_rq->min_vruntime;
7121#ifdef CONFIG_SMP
7122 /*
7123 * migrate_task_rq_fair() will have removed our previous
7124 * contribution, but we must synchronize for ongoing future
7125 * decay.
7126 */
7127 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
7128 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
7129#endif
7130 }
Peter Zijlstra810b3812008-02-29 15:21:01 -05007131}
Peter Zijlstra029632f2011-10-25 10:00:11 +02007132
7133void free_fair_sched_group(struct task_group *tg)
7134{
7135 int i;
7136
7137 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
7138
7139 for_each_possible_cpu(i) {
7140 if (tg->cfs_rq)
7141 kfree(tg->cfs_rq[i]);
7142 if (tg->se)
7143 kfree(tg->se[i]);
7144 }
7145
7146 kfree(tg->cfs_rq);
7147 kfree(tg->se);
7148}
7149
7150int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7151{
7152 struct cfs_rq *cfs_rq;
7153 struct sched_entity *se;
7154 int i;
7155
7156 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
7157 if (!tg->cfs_rq)
7158 goto err;
7159 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
7160 if (!tg->se)
7161 goto err;
7162
7163 tg->shares = NICE_0_LOAD;
7164
7165 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
7166
7167 for_each_possible_cpu(i) {
7168 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
7169 GFP_KERNEL, cpu_to_node(i));
7170 if (!cfs_rq)
7171 goto err;
7172
7173 se = kzalloc_node(sizeof(struct sched_entity),
7174 GFP_KERNEL, cpu_to_node(i));
7175 if (!se)
7176 goto err_free_rq;
7177
7178 init_cfs_rq(cfs_rq);
7179 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
7180 }
7181
7182 return 1;
7183
7184err_free_rq:
7185 kfree(cfs_rq);
7186err:
7187 return 0;
7188}
7189
7190void unregister_fair_sched_group(struct task_group *tg, int cpu)
7191{
7192 struct rq *rq = cpu_rq(cpu);
7193 unsigned long flags;
7194
7195 /*
7196 * Only empty task groups can be destroyed; so we can speculatively
7197 * check on_list without danger of it being re-added.
7198 */
7199 if (!tg->cfs_rq[cpu]->on_list)
7200 return;
7201
7202 raw_spin_lock_irqsave(&rq->lock, flags);
7203 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
7204 raw_spin_unlock_irqrestore(&rq->lock, flags);
7205}
7206
7207void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7208 struct sched_entity *se, int cpu,
7209 struct sched_entity *parent)
7210{
7211 struct rq *rq = cpu_rq(cpu);
7212
7213 cfs_rq->tg = tg;
7214 cfs_rq->rq = rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007215 init_cfs_rq_runtime(cfs_rq);
7216
7217 tg->cfs_rq[cpu] = cfs_rq;
7218 tg->se[cpu] = se;
7219
7220 /* se could be NULL for root_task_group */
7221 if (!se)
7222 return;
7223
7224 if (!parent)
7225 se->cfs_rq = &rq->cfs;
7226 else
7227 se->cfs_rq = parent->my_q;
7228
7229 se->my_q = cfs_rq;
Paul Turner0ac9b1c2013-10-16 11:16:27 -07007230 /* guarantee group entities always have weight */
7231 update_load_set(&se->load, NICE_0_LOAD);
Peter Zijlstra029632f2011-10-25 10:00:11 +02007232 se->parent = parent;
7233}
7234
7235static DEFINE_MUTEX(shares_mutex);
7236
7237int sched_group_set_shares(struct task_group *tg, unsigned long shares)
7238{
7239 int i;
7240 unsigned long flags;
7241
7242 /*
7243 * We can't change the weight of the root cgroup.
7244 */
7245 if (!tg->se[0])
7246 return -EINVAL;
7247
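	/*
	 * Clamp the requested value into the supported range; MIN_SHARES
	 * and MAX_SHARES are 2 and 1 << 18 in kernels of this vintage
	 * (before any load-resolution scaling), so degenerate weights
	 * cannot starve or swamp the hierarchy.
	 */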
7248 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
7249
7250 mutex_lock(&shares_mutex);
7251 if (tg->shares == shares)
7252 goto done;
7253
7254 tg->shares = shares;
7255 for_each_possible_cpu(i) {
7256 struct rq *rq = cpu_rq(i);
7257 struct sched_entity *se;
7258
7259 se = tg->se[i];
7260 /* Propagate contribution to hierarchy */
7261 raw_spin_lock_irqsave(&rq->lock, flags);
Frederic Weisbecker71b1da42013-04-12 01:50:59 +02007262
7263 /* Possible calls to update_curr() need rq clock */
7264 update_rq_clock(rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08007265 for_each_sched_entity(se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02007266 update_cfs_shares(group_cfs_rq(se));
7267 raw_spin_unlock_irqrestore(&rq->lock, flags);
7268 }
7269
7270done:
7271 mutex_unlock(&shares_mutex);
7272 return 0;
7273}
7274#else /* CONFIG_FAIR_GROUP_SCHED */
7275
7276void free_fair_sched_group(struct task_group *tg) { }
7277
7278int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7279{
7280 return 1;
7281}
7282
7283void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
7284
7285#endif /* CONFIG_FAIR_GROUP_SCHED */
7286
Peter Zijlstra810b3812008-02-29 15:21:01 -05007287
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07007288static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00007289{
7290 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00007291 unsigned int rr_interval = 0;
7292
7293 /*
7294 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
7295 * idle runqueue:
7296 */
Peter Williams0d721ce2009-09-21 01:31:53 +00007297 if (rq->cfs.load.weight)
Zhu Yanhaia59f4e02013-01-08 12:56:52 +08007298 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
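	/*
	 * sched_slice() is this entity's weighted share of the latency
	 * period: for example, two equally weighted tasks under a 6 ms
	 * latency target each get a slice of roughly 3 ms, reported here
	 * rounded down to whole jiffies.
	 */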
Peter Williams0d721ce2009-09-21 01:31:53 +00007299
7300 return rr_interval;
7301}
7302
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007303/*
7304 * All the scheduling class methods:
7305 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02007306const struct sched_class fair_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02007307 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007308 .enqueue_task = enqueue_task_fair,
7309 .dequeue_task = dequeue_task_fair,
7310 .yield_task = yield_task_fair,
Mike Galbraithd95f4122011-02-01 09:50:51 -05007311 .yield_to_task = yield_to_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007312
Ingo Molnar2e09bf52007-10-15 17:00:05 +02007313 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007314
7315 .pick_next_task = pick_next_task_fair,
7316 .put_prev_task = put_prev_task_fair,
7317
Peter Williams681f3e62007-10-24 18:23:51 +02007318#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08007319 .select_task_rq = select_task_rq_fair,
Paul Turner0a74bef2012-10-04 13:18:30 +02007320 .migrate_task_rq = migrate_task_rq_fair,
Alex Shi141965c2013-06-26 13:05:39 +08007321
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01007322 .rq_online = rq_online_fair,
7323 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01007324
7325 .task_waking = task_waking_fair,
Peter Williams681f3e62007-10-24 18:23:51 +02007326#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007327
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007328 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007329 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007330 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01007331
7332 .prio_changed = prio_changed_fair,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007333 .switched_from = switched_from_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01007334 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05007335
Peter Williams0d721ce2009-09-21 01:31:53 +00007336 .get_rr_interval = get_rr_interval_fair,
7337
Peter Zijlstra810b3812008-02-29 15:21:01 -05007338#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007339 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05007340#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007341};
7342
7343#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +02007344void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007345{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007346 struct cfs_rq *cfs_rq;
7347
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01007348 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02007349 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02007350 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01007351 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007352}
7353#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02007354
7355__init void init_sched_fair_class(void)
7356{
7357#ifdef CONFIG_SMP
7358 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
7359
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007360#ifdef CONFIG_NO_HZ_COMMON
Diwakar Tundlam554ceca2012-03-07 14:44:26 -08007361 nohz.next_balance = jiffies;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007362 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
Suresh Siddha71325962012-01-19 18:28:57 -08007363 cpu_notifier(sched_ilb_notifier, 0);
Peter Zijlstra029632f2011-10-25 10:00:11 +02007364#endif
7365#endif /* SMP */
7366
7367}