/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
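
/*
 * Illustration (not from the original source, assuming the default
 * SCHED_TUNABLESCALING_LOG scaling below and reading ilog() as ilog2()):
 * on an 8-CPU machine the effective targeted latency becomes
 * 6ms * (1 + ilog(8)) = 24ms.
 */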

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
        = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;
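
/*
 * With the defaults above this is DIV_ROUND_UP(6000000, 750000) = 8;
 * sched_proc_update_handler() below recomputes it whenever either
 * sysctl changes.
 */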

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!entity_is_task(se));
#endif
        return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

/*
 * Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return cfs_rq->tg->cfs_rq[this_cpu];
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (!cfs_rq->on_list) {
                /*
                 * Ensure we either appear before our parent (if already
                 * enqueued) or force our parent to appear after us when it is
                 * enqueued. The fact that we always enqueue bottom-up
                 * reduces this to two cases.
                 */
                if (cfs_rq->tg->parent &&
                    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
                        list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                } else {
                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                }

                cfs_rq->on_list = 1;
        }
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->on_list) {
                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
                cfs_rq->on_list = 0;
        }
}

/* Iterate thr' all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group ? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return 1;

        return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
        int depth = 0;

        for_each_sched_entity(se)
                depth++;

        return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
        int se_depth, pse_depth;

        /*
         * The preemption test can be made between sibling entities that are
         * in the same cfs_rq, i.e. that have a common parent. Walk up the
         * hierarchy of both tasks until we find their ancestors that are
         * siblings of a common parent.
         */

        /* First walk up until both entities are at same depth */
        se_depth = depth_se(*se);
        pse_depth = depth_se(*pse);

        while (se_depth > pse_depth) {
                se_depth--;
                *se = parent_entity(*se);
        }

        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = parent_entity(*pse);
        }

        while (!is_same_group(*se, *pse)) {
                *se = parent_entity(*se);
                *pse = parent_entity(*pse);
        }
}

#else   /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return &cpu_rq(this_cpu)->cfs;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta > 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
                                struct sched_entity *b)
{
        return (s64)(a->vruntime - b->vruntime) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return se->vruntime - cfs_rq->min_vruntime;
}
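
/*
 * Note: the helpers above compare vruntimes through a signed difference,
 * so the comparisons stay correct even after the unsigned 64-bit
 * vruntime counters wrap around.
 */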

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
        u64 vruntime = cfs_rq->min_vruntime;

        if (cfs_rq->curr)
                vruntime = cfs_rq->curr->vruntime;

        if (cfs_rq->rb_leftmost) {
                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
                                                   struct sched_entity,
                                                   run_node);

                if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
        }

        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        s64 key = entity_key(cfs_rq, se);
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (key < entity_key(cfs_rq, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;

                next_node = rb_next(&se->run_node);
                cfs_rq->rb_leftmost = next_node;
        }

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *left = cfs_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

        if (!last)
                return NULL;

        return rb_entry(last, struct sched_entity, run_node);
}
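
/*
 * Layout summary for the timeline above: entities are keyed by
 * se->vruntime - cfs_rq->min_vruntime, so the cached leftmost node is
 * always the entity that has received the least weighted runtime and
 * is therefore the next candidate to run.
 */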

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int factor = get_update_sysctl_factor();

        if (ret || !write)
                return ret;

        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
                                        sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
        (normalized_sysctl_##name = sysctl_##name / (factor))
        WRT_SYSCTL(sched_min_granularity);
        WRT_SYSCTL(sched_latency);
        WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

        return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

        return delta;
}
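
/*
 * Illustration of the scaling above (using NICE_0_LOAD == 1024): a
 * nice-0 entity gets delta back unchanged, while an entity carrying
 * twice that weight (2048) gets delta/2, so its vruntime advances half
 * as fast for the same wall-clock runtime.
 */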

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period = sysctl_sched_min_granularity;
                period *= nr_running;
        }

        return period;
}
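
/*
 * Worked example with the defaults above (latency 6ms, min granularity
 * 0.75ms, nr_latency 8): up to 8 runnable tasks share a 6ms period;
 * with 10 tasks the period stretches to 0.75ms * 10 = 7.5ms.
 */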

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

        for_each_sched_entity(se) {
                struct load_weight *load;
                struct load_weight lw;

                cfs_rq = cfs_rq_of(se);
                load = &cfs_rq->load;

                if (unlikely(!se->on_rq)) {
                        lw = cfs_rq->load;

                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
                slice = calc_delta_mine(slice, se->load.weight, load);
        }
        return slice;
}
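
/*
 * Illustration: with two runnable nice-0 tasks and the default 6ms
 * period each gets a 3ms slice; a task holding two thirds of the
 * queue's weight would get 4ms of that same period.
 */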

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
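
/*
 * For a nice-0 entity the vruntime slice equals its wall-time slice;
 * heavier entities consume vruntime more slowly and lighter ones
 * faster, in the same ratio used by calc_delta_fair() above.
 */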

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta);

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
              unsigned long delta_exec)
{
        unsigned long delta_exec_weighted;

        schedstat_set(curr->statistics.exec_max,
                      max((u64)delta_exec, curr->statistics.exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);
        delta_exec_weighted = calc_delta_fair(delta_exec, curr);

        curr->vruntime += delta_exec_weighted;
        update_min_vruntime(cfs_rq);

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
        cfs_rq->load_unacc_exec_time += delta_exec;
#endif
}

static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_of(cfs_rq)->clock_task;
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(now - curr->exec_start);
        if (!delta_exec)
                return;

        __update_curr(cfs_rq, curr, delta_exec);
        curr->exec_start = now;

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
                        rq_of(cfs_rq)->clock - se->statistics.wait_start));
        schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
        schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
                        rq_of(cfs_rq)->clock - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
        if (entity_is_task(se)) {
                trace_sched_stat_wait(task_of(se),
                        rq_of(cfs_rq)->clock - se->statistics.wait_start);
        }
#endif
        schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_of(cfs_rq)->clock_task;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
        cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_add(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                inc_cpu_load(rq_of(cfs_rq), se->load.weight);
        if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, se->load.weight);
                list_add(&se->group_node, &cfs_rq->tasks);
        }
        cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_load_sub(&cfs_rq->load, se->load.weight);
        if (!parent_entity(se))
                dec_cpu_load(rq_of(cfs_rq), se->load.weight);
        if (entity_is_task(se)) {
                add_cfs_task_weight(cfs_rq, -se->load.weight);
                list_del_init(&se->group_node);
        }
        cfs_rq->nr_running--;
}

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
                                            int global_update)
{
        struct task_group *tg = cfs_rq->tg;
        long load_avg;

        load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
        load_avg -= cfs_rq->load_contribution;

        if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
                atomic_add(load_avg, &tg->load_weight);
                cfs_rq->load_contribution += load_avg;
        }
}

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
        u64 period = sysctl_sched_shares_window;
        u64 now, delta;
        unsigned long load = cfs_rq->load.weight;

        if (!cfs_rq)
                return;

        now = rq_of(cfs_rq)->clock;
        delta = now - cfs_rq->load_stamp;

        /* truncate load history at 4 idle periods */
        if (cfs_rq->load_stamp > cfs_rq->load_last &&
            now - cfs_rq->load_last > 4 * period) {
                cfs_rq->load_period = 0;
                cfs_rq->load_avg = 0;
        }

        cfs_rq->load_stamp = now;
        cfs_rq->load_unacc_exec_time = 0;
        cfs_rq->load_period += delta;
        if (load) {
                cfs_rq->load_last = now;
                cfs_rq->load_avg += delta * load;
        }

        /* consider updating load contribution on each fold or truncate */
        if (global_update || cfs_rq->load_period > period
            || !cfs_rq->load_period)
                update_cfs_rq_load_contribution(cfs_rq, global_update);

        while (cfs_rq->load_period > period) {
                /*
                 * Inline assembly required to prevent the compiler
                 * optimising this loop into a divmod call.
                 * See __iter_div_u64_rem() for another example of this.
                 */
                asm("" : "+rm" (cfs_rq->load_period));
                cfs_rq->load_period /= 2;
                cfs_rq->load_avg /= 2;
        }

        if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
                list_del_leaf_cfs_rq(cfs_rq);
}
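
/*
 * Note on the folding above: each time a full shares window has elapsed
 * the accumulated period and load average are halved, which
 * approximates an exponentially decaying average with a half-life of
 * one window (10ms by default).
 */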

static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
{
        if (se->on_rq) {
                /* commit outstanding execution time */
                if (cfs_rq->curr == se)
                        update_curr(cfs_rq);
                account_entity_dequeue(cfs_rq, se);
        }

        update_load_set(&se->load, weight);

        if (se->on_rq)
                account_entity_enqueue(cfs_rq, se);
}

static void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
{
        struct task_group *tg;
        struct sched_entity *se;
        long load_weight, load, shares;

        if (!cfs_rq)
                return;

        tg = cfs_rq->tg;
        se = tg->se[cpu_of(rq_of(cfs_rq))];
        if (!se)
                return;

        load = cfs_rq->load.weight + weight_delta;

        load_weight = atomic_read(&tg->load_weight);
        load_weight -= cfs_rq->load_contribution;
        load_weight += load;

        shares = (tg->shares * load);
        if (load_weight)
                shares /= load_weight;

        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
        if (shares > tg->shares)
                shares = tg->shares;

        reweight_entity(cfs_rq_of(se), se, shares);
}
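
/*
 * Rough example (illustrative only): a group with tg->shares == 1024
 * whose load is split evenly between two CPUs resolves to roughly 512
 * per-cpu shares on each; the result is always clamped to the range
 * [MIN_SHARES, tg->shares].
 */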

static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
                update_cfs_load(cfs_rq, 0);
                update_cfs_shares(cfs_rq, 0);
        }
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline void update_cfs_shares(struct cfs_rq *cfs_rq, long weight_delta)
{
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
        struct task_struct *tsk = NULL;

        if (entity_is_task(se))
                tsk = task_of(se);

        if (se->statistics.sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->statistics.sleep_max))
                        se->statistics.sleep_max = delta;

                se->statistics.sleep_start = 0;
                se->statistics.sum_sleep_runtime += delta;

                if (tsk) {
                        account_scheduler_latency(tsk, delta >> 10, 1);
                        trace_sched_stat_sleep(tsk, delta);
                }
        }
        if (se->statistics.block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->statistics.block_max))
                        se->statistics.block_max = delta;

                se->statistics.block_start = 0;
                se->statistics.sum_sleep_runtime += delta;

                if (tsk) {
                        if (tsk->in_iowait) {
                                se->statistics.iowait_sum += delta;
                                se->statistics.iowait_count++;
                                trace_sched_stat_iowait(tsk, delta);
                        }

                        /*
                         * Blocking time is in units of nanosecs, so shift by
                         * 20 to get a milliseconds-range estimation of the
                         * amount of time that the task spent sleeping:
                         */
                        if (unlikely(prof_on == SLEEP_PROFILING)) {
                                profile_hits(SLEEP_PROFILING,
                                                (void *)get_wchan(tsk),
                                                delta >> 20);
                        }
                        account_scheduler_latency(tsk, delta >> 10, 0);
                }
        }
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        s64 d = se->vruntime - cfs_rq->min_vruntime;

        if (d < 0)
                d = -d;

        if (d > 3*sysctl_sched_latency)
                schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
        u64 vruntime = cfs_rq->min_vruntime;

        /*
         * The 'current' period is already promised to the current tasks,
         * however the extra weight of the new task will slow them down a
         * little, place the new task so that it fits in the slot that
         * stays open at the end.
         */
        if (initial && sched_feat(START_DEBIT))
                vruntime += sched_vslice(cfs_rq, se);

        /* sleeps up to a single latency don't count. */
        if (!initial) {
                unsigned long thresh = sysctl_sched_latency;

                /*
                 * Halve their sleep time's effect, to allow
                 * for a gentler effect of sleepers:
                 */
                if (sched_feat(GENTLE_FAIR_SLEEPERS))
                        thresh >>= 1;

                vruntime -= thresh;
        }

        /* ensure we never gain time by being placed backwards. */
        vruntime = max_vruntime(se->vruntime, vruntime);

        se->vruntime = vruntime;
}

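/*
 * Effect with the default tunables: a freshly forked task (START_DEBIT)
 * starts one vslice after min_vruntime, while a task waking from sleep
 * is placed up to sysctl_sched_latency before it (half that, 3ms, with
 * GENTLE_FAIR_SLEEPERS), without ever moving backwards past its own
 * previous vruntime.
 */
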
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
        /*
         * Update the normalized vruntime before updating min_vruntime
         * through calling update_curr().
         */
        if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
                se->vruntime += cfs_rq->min_vruntime;

        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);
        update_cfs_load(cfs_rq, 0);
        update_cfs_shares(cfs_rq, se->load.weight);
        account_entity_enqueue(cfs_rq, se);

        if (flags & ENQUEUE_WAKEUP) {
                place_entity(cfs_rq, se, 0);
                enqueue_sleeper(cfs_rq, se);
        }

        update_stats_enqueue(cfs_rq, se);
        check_spread(cfs_rq, se);
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
        se->on_rq = 1;

        if (cfs_rq->nr_running == 1)
                list_add_leaf_cfs_rq(cfs_rq);
}

static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (!se || cfs_rq->last == se)
                cfs_rq->last = NULL;

        if (!se || cfs_rq->next == se)
                cfs_rq->next = NULL;
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        for_each_sched_entity(se)
                __clear_buddies(cfs_rq_of(se), se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        update_stats_dequeue(cfs_rq, se);
        if (flags & DEQUEUE_SLEEP) {
#ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);

                        if (tsk->state & TASK_INTERRUPTIBLE)
                                se->statistics.sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
                                se->statistics.block_start = rq_of(cfs_rq)->clock;
                }
#endif
        }

        clear_buddies(cfs_rq, se);

        if (se != cfs_rq->curr)
                __dequeue_entity(cfs_rq, se);
        se->on_rq = 0;
        update_cfs_load(cfs_rq, 0);
        account_entity_dequeue(cfs_rq, se);
        update_min_vruntime(cfs_rq);
        update_cfs_shares(cfs_rq, 0);

        /*
         * Normalize the entity after updating the min_vruntime because the
         * update can refer to the ->curr item and we need to reflect this
         * movement in our normalized position.
         */
        if (!(flags & DEQUEUE_SLEEP))
                se->vruntime -= cfs_rq->min_vruntime;
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        unsigned long ideal_runtime, delta_exec;

        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime) {
                resched_task(rq_of(cfs_rq)->curr);
                /*
                 * The current task ran long enough, ensure it doesn't get
                 * re-elected due to buddy favours.
                 */
                clear_buddies(cfs_rq, curr);
                return;
        }

        /*
         * Ensure that a task that missed wakeup preemption by a
         * narrow margin doesn't have to wait for a full slice.
         * This also mitigates buddy induced latencies under load.
         */
        if (!sched_feat(WAKEUP_PREEMPT))
                return;

        if (delta_exec < sysctl_sched_min_granularity)
                return;

        if (cfs_rq->nr_running > 1) {
                struct sched_entity *se = __pick_next_entity(cfs_rq);
                s64 delta = curr->vruntime - se->vruntime;

                if (delta < 0)
                        return;

                if (delta > ideal_runtime)
                        resched_task(rq_of(cfs_rq)->curr);
        }
}
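
/*
 * In short: with the 6ms default latency and two equal tasks, the
 * running task is rescheduled once it has consumed its 3ms ideal slice,
 * or earlier (when WAKEUP_PREEMPT is enabled) if it has run at least
 * min_granularity and leads the leftmost entity's vruntime by more than
 * that slice.
 */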

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /* 'current' is not kept within the tree. */
        if (se->on_rq) {
                /*
                 * Any task has to be enqueued before it gets to execute on
                 * a CPU. So account for the time it spent waiting on the
                 * runqueue.
                 */
                update_stats_wait_end(cfs_rq, se);
                __dequeue_entity(cfs_rq, se);
        }

        update_stats_curr_start(cfs_rq, se);
        cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
        /*
         * Track our maximum slice length, if the CPU's load is at
         * least twice that of our own weight (i.e. don't track it
         * when there are only lesser-weight tasks around):
         */
        if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
                se->statistics.slice_max = max(se->statistics.slice_max,
                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
        }
#endif
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct sched_entity *se = __pick_next_entity(cfs_rq);
        struct sched_entity *left = se;

        if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
                se = cfs_rq->next;

        /*
         * Prefer last buddy, try to return the CPU to a preempted task.
         */
        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
                se = cfs_rq->last;

        clear_buddies(cfs_rq, se);

        return se;
}
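
/*
 * Selection order above: the leftmost (most entitled) entity is the
 * baseline; the 'next' and then 'last' buddies are preferred instead,
 * but only when wakeup_preempt_entity() says they have not drifted too
 * far right of the leftmost entity.
 */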

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
        /*
         * If still on the runqueue then deactivate_task()
         * was not called and update_curr() has to be done:
         */
        if (prev->on_rq)
                update_curr(cfs_rq);

        check_spread(cfs_rq, prev);
        if (prev->on_rq) {
                update_stats_wait_start(cfs_rq, prev);
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
        }
        cfs_rq->curr = NULL;
}

static void
entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
{
        /*
         * Update run-time statistics of the 'current'.
         */
        update_curr(cfs_rq);

        /*
         * Update share accounting for long-running entities.
         */
        update_entity_shares_tick(cfs_rq);

#ifdef CONFIG_SCHED_HRTICK
        /*
         * queued ticks are scheduled to match the slice, so don't bother
         * validating it and just reschedule.
         */
        if (queued) {
                resched_task(rq_of(cfs_rq)->curr);
                return;
        }
        /*
         * don't let the period tick interfere with the hrtick preemption
         */
        if (!sched_feat(DOUBLE_TICK) &&
                        hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
                return;
#endif

        if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
                check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
        struct sched_entity *se = &p->se;
        struct cfs_rq *cfs_rq = cfs_rq_of(se);

        WARN_ON(task_rq(p) != rq);

        if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
                u64 slice = sched_slice(cfs_rq, se);
                u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
                s64 delta = slice - ran;

                if (delta < 0) {
                        if (rq->curr == p)
                                resched_task(p);
                        return;
                }

                /*
                 * Don't schedule slices shorter than 10000ns, that just
                 * doesn't make sense. Rely on vruntime for fairness.
                 */
                if (rq->curr != p)
                        delta = max_t(s64, 10000LL, delta);

                hrtick_start(rq, delta);
        }
}

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
        struct task_struct *curr = rq->curr;

        if (curr->sched_class != &fair_sched_class)
                return;

        if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
                hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void
enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                if (se->on_rq)
                        break;
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, flags);
                flags = ENQUEUE_WAKEUP;
        }

        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);

                update_cfs_load(cfs_rq, 0);
                update_cfs_shares(cfs_rq, 0);
        }

        hrtick_update(rq);
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, flags);

                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight)
                        break;
                flags |= DEQUEUE_SLEEP;
        }

        for_each_sched_entity(se) {
                struct cfs_rq *cfs_rq = cfs_rq_of(se);

                update_cfs_load(cfs_rq, 0);
                update_cfs_shares(cfs_rq, 0);
        }

        hrtick_update(rq);
}
1295
1296/*
Ingo Molnar1799e352007-09-19 23:34:46 +02001297 * sched_yield() support is very simple - we dequeue and enqueue.
1298 *
1299 * If compat_yield is turned on then we requeue to the end of the tree.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001300 */
Dmitry Adamushko4530d7a2007-10-15 17:00:08 +02001301static void yield_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001302{
Ingo Molnardb292ca2007-12-04 17:04:39 +01001303 struct task_struct *curr = rq->curr;
1304 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1305 struct sched_entity *rightmost, *se = &curr->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001306
1307 /*
Ingo Molnar1799e352007-09-19 23:34:46 +02001308 * Are we the only task in the tree?
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001309 */
Ingo Molnar1799e352007-09-19 23:34:46 +02001310 if (unlikely(cfs_rq->nr_running == 1))
1311 return;
1312
Peter Zijlstra2002c692008-11-11 11:52:33 +01001313 clear_buddies(cfs_rq, se);
1314
Ingo Molnardb292ca2007-12-04 17:04:39 +01001315 if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
Peter Zijlstra3e51f332008-05-03 18:29:28 +02001316 update_rq_clock(rq);
Ingo Molnar1799e352007-09-19 23:34:46 +02001317 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02001318 * Update run-time statistics of the 'current'.
Ingo Molnar1799e352007-09-19 23:34:46 +02001319 */
Dmitry Adamushko2b1e3152007-10-15 17:00:12 +02001320 update_curr(cfs_rq);
Ingo Molnar1799e352007-09-19 23:34:46 +02001321
1322 return;
1323 }
1324 /*
1325 * Find the rightmost entry in the rbtree:
1326 */
Dmitry Adamushko2b1e3152007-10-15 17:00:12 +02001327 rightmost = __pick_last_entity(cfs_rq);
Ingo Molnar1799e352007-09-19 23:34:46 +02001328 /*
1329 * Already in the rightmost position?
1330 */
Fabio Checconi54fdc582009-07-16 12:32:27 +02001331 if (unlikely(!rightmost || entity_before(rightmost, se)))
Ingo Molnar1799e352007-09-19 23:34:46 +02001332 return;
1333
1334 /*
1335 * Minimally necessary key value to be last in the tree:
Dmitry Adamushko2b1e3152007-10-15 17:00:12 +02001336 * Upon rescheduling, sched_class::put_prev_task() will place
1337 * 'current' within the tree based on its new key value.
Ingo Molnar1799e352007-09-19 23:34:46 +02001338 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001339 se->vruntime = rightmost->vruntime + 1;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001340}
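/*
 * Worked example for the compat_yield path above (numbers hypothetical):
 * if the rightmost entity currently has a vruntime of 100ms, the yielding
 * task's vruntime is set to 100ms + 1ns.  When put_prev_task()/enqueue
 * re-inserts it, the rbtree ordering by vruntime places it last, so every
 * other runnable task gets to run before it is picked again.
 */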
1341
Gregory Haskinse7693a32008-01-25 21:08:09 +01001342#ifdef CONFIG_SMP
Ingo Molnar098fb9d2008-03-16 20:36:10 +01001343
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01001344static void task_waking_fair(struct rq *rq, struct task_struct *p)
1345{
1346 struct sched_entity *se = &p->se;
1347 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1348
1349 se->vruntime -= cfs_rq->min_vruntime;
1350}
1351
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02001352#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02001353/*
1354 * effective_load() calculates the load change as seen from the root_task_group
1355 *
1356 * Adding load to a group doesn't make a group heavier, but can cause movement
1357 * of group shares between cpus. Assuming the shares were perfectly aligned, one
1358 * can calculate the shift in shares.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02001359 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001360static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02001361{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02001362 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02001363
1364 if (!tg->parent)
1365 return wl;
1366
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02001367 for_each_sched_entity(se) {
Paul Turner977dda72011-01-14 17:57:50 -08001368 long lw, w;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02001369
Paul Turner977dda72011-01-14 17:57:50 -08001370 tg = se->my_q->tg;
1371 w = se->my_q->load.weight;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02001372
Paul Turner977dda72011-01-14 17:57:50 -08001373 /* use this cpu's instantaneous contribution */
1374 lw = atomic_read(&tg->load_weight);
1375 lw -= se->my_q->load_contribution;
1376 lw += w + wg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02001377
Paul Turner977dda72011-01-14 17:57:50 -08001378 wl += w;
Peter Zijlstra940959e2008-09-23 15:33:42 +02001379
Paul Turner977dda72011-01-14 17:57:50 -08001380 if (lw > 0 && wl < lw)
1381 wl = (wl * tg->shares) / lw;
1382 else
1383 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02001384
Paul Turner977dda72011-01-14 17:57:50 -08001385 /* zero point is MIN_SHARES */
1386 if (wl < MIN_SHARES)
1387 wl = MIN_SHARES;
1388 wl -= se->load.weight;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02001389 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02001390 }
1391
1392 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02001393}
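/*
 * Illustrative walk through effective_load() with made-up numbers, for a
 * single-level group directly below the root (all values hypothetical):
 * tg->shares = 1024, this cpu's group cfs_rq already holds one nice-0 task
 * (w = 1024), tg->load_weight = 2048 with load_contribution = 1024, and the
 * group entity currently weighs se->load.weight = 512 in the root cfs_rq.
 * Waking another nice-0 task here (wl = wg = 1024) gives:
 *   lw = 2048 - 1024 + (1024 + 1024) = 3072
 *   wl = (1024 + 1024) * 1024 / 3072 = 682
 *   wl - se->load.weight = 682 - 512 = 170
 * so the root-level load change seen is about 170, not the raw 1024: the
 * group's shares damp the apparent weight of tasks inside it.
 */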
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02001394
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02001395#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02001396
Peter Zijlstra83378262008-06-27 13:41:37 +02001397static inline unsigned long effective_load(struct task_group *tg, int cpu,
1398 unsigned long wl, unsigned long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02001399{
Peter Zijlstra83378262008-06-27 13:41:37 +02001400 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02001401}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02001402
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02001403#endif
1404
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001405static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01001406{
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001407 unsigned long this_load, load;
1408 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01001409 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001410 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02001411 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02001412 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01001413
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001414 idx = sd->wake_idx;
1415 this_cpu = smp_processor_id();
1416 prev_cpu = task_cpu(p);
1417 load = source_load(prev_cpu, idx);
1418 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01001419
1420 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01001421 * If sync wakeup then subtract the (maximum possible)
1422 * effect of the currently running task from the load
1423 * of the current CPU:
1424 */
Daniel J Bluemanf3b577d2010-06-01 14:06:13 +01001425 rcu_read_lock();
Peter Zijlstra83378262008-06-27 13:41:37 +02001426 if (sync) {
1427 tg = task_group(current);
1428 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01001429
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001430 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02001431 load += effective_load(tg, prev_cpu, 0, -weight);
1432 }
1433
1434 tg = task_group(p);
1435 weight = p->se.load.weight;
1436
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02001437 /*
1438 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001439 * due to the sync cause above having dropped this_load to 0, we'll
1440 * always have an imbalance, but there's really nothing you can do
1441 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02001442 *
1443 * Otherwise check if either cpu is near enough in load to allow this
1444 * task to be woken on this_cpu.
1445 */
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02001446 if (this_load) {
1447 unsigned long this_eff_load, prev_eff_load;
1448
1449 this_eff_load = 100;
1450 this_eff_load *= power_of(prev_cpu);
1451 this_eff_load *= this_load +
1452 effective_load(tg, this_cpu, weight, weight);
1453
1454 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
1455 prev_eff_load *= power_of(this_cpu);
1456 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
1457
1458 balanced = this_eff_load <= prev_eff_load;
1459 } else
1460 balanced = true;
Daniel J Bluemanf3b577d2010-06-01 14:06:13 +01001461 rcu_read_unlock();
Mike Galbraithb3137bc2008-05-29 11:11:41 +02001462
1463 /*
1464 * If the currently running task will sleep within
1465 * a reasonable amount of time then attract this newly
1466 * woken task:
1467 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02001468 if (sync && balanced)
1469 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02001470
Lucas De Marchi41acab82010-03-10 23:37:45 -03001471 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02001472 tl_per_task = cpu_avg_load_per_task(this_cpu);
1473
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001474 if (balanced ||
1475 (this_load <= load &&
1476 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01001477 /*
1478 * This domain has SD_WAKE_AFFINE and
1479 * p is cache cold in this domain, and
1480 * there is no bad imbalance.
1481 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001482 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03001483 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01001484
1485 return 1;
1486 }
1487 return 0;
1488}
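/*
 * Example of the effective-load comparison above, with hypothetical values:
 * equal cpu_power (1024) on both cpus and sd->imbalance_pct = 125, so the
 * prev side gets a factor of 100 + (125 - 100) / 2 = 112.  If pulling the
 * task here would leave both sides at an effective load of 2048:
 *   this_eff_load = 100 * 1024 * 2048
 *   prev_eff_load = 112 * 1024 * 2048
 * this_eff_load <= prev_eff_load, so the wakeup counts as balanced; the
 * ~12% margin deliberately biases placement toward the waking cpu.
 */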
1489
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001490/*
1491 * find_idlest_group finds and returns the least busy CPU group within the
1492 * domain.
1493 */
1494static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02001495find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02001496 int this_cpu, int load_idx)
Gregory Haskinse7693a32008-01-25 21:08:09 +01001497{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07001498 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001499 unsigned long min_load = ULONG_MAX, this_load = 0;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001500 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01001501
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001502 do {
1503 unsigned long load, avg_load;
1504 int local_group;
1505 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01001506
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001507 /* Skip over this group if it has no CPUs allowed */
1508 if (!cpumask_intersects(sched_group_cpus(group),
1509 &p->cpus_allowed))
1510 continue;
1511
1512 local_group = cpumask_test_cpu(this_cpu,
1513 sched_group_cpus(group));
1514
1515 /* Tally up the load of all CPUs in the group */
1516 avg_load = 0;
1517
1518 for_each_cpu(i, sched_group_cpus(group)) {
1519 /* Bias balancing toward cpus of our domain */
1520 if (local_group)
1521 load = source_load(i, load_idx);
1522 else
1523 load = target_load(i, load_idx);
1524
1525 avg_load += load;
1526 }
1527
1528 /* Adjust by relative CPU power of the group */
1529 avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
1530
1531 if (local_group) {
1532 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001533 } else if (avg_load < min_load) {
1534 min_load = avg_load;
1535 idlest = group;
1536 }
1537 } while (group = group->next, group != sd->groups);
1538
1539 if (!idlest || 100*this_load < imbalance*min_load)
1540 return NULL;
1541 return idlest;
1542}
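/*
 * Example of the final filter above (hypothetical numbers): with
 * sd->imbalance_pct = 125, imbalance = 100 + (125 - 100) / 2 = 112.  If the
 * local group averages this_load = 1000 and the idlest remote group has
 * min_load = 950, then 100 * 1000 = 100000 < 112 * 950 = 106400 and NULL is
 * returned: the remote group must be more than ~12% lighter before it is
 * worth leaving the local group.
 */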
1543
1544/*
1545 * find_idlest_cpu - find the idlest cpu among the cpus in group.
1546 */
1547static int
1548find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1549{
1550 unsigned long load, min_load = ULONG_MAX;
1551 int idlest = -1;
1552 int i;
1553
1554 /* Traverse only the allowed CPUs */
1555 for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
1556 load = weighted_cpuload(i);
1557
1558 if (load < min_load || (load == min_load && i == this_cpu)) {
1559 min_load = load;
1560 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01001561 }
1562 }
1563
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001564 return idlest;
1565}
Gregory Haskinse7693a32008-01-25 21:08:09 +01001566
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001567/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01001568 * Try and locate an idle CPU in the sched_domain.
1569 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001570static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01001571{
1572 int cpu = smp_processor_id();
1573 int prev_cpu = task_cpu(p);
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001574 struct sched_domain *sd;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01001575 int i;
1576
1577 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001578 * If the task is going to be woken-up on this cpu and if it is
1579 * already idle, then it is the right target.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01001580 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001581 if (target == cpu && idle_cpu(cpu))
1582 return cpu;
1583
1584 /*
1585 * If the task is going to be woken-up on the cpu where it previously
1586 * ran and if it is currently idle, then it is the right target.
1587 */
1588 if (target == prev_cpu && idle_cpu(prev_cpu))
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01001589 return prev_cpu;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01001590
1591 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001592 * Otherwise, iterate the domains and find an eligible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01001593 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001594 for_each_domain(target, sd) {
1595 if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01001596 break;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001597
1598 for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
1599 if (idle_cpu(i)) {
1600 target = i;
1601 break;
1602 }
Peter Zijlstraa50bde52009-11-12 15:55:28 +01001603 }
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001604
1605 /*
1606 * Let's stop looking for an idle sibling once we have reached
1607 * the domain that spans the current cpu and prev_cpu.
1608 */
1609 if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
1610 cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
1611 break;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01001612 }
1613
1614 return target;
1615}
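/*
 * In short: starting at 'target', the loop above only walks domains that
 * share package resources (e.g. a cache), returns the first idle allowed
 * cpu it finds there, and gives up once the domain spans both the waking
 * cpu and prev_cpu, since anything wider is no longer a cheap sibling.
 */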
1616
1617/*
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001618 * select_task_rq_fair: balance the current task (running on cpu) in domains
1619 * that have the relevant sd_flag set. In practice, this is SD_BALANCE_WAKE,
1620 * SD_BALANCE_FORK and SD_BALANCE_EXEC.
1621 *
1622 * Balance, i.e. select the least loaded group.
1623 *
1624 * Returns the target CPU number, or the same CPU if no balancing is needed.
1625 *
1626 * preempt must be disabled.
1627 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01001628static int
1629select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001630{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02001631 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001632 int cpu = smp_processor_id();
1633 int prev_cpu = task_cpu(p);
1634 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001635 int want_affine = 0;
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02001636 int want_sd = 1;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02001637 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01001638
Peter Zijlstra0763a662009-09-14 19:37:39 +02001639 if (sd_flag & SD_BALANCE_WAKE) {
Mike Galbraithbeac4c72010-03-11 17:17:20 +01001640 if (cpumask_test_cpu(cpu, &p->cpus_allowed))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001641 want_affine = 1;
1642 new_cpu = prev_cpu;
1643 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01001644
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001645 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f428882009-12-16 18:04:34 +01001646 if (!(tmp->flags & SD_LOAD_BALANCE))
1647 continue;
1648
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001649 /*
Peter Zijlstraae154be2009-09-10 14:40:57 +02001650 * If power savings logic is enabled for a domain, see if we
1651 * are not overloaded; if so, don't balance wider.
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001652 */
Peter Zijlstra59abf022009-09-16 08:28:30 +02001653 if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
Peter Zijlstraae154be2009-09-10 14:40:57 +02001654 unsigned long power = 0;
1655 unsigned long nr_running = 0;
1656 unsigned long capacity;
1657 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01001658
Peter Zijlstraae154be2009-09-10 14:40:57 +02001659 for_each_cpu(i, sched_domain_span(tmp)) {
1660 power += power_of(i);
1661 nr_running += cpu_rq(i)->cfs.nr_running;
1662 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01001663
Peter Zijlstraae154be2009-09-10 14:40:57 +02001664 capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01001665
Peter Zijlstra59abf022009-09-16 08:28:30 +02001666 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1667 nr_running /= 2;
1668
1669 if (nr_running < capacity)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02001670 want_sd = 0;
Gregory Haskinse7693a32008-01-25 21:08:09 +01001671 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001672
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01001673 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001674 * If both cpu and prev_cpu are part of this domain,
1675 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01001676 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001677 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
1678 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
1679 affine_sd = tmp;
1680 want_affine = 0;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001681 }
1682
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02001683 if (!want_sd && !want_affine)
1684 break;
1685
Peter Zijlstra0763a662009-09-14 19:37:39 +02001686 if (!(tmp->flags & sd_flag))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001687 continue;
1688
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02001689 if (want_sd)
1690 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001691 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001692
Mike Galbraith8b911ac2010-03-11 17:17:16 +01001693 if (affine_sd) {
Suresh Siddha99bd5e22010-03-31 16:47:45 -07001694 if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
1695 return select_idle_sibling(p, cpu);
1696 else
1697 return select_idle_sibling(p, prev_cpu);
Mike Galbraith8b911ac2010-03-11 17:17:16 +01001698 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02001699
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001700 while (sd) {
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02001701 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001702 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001703 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001704
Peter Zijlstra0763a662009-09-14 19:37:39 +02001705 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001706 sd = sd->child;
1707 continue;
1708 }
1709
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02001710 if (sd_flag & SD_BALANCE_WAKE)
1711 load_idx = sd->wake_idx;
1712
1713 group = find_idlest_group(sd, p, cpu, load_idx);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001714 if (!group) {
1715 sd = sd->child;
1716 continue;
1717 }
1718
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02001719 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001720 if (new_cpu == -1 || new_cpu == cpu) {
1721 /* Now try balancing at a lower domain level of cpu */
1722 sd = sd->child;
1723 continue;
1724 }
1725
1726 /* Now try balancing at a lower domain level of new_cpu */
1727 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02001728 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001729 sd = NULL;
1730 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02001731 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001732 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02001733 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02001734 sd = tmp;
1735 }
1736 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01001737 }
1738
Peter Zijlstrac88d5912009-09-10 13:50:02 +02001739 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01001740}
1741#endif /* CONFIG_SMP */
1742
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01001743static unsigned long
1744wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02001745{
1746 unsigned long gran = sysctl_sched_wakeup_granularity;
1747
1748 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01001749 * Since it's curr running now, convert the gran from real-time
1750 * to virtual-time in its units.
Mike Galbraith13814d42010-03-11 17:17:04 +01001751 *
1752 * By using 'se' instead of 'curr' we penalize light tasks, so
1753 * they get preempted easier. That is, if 'se' < 'curr' then
1754 * the resulting gran will be larger, therefore penalizing the
1755 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1756 * be smaller, again penalizing the lighter task.
1757 *
1758 * This is especially important for buddies when the leftmost
1759 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02001760 */
Mike Galbraith13814d42010-03-11 17:17:04 +01001761 if (unlikely(se->load.weight != NICE_0_LOAD))
1762 gran = calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02001763
1764 return gran;
1765}
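/*
 * Numeric illustration (the base granularity value is hypothetical): with
 * gran = 1000000ns and calc_delta_fair() scaling by NICE_0_LOAD / weight,
 * a waking nice +5 task (weight 335) faces gran ~= 1000000 * 1024 / 335
 * ~= 3.06ms, so it needs a much bigger vruntime lead to preempt, while a
 * waking nice -5 task (weight 3121) faces ~= 0.33ms and preempts more
 * easily - matching the "penalize the lighter task" comment above.
 */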
1766
1767/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02001768 * Should 'se' preempt 'curr'?
1769 *
1770 * |s1
1771 * |s2
1772 * |s3
1773 * g
1774 * |<--->|c
1775 *
1776 * w(c, s1) = -1
1777 * w(c, s2) = 0
1778 * w(c, s3) = 1
1779 *
1780 */
1781static int
1782wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1783{
1784 s64 gran, vdiff = curr->vruntime - se->vruntime;
1785
1786 if (vdiff <= 0)
1787 return -1;
1788
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01001789 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02001790 if (vdiff > gran)
1791 return 1;
1792
1793 return 0;
1794}
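/*
 * Mapping the diagram above to numbers (all hypothetical): let gran work
 * out to 1ms of virtual time and curr->vruntime = 100ms.  A waking se at
 * 101ms (s1, to the right) gives vdiff = -1ms <= 0 -> -1; at 99.6ms (s2,
 * within gran) vdiff = 0.4ms -> 0; at 97ms (s3, more than gran ahead)
 * vdiff = 3ms > gran -> 1, i.e. preempt.
 */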
1795
Peter Zijlstra02479092008-11-04 21:25:10 +01001796static void set_last_buddy(struct sched_entity *se)
1797{
Peter Zijlstra6bc912b2009-01-15 14:53:38 +01001798 if (likely(task_of(se)->policy != SCHED_IDLE)) {
1799 for_each_sched_entity(se)
1800 cfs_rq_of(se)->last = se;
1801 }
Peter Zijlstra02479092008-11-04 21:25:10 +01001802}
1803
1804static void set_next_buddy(struct sched_entity *se)
1805{
Peter Zijlstra6bc912b2009-01-15 14:53:38 +01001806 if (likely(task_of(se)->policy != SCHED_IDLE)) {
1807 for_each_sched_entity(se)
1808 cfs_rq_of(se)->next = se;
1809 }
Peter Zijlstra02479092008-11-04 21:25:10 +01001810}
1811
Peter Zijlstra464b7522008-10-24 11:06:15 +02001812/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001813 * Preempt the current task with a newly woken task if needed:
1814 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02001815static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001816{
1817 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02001818 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01001819 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02001820 int scale = cfs_rq->nr_running >= sched_nr_latency;
Mike Galbraith03e89e42008-12-16 08:45:30 +01001821
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01001822 if (unlikely(se == pse))
1823 return;
1824
Mike Galbraithf685cea2009-10-23 23:09:22 +02001825 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
Mike Galbraith3cb63d52009-09-11 12:01:17 +02001826 set_next_buddy(pse);
Peter Zijlstra57fdc262008-09-23 15:33:45 +02001827
Bharata B Raoaec0a512008-08-28 14:42:49 +05301828 /*
1829 * We can come here with TIF_NEED_RESCHED already set from new task
1830 * wake up path.
1831 */
1832 if (test_tsk_need_resched(curr))
1833 return;
1834
Ingo Molnar91c234b2007-10-15 17:00:18 +02001835 /*
Peter Zijlstra6bc912b2009-01-15 14:53:38 +01001836 * Batch and idle tasks do not preempt (their preemption is driven by
Ingo Molnar91c234b2007-10-15 17:00:18 +02001837 * the tick):
1838 */
Peter Zijlstra6bc912b2009-01-15 14:53:38 +01001839 if (unlikely(p->policy != SCHED_NORMAL))
Ingo Molnar91c234b2007-10-15 17:00:18 +02001840 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001841
Peter Zijlstra6bc912b2009-01-15 14:53:38 +01001842 /* Idle tasks are by definition preempted by everybody. */
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01001843 if (unlikely(curr->policy == SCHED_IDLE))
1844 goto preempt;
Peter Zijlstra6bc912b2009-01-15 14:53:38 +01001845
Peter Zijlstraad4b78b2009-09-16 12:31:31 +02001846 if (!sched_feat(WAKEUP_PREEMPT))
1847 return;
1848
Jupyung Leea65ac742009-11-17 18:51:40 +09001849 update_curr(cfs_rq);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01001850 find_matching_se(&se, &pse);
1851 BUG_ON(!pse);
1852 if (wakeup_preempt_entity(se, pse) == 1)
1853 goto preempt;
Jupyung Leea65ac742009-11-17 18:51:40 +09001854
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01001855 return;
1856
1857preempt:
1858 resched_task(curr);
1859 /*
1860 * Only set the backward buddy when the current task is still
1861 * on the rq. This can happen when a wakeup gets interleaved
1862 * with schedule on the ->pre_schedule() or idle_balance()
1863 * point, either of which can drop the rq lock.
1864 *
1865 * Also, during early boot the idle thread is in the fair class,
1866 * for obvious reasons it's a bad idea to schedule back to it.
1867 */
1868 if (unlikely(!se->on_rq || curr == rq->idle))
1869 return;
1870
1871 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
1872 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001873}
1874
Ingo Molnarfb8d4722007-08-09 11:16:48 +02001875static struct task_struct *pick_next_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001876{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001877 struct task_struct *p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001878 struct cfs_rq *cfs_rq = &rq->cfs;
1879 struct sched_entity *se;
1880
Tim Blechmann36ace272009-11-24 11:55:45 +01001881 if (!cfs_rq->nr_running)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001882 return NULL;
1883
1884 do {
Ingo Molnar9948f4b2007-08-09 11:16:48 +02001885 se = pick_next_entity(cfs_rq);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01001886 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001887 cfs_rq = group_cfs_rq(se);
1888 } while (cfs_rq);
1889
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01001890 p = task_of(se);
1891 hrtick_start_fair(rq, p);
1892
1893 return p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001894}
1895
1896/*
1897 * Account for a descheduled task:
1898 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02001899static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001900{
1901 struct sched_entity *se = &prev->se;
1902 struct cfs_rq *cfs_rq;
1903
1904 for_each_sched_entity(se) {
1905 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02001906 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001907 }
1908}
1909
Peter Williams681f3e62007-10-24 18:23:51 +02001910#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001911/**************************************************
1912 * Fair scheduling class load-balancing methods:
1913 */
1914
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001915/*
1916 * pull_task - move a task from a remote runqueue to the local runqueue.
1917 * Both runqueues must be locked.
1918 */
1919static void pull_task(struct rq *src_rq, struct task_struct *p,
1920 struct rq *this_rq, int this_cpu)
1921{
1922 deactivate_task(src_rq, p, 0);
1923 set_task_cpu(p, this_cpu);
1924 activate_task(this_rq, p, 0);
1925 check_preempt_curr(this_rq, p, 0);
1926}
1927
1928/*
1929 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
1930 */
1931static
1932int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
1933 struct sched_domain *sd, enum cpu_idle_type idle,
1934 int *all_pinned)
1935{
1936 int tsk_cache_hot = 0;
1937 /*
1938 * We do not migrate tasks that are:
1939 * 1) running (obviously), or
1940 * 2) cannot be migrated to this CPU due to cpus_allowed, or
1941 * 3) are cache-hot on their current CPU.
1942 */
1943 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001944 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001945 return 0;
1946 }
1947 *all_pinned = 0;
1948
1949 if (task_running(rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001950 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001951 return 0;
1952 }
1953
1954 /*
1955 * Aggressive migration if:
1956 * 1) task is cache cold, or
1957 * 2) too many balance attempts have failed.
1958 */
1959
Venkatesh Pallipadi305e6832010-10-04 17:03:21 -07001960 tsk_cache_hot = task_hot(p, rq->clock_task, sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001961 if (!tsk_cache_hot ||
1962 sd->nr_balance_failed > sd->cache_nice_tries) {
1963#ifdef CONFIG_SCHEDSTATS
1964 if (tsk_cache_hot) {
1965 schedstat_inc(sd, lb_hot_gained[idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03001966 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001967 }
1968#endif
1969 return 1;
1970 }
1971
1972 if (tsk_cache_hot) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03001973 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01001974 return 0;
1975 }
1976 return 1;
1977}
1978
Peter Zijlstra897c3952009-12-17 17:45:42 +01001979/*
1980 * move_one_task tries to move exactly one task from busiest to this_rq, as
1981 * part of active balancing operations within "domain".
1982 * Returns 1 if successful and 0 otherwise.
1983 *
1984 * Called with both runqueues locked.
1985 */
1986static int
1987move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
1988 struct sched_domain *sd, enum cpu_idle_type idle)
1989{
1990 struct task_struct *p, *n;
1991 struct cfs_rq *cfs_rq;
1992 int pinned = 0;
1993
1994 for_each_leaf_cfs_rq(busiest, cfs_rq) {
1995 list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
1996
1997 if (!can_migrate_task(p, busiest, this_cpu,
1998 sd, idle, &pinned))
1999 continue;
2000
2001 pull_task(busiest, p, this_rq, this_cpu);
2002 /*
2003 * Right now, this is only the second place pull_task()
2004 * is called, so we can safely collect pull_task()
2005 * stats here rather than inside pull_task().
2006 */
2007 schedstat_inc(sd, lb_gained[idle]);
2008 return 1;
2009 }
2010 }
2011
2012 return 0;
2013}
2014
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002015static unsigned long
2016balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2017 unsigned long max_load_move, struct sched_domain *sd,
2018 enum cpu_idle_type idle, int *all_pinned,
Peter Zijlstraee00e662009-12-17 17:25:20 +01002019 int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002020{
2021 int loops = 0, pulled = 0, pinned = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002022 long rem_load_move = max_load_move;
Peter Zijlstraee00e662009-12-17 17:25:20 +01002023 struct task_struct *p, *n;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002024
2025 if (max_load_move == 0)
2026 goto out;
2027
2028 pinned = 1;
2029
Peter Zijlstraee00e662009-12-17 17:25:20 +01002030 list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
2031 if (loops++ > sysctl_sched_nr_migrate)
2032 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002033
Peter Zijlstraee00e662009-12-17 17:25:20 +01002034 if ((p->se.load.weight >> 1) > rem_load_move ||
2035 !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
2036 continue;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002037
Peter Zijlstraee00e662009-12-17 17:25:20 +01002038 pull_task(busiest, p, this_rq, this_cpu);
2039 pulled++;
2040 rem_load_move -= p->se.load.weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002041
2042#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01002043 /*
2044 * NEWIDLE balancing is a source of latency, so preemptible
2045 * kernels will stop after the first task is pulled to minimize
2046 * the critical section.
2047 */
2048 if (idle == CPU_NEWLY_IDLE)
2049 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002050#endif
2051
Peter Zijlstraee00e662009-12-17 17:25:20 +01002052 /*
2053 * We only want to steal up to the prescribed amount of
2054 * weighted load.
2055 */
2056 if (rem_load_move <= 0)
2057 break;
2058
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002059 if (p->prio < *this_best_prio)
2060 *this_best_prio = p->prio;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002061 }
2062out:
2063 /*
2064 * Right now, this is one of only two places pull_task() is called,
2065 * so we can safely collect pull_task() stats here rather than
2066 * inside pull_task().
2067 */
2068 schedstat_add(sd, lb_gained[idle], pulled);
2069
2070 if (all_pinned)
2071 *all_pinned = pinned;
2072
2073 return max_load_move - rem_load_move;
2074}
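/*
 * The (p->se.load.weight >> 1) > rem_load_move test above skips any task
 * heavier than twice the remaining budget.  Hypothetical example: with
 * rem_load_move = 512, a nice-0 task (weight 1024, half-weight 512) can
 * still be pulled, while a weight-1277 task (half-weight 638) is skipped
 * rather than overshooting the requested amount of load.
 */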
2075
Peter Zijlstra230059de2009-12-17 17:47:12 +01002076#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08002077/*
2078 * update tg->load_weight by folding this cpu's load_avg
2079 */
Paul Turner67e86252010-11-15 15:47:05 -08002080static int update_shares_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08002081{
2082 struct cfs_rq *cfs_rq;
2083 unsigned long flags;
2084 struct rq *rq;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08002085
2086 if (!tg->se[cpu])
2087 return 0;
2088
2089 rq = cpu_rq(cpu);
2090 cfs_rq = tg->cfs_rq[cpu];
2091
2092 raw_spin_lock_irqsave(&rq->lock, flags);
2093
2094 update_rq_clock(rq);
Paul Turnerd6b55912010-11-15 15:47:09 -08002095 update_cfs_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08002096
2097 /*
2098 * We need to update shares after updating tg->load_weight in
2099 * order to adjust the weight of groups with long running tasks.
2100 */
Paul Turnerf0d74422010-11-15 15:47:03 -08002101 update_cfs_shares(cfs_rq, 0);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08002102
2103 raw_spin_unlock_irqrestore(&rq->lock, flags);
2104
2105 return 0;
2106}
2107
2108static void update_shares(int cpu)
2109{
2110 struct cfs_rq *cfs_rq;
2111 struct rq *rq = cpu_rq(cpu);
2112
2113 rcu_read_lock();
Paul Turner67e86252010-11-15 15:47:05 -08002114 for_each_leaf_cfs_rq(rq, cfs_rq)
2115 update_shares_cpu(cfs_rq->tg, cpu);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08002116 rcu_read_unlock();
2117}
2118
Peter Zijlstra230059de2009-12-17 17:47:12 +01002119static unsigned long
2120load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
2121 unsigned long max_load_move,
2122 struct sched_domain *sd, enum cpu_idle_type idle,
2123 int *all_pinned, int *this_best_prio)
2124{
2125 long rem_load_move = max_load_move;
2126 int busiest_cpu = cpu_of(busiest);
2127 struct task_group *tg;
2128
2129 rcu_read_lock();
2130 update_h_load(busiest_cpu);
2131
2132 list_for_each_entry_rcu(tg, &task_groups, list) {
2133 struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
2134 unsigned long busiest_h_load = busiest_cfs_rq->h_load;
2135 unsigned long busiest_weight = busiest_cfs_rq->load.weight;
2136 u64 rem_load, moved_load;
2137
2138 /*
2139 * empty group
2140 */
2141 if (!busiest_cfs_rq->task_weight)
2142 continue;
2143
2144 rem_load = (u64)rem_load_move * busiest_weight;
2145 rem_load = div_u64(rem_load, busiest_h_load + 1);
2146
2147 moved_load = balance_tasks(this_rq, this_cpu, busiest,
2148 rem_load, sd, idle, all_pinned, this_best_prio,
2149 busiest_cfs_rq);
2150
2151 if (!moved_load)
2152 continue;
2153
2154 moved_load *= busiest_h_load;
2155 moved_load = div_u64(moved_load, busiest_weight + 1);
2156
2157 rem_load_move -= moved_load;
2158 if (rem_load_move < 0)
2159 break;
2160 }
2161 rcu_read_unlock();
2162
2163 return max_load_move - rem_load_move;
2164}
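/*
 * Sketch of the h_load scaling above with made-up numbers: to move
 * rem_load_move = 512 of root-level weight out of a group cfs_rq whose
 * local load.weight is 2048 but whose hierarchical h_load is 256, the
 * budget handed to balance_tasks() is 512 * 2048 / (256 + 1) ~= 4080 local
 * units; if 1024 local units actually move, the progress charged against
 * rem_load_move is 1024 * 256 / (2048 + 1) ~= 127 root-level units.
 */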
2165#else
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08002166static inline void update_shares(int cpu)
2167{
2168}
2169
Peter Zijlstra230059de2009-12-17 17:47:12 +01002170static unsigned long
2171load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
2172 unsigned long max_load_move,
2173 struct sched_domain *sd, enum cpu_idle_type idle,
2174 int *all_pinned, int *this_best_prio)
2175{
2176 return balance_tasks(this_rq, this_cpu, busiest,
2177 max_load_move, sd, idle, all_pinned,
2178 this_best_prio, &busiest->cfs);
2179}
2180#endif
2181
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002182/*
2183 * move_tasks tries to move up to max_load_move weighted load from busiest to
2184 * this_rq, as part of a balancing operation within domain "sd".
2185 * Returns 1 if successful and 0 otherwise.
2186 *
2187 * Called with both runqueues locked.
2188 */
2189static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2190 unsigned long max_load_move,
2191 struct sched_domain *sd, enum cpu_idle_type idle,
2192 int *all_pinned)
2193{
Peter Zijlstra3d45fd82009-12-17 17:12:46 +01002194 unsigned long total_load_moved = 0, load_moved;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002195 int this_best_prio = this_rq->curr->prio;
2196
2197 do {
Peter Zijlstra3d45fd82009-12-17 17:12:46 +01002198 load_moved = load_balance_fair(this_rq, this_cpu, busiest,
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002199 max_load_move - total_load_moved,
2200 sd, idle, all_pinned, &this_best_prio);
Peter Zijlstra3d45fd82009-12-17 17:12:46 +01002201
2202 total_load_moved += load_moved;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002203
2204#ifdef CONFIG_PREEMPT
2205 /*
2206 * NEWIDLE balancing is a source of latency, so preemptible
2207 * kernels will stop after the first task is pulled to minimize
2208 * the critical section.
2209 */
2210 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
2211 break;
Peter Zijlstrabaa8c112009-12-17 18:10:09 +01002212
2213 if (raw_spin_is_contended(&this_rq->lock) ||
2214 raw_spin_is_contended(&busiest->lock))
2215 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002216#endif
Peter Zijlstra3d45fd82009-12-17 17:12:46 +01002217 } while (load_moved && max_load_move > total_load_moved);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002218
2219 return total_load_moved > 0;
2220}
2221
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002222/********** Helpers for find_busiest_group ************************/
2223/*
2224 * sd_lb_stats - Structure to store the statistics of a sched_domain
2225 * during load balancing.
2226 */
2227struct sd_lb_stats {
2228 struct sched_group *busiest; /* Busiest group in this sd */
2229 struct sched_group *this; /* Local group in this sd */
2230 unsigned long total_load; /* Total load of all groups in sd */
2231 unsigned long total_pwr; /* Total power of all groups in sd */
2232 unsigned long avg_load; /* Average load across all groups in sd */
2233
2234 /** Statistics of this group */
2235 unsigned long this_load;
2236 unsigned long this_load_per_task;
2237 unsigned long this_nr_running;
Nikhil Raofab47622010-10-15 13:12:29 -07002238 unsigned long this_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07002239 unsigned int this_idle_cpus;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002240
2241 /* Statistics of the busiest group */
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07002242 unsigned int busiest_idle_cpus;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002243 unsigned long max_load;
2244 unsigned long busiest_load_per_task;
2245 unsigned long busiest_nr_running;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08002246 unsigned long busiest_group_capacity;
Nikhil Raofab47622010-10-15 13:12:29 -07002247 unsigned long busiest_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07002248 unsigned int busiest_group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002249
2250 int group_imb; /* Is there imbalance in this sd */
2251#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2252 int power_savings_balance; /* Is powersave balance needed for this sd */
2253 struct sched_group *group_min; /* Least loaded group in sd */
2254 struct sched_group *group_leader; /* Group which relieves group_min */
2255 unsigned long min_load_per_task; /* load_per_task in group_min */
2256 unsigned long leader_nr_running; /* Nr running of group_leader */
2257 unsigned long min_nr_running; /* Nr running of group_min */
2258#endif
2259};
2260
2261/*
2262 * sg_lb_stats - stats of a sched_group required for load_balancing
2263 */
2264struct sg_lb_stats {
2265 unsigned long avg_load; /*Avg load across the CPUs of the group */
2266 unsigned long group_load; /* Total load over the CPUs of the group */
2267 unsigned long sum_nr_running; /* Nr tasks running in the group */
2268 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
2269 unsigned long group_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07002270 unsigned long idle_cpus;
2271 unsigned long group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002272 int group_imb; /* Is there an imbalance in the group ? */
Nikhil Raofab47622010-10-15 13:12:29 -07002273 int group_has_capacity; /* Is there extra capacity in the group? */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002274};
2275
2276/**
2277 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
2278 * @group: The group whose first cpu is to be returned.
2279 */
2280static inline unsigned int group_first_cpu(struct sched_group *group)
2281{
2282 return cpumask_first(sched_group_cpus(group));
2283}
2284
2285/**
2286 * get_sd_load_idx - Obtain the load index for a given sched domain.
2287 * @sd: The sched_domain whose load_idx is to be obtained.
2288 * @idle: The idle status of the CPU whose sd's load_idx is obtained.
2289 */
2290static inline int get_sd_load_idx(struct sched_domain *sd,
2291 enum cpu_idle_type idle)
2292{
2293 int load_idx;
2294
2295 switch (idle) {
2296 case CPU_NOT_IDLE:
2297 load_idx = sd->busy_idx;
2298 break;
2299
2300 case CPU_NEWLY_IDLE:
2301 load_idx = sd->newidle_idx;
2302 break;
2303 default:
2304 load_idx = sd->idle_idx;
2305 break;
2306 }
2307
2308 return load_idx;
2309}
2310
2311
2312#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2313/**
2314 * init_sd_power_savings_stats - Initialize power savings statistics for
2315 * the given sched_domain, during load balancing.
2316 *
2317 * @sd: Sched domain whose power-savings statistics are to be initialized.
2318 * @sds: Variable containing the statistics for sd.
2319 * @idle: Idle status of the CPU at which we're performing load-balancing.
2320 */
2321static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2322 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2323{
2324 /*
2325 * Busy processors will not participate in power savings
2326 * balance.
2327 */
2328 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2329 sds->power_savings_balance = 0;
2330 else {
2331 sds->power_savings_balance = 1;
2332 sds->min_nr_running = ULONG_MAX;
2333 sds->leader_nr_running = 0;
2334 }
2335}
2336
2337/**
2338 * update_sd_power_savings_stats - Update the power saving stats for a
2339 * sched_domain while performing load balancing.
2340 *
2341 * @group: sched_group belonging to the sched_domain under consideration.
2342 * @sds: Variable containing the statistics of the sched_domain
2343 * @local_group: Does group contain the CPU for which we're performing
2344 * load balancing ?
2345 * @sgs: Variable containing the statistics of the group.
2346 */
2347static inline void update_sd_power_savings_stats(struct sched_group *group,
2348 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2349{
2350
2351 if (!sds->power_savings_balance)
2352 return;
2353
2354 /*
2355 * If the local group is idle or completely loaded,
2356 * there is no need to do power savings balance at this domain
2357 */
2358 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
2359 !sds->this_nr_running))
2360 sds->power_savings_balance = 0;
2361
2362 /*
2363 * If a group is already running at full capacity or idle,
2364 * don't include that group in power savings calculations
2365 */
2366 if (!sds->power_savings_balance ||
2367 sgs->sum_nr_running >= sgs->group_capacity ||
2368 !sgs->sum_nr_running)
2369 return;
2370
2371 /*
2372 * Calculate the group which has the least non-idle load.
2373 * This is the group from which we need to pick up the load
2374 * for saving power
2375 */
2376 if ((sgs->sum_nr_running < sds->min_nr_running) ||
2377 (sgs->sum_nr_running == sds->min_nr_running &&
2378 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
2379 sds->group_min = group;
2380 sds->min_nr_running = sgs->sum_nr_running;
2381 sds->min_load_per_task = sgs->sum_weighted_load /
2382 sgs->sum_nr_running;
2383 }
2384
2385 /*
2386 * Calculate the group which is nearly at its
2387 * capacity but still has some space to pick up some load
2388 * from other groups and save more power
2389 */
2390 if (sgs->sum_nr_running + 1 > sgs->group_capacity)
2391 return;
2392
2393 if (sgs->sum_nr_running > sds->leader_nr_running ||
2394 (sgs->sum_nr_running == sds->leader_nr_running &&
2395 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
2396 sds->group_leader = group;
2397 sds->leader_nr_running = sgs->sum_nr_running;
2398 }
2399}
2400
2401/**
2402 * check_power_save_busiest_group - see if there is potential for some power-savings balance
2403 * @sds: Variable containing the statistics of the sched_domain
2404 * under consideration.
2405 * @this_cpu: Cpu at which we're currently performing load-balancing.
2406 * @imbalance: Variable to store the imbalance.
2407 *
2408 * Description:
2409 * Check if we have potential to perform some power-savings balance.
2410 * If yes, set the busiest group to be the least loaded group in the
2411 * sched_domain, so that its CPUs can be put to idle.
2412 *
2413 * Returns 1 if there is potential to perform power-savings balance.
2414 * Else returns 0.
2415 */
2416static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2417 int this_cpu, unsigned long *imbalance)
2418{
2419 if (!sds->power_savings_balance)
2420 return 0;
2421
2422 if (sds->this != sds->group_leader ||
2423 sds->group_leader == sds->group_min)
2424 return 0;
2425
2426 *imbalance = sds->min_load_per_task;
2427 sds->busiest = sds->group_min;
2428
2429 return 1;
2430
2431}
2432#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2433static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2434 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2435{
2436 return;
2437}
2438
2439static inline void update_sd_power_savings_stats(struct sched_group *group,
2440 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2441{
2442 return;
2443}
2444
2445static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2446 int this_cpu, unsigned long *imbalance)
2447{
2448 return 0;
2449}
2450#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2451
2452
2453unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
2454{
2455 return SCHED_LOAD_SCALE;
2456}
2457
2458unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
2459{
2460 return default_scale_freq_power(sd, cpu);
2461}
2462
2463unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
2464{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02002465 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002466 unsigned long smt_gain = sd->smt_gain;
2467
2468 smt_gain /= weight;
2469
2470 return smt_gain;
2471}
2472
2473unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
2474{
2475 return default_scale_smt_power(sd, cpu);
2476}
2477
2478unsigned long scale_rt_power(int cpu)
2479{
2480 struct rq *rq = cpu_rq(cpu);
2481 u64 total, available;
2482
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002483 total = sched_avg_period() + (rq->clock - rq->age_stamp);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07002484
2485 if (unlikely(total < rq->rt_avg)) {
2486 /* Ensures that power won't end up being negative */
2487 available = 0;
2488 } else {
2489 available = total - rq->rt_avg;
2490 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002491
2492 if (unlikely((s64)total < SCHED_LOAD_SCALE))
2493 total = SCHED_LOAD_SCALE;
2494
2495 total >>= SCHED_LOAD_SHIFT;
2496
2497 return div_u64(available, total);
2498}
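/*
 * Example with hypothetical numbers: over a total window of 1e9 ns, if
 * rq->rt_avg says 2.5e8 ns were unavailable to CFS, then
 * available = 7.5e8 ns, total >> SCHED_LOAD_SHIFT = 976562, and the
 * function returns roughly 7.5e8 / 976562 ~= 768, i.e. about 75% of
 * SCHED_LOAD_SCALE is left for fair tasks.
 */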
2499
2500static void update_cpu_power(struct sched_domain *sd, int cpu)
2501{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02002502 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002503 unsigned long power = SCHED_LOAD_SCALE;
2504 struct sched_group *sdg = sd->groups;
2505
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002506 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
2507 if (sched_feat(ARCH_POWER))
2508 power *= arch_scale_smt_power(sd, cpu);
2509 else
2510 power *= default_scale_smt_power(sd, cpu);
2511
2512 power >>= SCHED_LOAD_SHIFT;
2513 }
2514
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10002515 sdg->cpu_power_orig = power;
2516
2517 if (sched_feat(ARCH_POWER))
2518 power *= arch_scale_freq_power(sd, cpu);
2519 else
2520 power *= default_scale_freq_power(sd, cpu);
2521
2522 power >>= SCHED_LOAD_SHIFT;
2523
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002524 power *= scale_rt_power(cpu);
2525 power >>= SCHED_LOAD_SHIFT;
2526
2527 if (!power)
2528 power = 1;
2529
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02002530 cpu_rq(cpu)->cpu_power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002531 sdg->cpu_power = power;
2532}
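/*
 * Illustrative chain of scalings above (numbers are hypothetical): start
 * from SCHED_LOAD_SCALE = 1024; on an SMT domain with two siblings and an
 * smt_gain of 1178, default_scale_smt_power() yields 1178 / 2 = 589, so
 * power becomes 1024 * 589 >> 10 = 589; a neutral frequency factor (1024)
 * leaves it at 589; and with scale_rt_power() returning 768 (75% left
 * after non-CFS time) the final cpu power is 589 * 768 >> 10 = 441.
 */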
2533
2534static void update_group_power(struct sched_domain *sd, int cpu)
2535{
2536 struct sched_domain *child = sd->child;
2537 struct sched_group *group, *sdg = sd->groups;
2538 unsigned long power;
2539
2540 if (!child) {
2541 update_cpu_power(sd, cpu);
2542 return;
2543 }
2544
2545 power = 0;
2546
2547 group = child->groups;
2548 do {
2549 power += group->cpu_power;
2550 group = group->next;
2551 } while (group != child->groups);
2552
2553 sdg->cpu_power = power;
2554}
2555
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10002556/*
2557 * Try and fix up capacity for tiny siblings; this is needed when
2558 * things like SD_ASYM_PACKING need f_b_g to select another sibling
2559 * which on its own isn't powerful enough.
2560 *
2561 * See update_sd_pick_busiest() and check_asym_packing().
2562 */
2563static inline int
2564fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
2565{
2566 /*
2567 * Only siblings can have significantly less than SCHED_LOAD_SCALE
2568 */
2569 if (sd->level != SD_LV_SIBLING)
2570 return 0;
2571
2572 /*
2573 * If ~90% of the cpu_power is still there, we're good.
2574 */
Michael Neuling694f5a12010-06-10 09:03:37 +10002575 if (group->cpu_power * 32 > group->cpu_power_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10002576 return 1;
2577
2578 return 0;
2579}
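/*
 * The 32/29 ratio above is the "~90%" test: capacity is kept at 1 while
 * cpu_power >= ~90.6% of cpu_power_orig.  Hypothetical numbers: with
 * cpu_power_orig = 589, a current cpu_power of 550 passes
 * (550 * 32 = 17600 > 589 * 29 = 17081), while 500 does not
 * (16000 < 17081) and the capacity is left at 0.
 */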
2580
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002581/**
2582 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
2583 * @sd: The sched_domain whose statistics are to be updated.
2584 * @group: sched_group whose statistics are to be updated.
2585 * @this_cpu: Cpu for which load balance is currently performed.
2586 * @idle: Idle status of this_cpu
2587 * @load_idx: Load index of sched_domain of this_cpu for load calc.
2588 * @sd_idle: Idle status of the sched_domain containing group.
2589 * @local_group: Does group contain this_cpu.
2590 * @cpus: Set of cpus considered for load balancing.
2591 * @balance: Should we balance.
2592 * @sgs: variable to hold the statistics for this group.
2593 */
2594static inline void update_sg_lb_stats(struct sched_domain *sd,
2595 struct sched_group *group, int this_cpu,
2596 enum cpu_idle_type idle, int load_idx, int *sd_idle,
2597 int local_group, const struct cpumask *cpus,
2598 int *balance, struct sg_lb_stats *sgs)
2599{
Nikhil Rao2582f0e2010-10-13 12:09:36 -07002600 unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002601 int i;
2602 unsigned int balance_cpu = -1, first_idle_cpu = 0;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08002603 unsigned long avg_load_per_task = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002604
Gautham R Shenoy871e35b2010-01-20 14:02:44 -06002605 if (local_group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002606 balance_cpu = group_first_cpu(group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002607
2608 /* Tally up the load of all CPUs in the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002609 max_cpu_load = 0;
2610 min_cpu_load = ~0UL;
Nikhil Rao2582f0e2010-10-13 12:09:36 -07002611 max_nr_running = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002612
2613 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
2614 struct rq *rq = cpu_rq(i);
2615
2616 if (*sd_idle && rq->nr_running)
2617 *sd_idle = 0;
2618
2619 /* Bias balancing toward cpus of our domain */
2620 if (local_group) {
2621 if (idle_cpu(i) && !first_idle_cpu) {
2622 first_idle_cpu = 1;
2623 balance_cpu = i;
2624 }
2625
2626 load = target_load(i, load_idx);
2627 } else {
2628 load = source_load(i, load_idx);
Nikhil Rao2582f0e2010-10-13 12:09:36 -07002629 if (load > max_cpu_load) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002630 max_cpu_load = load;
Nikhil Rao2582f0e2010-10-13 12:09:36 -07002631 max_nr_running = rq->nr_running;
2632 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002633 if (min_cpu_load > load)
2634 min_cpu_load = load;
2635 }
2636
2637 sgs->group_load += load;
2638 sgs->sum_nr_running += rq->nr_running;
2639 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07002640 if (idle_cpu(i))
2641 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002642 }
2643
2644 /*
2645 * The first idle cpu or the first cpu (busiest) in this sched group
2646 * is eligible for doing load balancing at this and above
2647 * domains. In the newly idle case, we will allow all the cpus
2648 * to do the newly idle load balance.
2649 */
Peter Zijlstrabbc8cb52010-07-09 15:15:43 +02002650 if (idle != CPU_NEWLY_IDLE && local_group) {
2651 if (balance_cpu != this_cpu) {
2652 *balance = 0;
2653 return;
2654 }
2655 update_group_power(sd, this_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002656 }
2657
2658 /* Adjust by relative CPU power of the group */
2659 sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
2660
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002661 /*
2662 * Consider the group unbalanced when the imbalance is larger
2663 * than the average weight of two tasks.
2664 *
2665 * APZ: with cgroup the avg task weight can vary wildly and
2666 * might not be a suitable number - should we keep a
2667 * normalized nr_running number somewhere that negates
2668 * the hierarchy?
2669 */
Suresh Siddhadd5feea2010-02-23 16:13:52 -08002670 if (sgs->sum_nr_running)
2671 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002672
Nikhil Rao2582f0e2010-10-13 12:09:36 -07002673 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task && max_nr_running > 1)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002674 sgs->group_imb = 1;
2675
Nikhil Rao2582f0e2010-10-13 12:09:36 -07002676 sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10002677 if (!sgs->group_capacity)
2678 sgs->group_capacity = fix_small_capacity(sd, group);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07002679 sgs->group_weight = group->group_weight;
Nikhil Raofab47622010-10-15 13:12:29 -07002680
2681 if (sgs->group_capacity > sgs->sum_nr_running)
2682 sgs->group_has_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002683}
2684
2685/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10002686 * update_sd_pick_busiest - return 1 on busiest group
2687 * @sd: sched_domain whose statistics are to be checked
2688 * @sds: sched_domain statistics
2689 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10002690 * @sgs: sched_group statistics
2691 * @this_cpu: the current cpu
Michael Neuling532cb4c2010-06-08 14:57:02 +10002692 *
2693 * Determine if @sg is a busier group than the previously selected
2694 * busiest group.
2695 */
2696static bool update_sd_pick_busiest(struct sched_domain *sd,
2697 struct sd_lb_stats *sds,
2698 struct sched_group *sg,
2699 struct sg_lb_stats *sgs,
2700 int this_cpu)
2701{
2702 if (sgs->avg_load <= sds->max_load)
2703 return false;
2704
2705 if (sgs->sum_nr_running > sgs->group_capacity)
2706 return true;
2707
2708 if (sgs->group_imb)
2709 return true;
2710
2711 /*
2712 * ASYM_PACKING needs to move all the work to the lowest
2713 * numbered CPUs in the group, therefore mark all groups
2714 * higher than ourself as busy.
2715 */
2716 if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
2717 this_cpu < group_first_cpu(sg)) {
2718 if (!sds->busiest)
2719 return true;
2720
2721 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
2722 return true;
2723 }
2724
2725 return false;
2726}
2727
2728/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002729 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
2730 * @sd: sched_domain whose statistics are to be updated.
2731 * @this_cpu: Cpu for which load balance is currently performed.
2732 * @idle: Idle status of this_cpu
Michael Neuling532cb4c2010-06-08 14:57:02 +10002733 * @sd_idle: Idle status of the sched_domain containing sg.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002734 * @cpus: Set of cpus considered for load balancing.
2735 * @balance: Should we balance.
2736 * @sds: variable to hold the statistics for this sched_domain.
2737 */
2738static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
2739 enum cpu_idle_type idle, int *sd_idle,
2740 const struct cpumask *cpus, int *balance,
2741 struct sd_lb_stats *sds)
2742{
2743 struct sched_domain *child = sd->child;
Michael Neuling532cb4c2010-06-08 14:57:02 +10002744 struct sched_group *sg = sd->groups;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002745 struct sg_lb_stats sgs;
2746 int load_idx, prefer_sibling = 0;
2747
2748 if (child && child->flags & SD_PREFER_SIBLING)
2749 prefer_sibling = 1;
2750
2751 init_sd_power_savings_stats(sd, sds, idle);
2752 load_idx = get_sd_load_idx(sd, idle);
2753
2754 do {
2755 int local_group;
2756
Michael Neuling532cb4c2010-06-08 14:57:02 +10002757 local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002758 memset(&sgs, 0, sizeof(sgs));
Michael Neuling532cb4c2010-06-08 14:57:02 +10002759 update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx, sd_idle,
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002760 local_group, cpus, balance, &sgs);
2761
Peter Zijlstra8f190fb2009-12-24 14:18:21 +01002762 if (local_group && !(*balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002763 return;
2764
2765 sds->total_load += sgs.group_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10002766 sds->total_pwr += sg->cpu_power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002767
2768 /*
2769 * In case the child domain prefers tasks go to siblings
Michael Neuling532cb4c2010-06-08 14:57:02 +10002770 * first, lower the sg capacity to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07002771 * and move all the excess tasks away. We lower the capacity
2772 * of a group only if the local group has the capacity to fit
2773 * these excess tasks, i.e. nr_running < group_capacity. The
2774 * extra check prevents the case where you always pull from the
2775 * heaviest group when it is already under-utilized (possible
 2776	 * when one large-weight task outweighs the rest of the tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002777 */
Nikhil Rao75dd3212010-10-15 13:12:30 -07002778 if (prefer_sibling && !local_group && sds->this_has_capacity)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002779 sgs.group_capacity = min(sgs.group_capacity, 1UL);
2780
2781 if (local_group) {
2782 sds->this_load = sgs.avg_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10002783 sds->this = sg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002784 sds->this_nr_running = sgs.sum_nr_running;
2785 sds->this_load_per_task = sgs.sum_weighted_load;
Nikhil Raofab47622010-10-15 13:12:29 -07002786 sds->this_has_capacity = sgs.group_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07002787 sds->this_idle_cpus = sgs.idle_cpus;
Michael Neuling532cb4c2010-06-08 14:57:02 +10002788 } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002789 sds->max_load = sgs.avg_load;
Michael Neuling532cb4c2010-06-08 14:57:02 +10002790 sds->busiest = sg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002791 sds->busiest_nr_running = sgs.sum_nr_running;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07002792 sds->busiest_idle_cpus = sgs.idle_cpus;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08002793 sds->busiest_group_capacity = sgs.group_capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002794 sds->busiest_load_per_task = sgs.sum_weighted_load;
Nikhil Raofab47622010-10-15 13:12:29 -07002795 sds->busiest_has_capacity = sgs.group_has_capacity;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07002796 sds->busiest_group_weight = sgs.group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002797 sds->group_imb = sgs.group_imb;
2798 }
2799
Michael Neuling532cb4c2010-06-08 14:57:02 +10002800 update_sd_power_savings_stats(sg, sds, local_group, &sgs);
2801 sg = sg->next;
2802 } while (sg != sd->groups);
2803}
2804
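/*
 * Architectures that want SD_ASYM_PACKING behaviour at the sibling level
 * (POWER7 being the motivating example, see check_asym_packing() below)
 * are expected to override this weak stub and return SD_ASYM_PACKING.
 */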
Michael Neuling2ec57d42010-06-29 12:02:01 +10002805int __weak arch_sd_sibling_asym_packing(void)
Michael Neuling532cb4c2010-06-08 14:57:02 +10002806{
2807 return 0*SD_ASYM_PACKING;
2808}
2809
2810/**
2811 * check_asym_packing - Check to see if the group is packed into the
 2812 *			sched domain.
 2813 *
 2814 * This is primarily intended to be used at the sibling level.  Some
2815 * cores like POWER7 prefer to use lower numbered SMT threads. In the
2816 * case of POWER7, it can move to lower SMT modes only when higher
2817 * threads are idle. When in lower SMT modes, the threads will
2818 * perform better since they share less core resources. Hence when we
2819 * have idle threads, we want them to be the higher ones.
2820 *
2821 * This packing function is run on idle threads. It checks to see if
2822 * the busiest CPU in this domain (core in the P7 case) has a higher
 2823 * CPU number than the CPU the packing function is being run on.  Here we are
 2824 * assuming a lower CPU number will be equivalent to a lower SMT thread
2825 * number.
2826 *
Michael Neulingb6b12292010-06-10 12:06:21 +10002827 * Returns 1 when packing is required and a task should be moved to
2828 * this CPU. The amount of the imbalance is returned in *imbalance.
2829 *
Michael Neuling532cb4c2010-06-08 14:57:02 +10002830 * @sd: The sched_domain whose packing is to be checked.
2831 * @sds: Statistics of the sched_domain which is to be packed
2832 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
 2833 * @imbalance: returns the amount of imbalance due to packing.
Michael Neuling532cb4c2010-06-08 14:57:02 +10002834 */
2835static int check_asym_packing(struct sched_domain *sd,
2836 struct sd_lb_stats *sds,
2837 int this_cpu, unsigned long *imbalance)
2838{
2839 int busiest_cpu;
2840
2841 if (!(sd->flags & SD_ASYM_PACKING))
2842 return 0;
2843
2844 if (!sds->busiest)
2845 return 0;
2846
2847 busiest_cpu = group_first_cpu(sds->busiest);
2848 if (this_cpu > busiest_cpu)
2849 return 0;
2850
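	/*
	 * max_load is the busiest group's load normalized by its cpu_power
	 * (see update_sg_lb_stats()); multiplying back by cpu_power and
	 * dividing by SCHED_LOAD_SCALE converts it into raw weighted load,
	 * i.e. we ask for the busiest group's entire load to be moved.
	 */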
2851 *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->cpu_power,
2852 SCHED_LOAD_SCALE);
2853 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002854}
2855
2856/**
2857 * fix_small_imbalance - Calculate the minor imbalance that exists
2858 * amongst the groups of a sched_domain, during
2859 * load balancing.
2860 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
2861 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2862 * @imbalance: Variable to store the imbalance.
2863 */
2864static inline void fix_small_imbalance(struct sd_lb_stats *sds,
2865 int this_cpu, unsigned long *imbalance)
2866{
2867 unsigned long tmp, pwr_now = 0, pwr_move = 0;
2868 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08002869 unsigned long scaled_busy_load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002870
2871 if (sds->this_nr_running) {
2872 sds->this_load_per_task /= sds->this_nr_running;
2873 if (sds->busiest_load_per_task >
2874 sds->this_load_per_task)
2875 imbn = 1;
2876 } else
2877 sds->this_load_per_task =
2878 cpu_avg_load_per_task(this_cpu);
2879
Suresh Siddhadd5feea2010-02-23 16:13:52 -08002880 scaled_busy_load_per_task = sds->busiest_load_per_task
2881 * SCHED_LOAD_SCALE;
2882 scaled_busy_load_per_task /= sds->busiest->cpu_power;
2883
2884 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
2885 (scaled_busy_load_per_task * imbn)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002886 *imbalance = sds->busiest_load_per_task;
2887 return;
2888 }
2889
2890 /*
 2891	 * OK, we don't have enough imbalance to justify moving tasks;
2892 * however we may be able to increase total CPU power used by
2893 * moving them.
2894 */
2895
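	/*
	 * pwr_now below estimates throughput as the cpu_power-weighted sum,
	 * over both groups, of min(per-task load, group load); pwr_move
	 * recomputes it after hypothetically shifting one
	 * busiest_load_per_task worth of load from the busiest group to this
	 * one. Only if the move raises the estimate do we report it as the
	 * imbalance.
	 */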
2896 pwr_now += sds->busiest->cpu_power *
2897 min(sds->busiest_load_per_task, sds->max_load);
2898 pwr_now += sds->this->cpu_power *
2899 min(sds->this_load_per_task, sds->this_load);
2900 pwr_now /= SCHED_LOAD_SCALE;
2901
2902 /* Amount of load we'd subtract */
2903 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2904 sds->busiest->cpu_power;
2905 if (sds->max_load > tmp)
2906 pwr_move += sds->busiest->cpu_power *
2907 min(sds->busiest_load_per_task, sds->max_load - tmp);
2908
2909 /* Amount of load we'd add */
2910 if (sds->max_load * sds->busiest->cpu_power <
2911 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
2912 tmp = (sds->max_load * sds->busiest->cpu_power) /
2913 sds->this->cpu_power;
2914 else
2915 tmp = (sds->busiest_load_per_task * SCHED_LOAD_SCALE) /
2916 sds->this->cpu_power;
2917 pwr_move += sds->this->cpu_power *
2918 min(sds->this_load_per_task, sds->this_load + tmp);
2919 pwr_move /= SCHED_LOAD_SCALE;
2920
2921 /* Move if we gain throughput */
2922 if (pwr_move > pwr_now)
2923 *imbalance = sds->busiest_load_per_task;
2924}
2925
2926/**
2927 * calculate_imbalance - Calculate the amount of imbalance present within the
2928 * groups of a given sched_domain during load balance.
2929 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
2930 * @this_cpu: Cpu for which currently load balance is being performed.
2931 * @imbalance: The variable to store the imbalance.
2932 */
2933static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
2934 unsigned long *imbalance)
2935{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08002936 unsigned long max_pull, load_above_capacity = ~0UL;
2937
2938 sds->busiest_load_per_task /= sds->busiest_nr_running;
2939 if (sds->group_imb) {
2940 sds->busiest_load_per_task =
2941 min(sds->busiest_load_per_task, sds->avg_load);
2942 }
2943
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002944 /*
2945 * In the presence of smp nice balancing, certain scenarios can have
 2946	 * max load less than avg load (as we skip groups at or below
 2947	 * their cpu_power while calculating max_load).
2948 */
2949 if (sds->max_load < sds->avg_load) {
2950 *imbalance = 0;
2951 return fix_small_imbalance(sds, this_cpu, imbalance);
2952 }
2953
Suresh Siddhadd5feea2010-02-23 16:13:52 -08002954 if (!sds->group_imb) {
2955 /*
2956 * Don't want to pull so many tasks that a group would go idle.
2957 */
2958 load_above_capacity = (sds->busiest_nr_running -
2959 sds->busiest_group_capacity);
2960
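		/*
		 * Two SCHED_LOAD_SCALE factors: the first turns "excess
		 * tasks" into load, assuming each task carries roughly
		 * NICE_0 weight; the second (together with the division by
		 * cpu_power below) normalizes it the same way avg_load and
		 * max_load are normalized, so the min() against
		 * max_load - avg_load compares like with like.
		 */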
2961 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
2962
2963 load_above_capacity /= sds->busiest->cpu_power;
2964 }
2965
2966 /*
2967 * We're trying to get all the cpus to the average_load, so we don't
2968 * want to push ourselves above the average load, nor do we wish to
2969 * reduce the max loaded cpu below the average load. At the same time,
2970 * we also don't want to reduce the group load below the group capacity
2971 * (so that we can implement power-savings policies etc). Thus we look
2972 * for the minimum possible imbalance.
2973 * Be careful of negative numbers as they'll appear as very large values
2974 * with unsigned longs.
2975 */
2976 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002977
2978 /* How much load to actually move to equalise the imbalance */
2979 *imbalance = min(max_pull * sds->busiest->cpu_power,
2980 (sds->avg_load - sds->this_load) * sds->this->cpu_power)
2981 / SCHED_LOAD_SCALE;
2982
2983 /*
 2984	 * If *imbalance is less than the average load per runnable task,
 2985	 * there is no guarantee that any tasks will be moved, so let
 2986	 * fix_small_imbalance() decide whether to bump its value to force at
 2987	 * least one task to be moved.
2988 */
2989 if (*imbalance < sds->busiest_load_per_task)
2990 return fix_small_imbalance(sds, this_cpu, imbalance);
2991
2992}
Nikhil Raofab47622010-10-15 13:12:29 -07002993
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01002994/******* find_busiest_group() helpers end here *********************/
2995
2996/**
2997 * find_busiest_group - Returns the busiest group within the sched_domain
2998 * if there is an imbalance. If there isn't an imbalance, and
2999 * the user has opted for power-savings, it returns a group whose
3000 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3001 * such a group exists.
3002 *
3003 * Also calculates the amount of weighted load which should be moved
3004 * to restore balance.
3005 *
3006 * @sd: The sched_domain whose busiest group is to be returned.
3007 * @this_cpu: The cpu for which load balancing is currently being performed.
3008 * @imbalance: Variable which stores amount of weighted load which should
3009 * be moved to restore balance/put a group to idle.
3010 * @idle: The idle status of this_cpu.
3011 * @sd_idle: The idleness of sd
3012 * @cpus: The set of CPUs under consideration for load-balancing.
3013 * @balance: Pointer to a variable indicating if this_cpu
 3014 *	is the appropriate cpu to perform load balancing at this level.
3015 *
3016 * Returns: - the busiest group if imbalance exists.
3017 * - If no imbalance and user has opted for power-savings balance,
3018 * return the least loaded group whose CPUs can be
3019 * put to idle by rebalancing its tasks onto our group.
3020 */
3021static struct sched_group *
3022find_busiest_group(struct sched_domain *sd, int this_cpu,
3023 unsigned long *imbalance, enum cpu_idle_type idle,
3024 int *sd_idle, const struct cpumask *cpus, int *balance)
3025{
3026 struct sd_lb_stats sds;
3027
3028 memset(&sds, 0, sizeof(sds));
3029
3030 /*
 3031	 * Compute the various statistics relevant for load balancing at
3032 * this level.
3033 */
3034 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
3035 balance, &sds);
3036
3037 /* Cases where imbalance does not exist from POV of this_cpu */
3038 /* 1) this_cpu is not the appropriate cpu to perform load balancing
3039 * at this level.
3040 * 2) There is no busy sibling group to pull from.
3041 * 3) This group is the busiest group.
 3042	 * 4) This group is busier than the average busyness at this
3043 * sched_domain.
3044 * 5) The imbalance is within the specified limit.
Nikhil Raofab47622010-10-15 13:12:29 -07003045 *
3046 * Note: when doing newidle balance, if the local group has excess
3047 * capacity (i.e. nr_running < group_capacity) and the busiest group
3048 * does not have any capacity, we force a load balance to pull tasks
3049 * to the local group. In this case, we skip past checks 3, 4 and 5.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003050 */
Peter Zijlstra8f190fb2009-12-24 14:18:21 +01003051 if (!(*balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003052 goto ret;
3053
Michael Neuling532cb4c2010-06-08 14:57:02 +10003054 if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
3055 check_asym_packing(sd, &sds, this_cpu, imbalance))
3056 return sds.busiest;
3057
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003058 if (!sds.busiest || sds.busiest_nr_running == 0)
3059 goto out_balanced;
3060
Nikhil Raofab47622010-10-15 13:12:29 -07003061 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
3062 if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
3063 !sds.busiest_has_capacity)
3064 goto force_balance;
3065
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003066 if (sds.this_load >= sds.max_load)
3067 goto out_balanced;
3068
3069 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
3070
3071 if (sds.this_load >= sds.avg_load)
3072 goto out_balanced;
3073
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07003074 /*
 3075	 * In the CPU_NEWLY_IDLE case, use imbalance_pct to be conservative.
 3076	 * For the busy-balance check, use !idle_cpu() instead of
 3077	 * CPU_NOT_IDLE, because HT siblings can be flagged CPU_NOT_IDLE
 3078	 * even when they are idle.
3079 */
3080 if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
3081 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3082 goto out_balanced;
3083 } else {
3084 /*
 3085		 * This cpu is idle. If the busiest group does not have
 3086		 * more tasks than the number of available cpus, and
 3087		 * there is no imbalance between this and the busiest group
 3088		 * wrt idle cpus, it is balanced.
3089 */
3090 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
3091 sds.busiest_nr_running <= sds.busiest_group_weight)
3092 goto out_balanced;
3093 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003094
Nikhil Raofab47622010-10-15 13:12:29 -07003095force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003096 /* Looks like there is an imbalance. Compute it */
3097 calculate_imbalance(&sds, this_cpu, imbalance);
3098 return sds.busiest;
3099
3100out_balanced:
3101 /*
3102 * There is no obvious imbalance. But check if we can do some balancing
3103 * to save power.
3104 */
3105 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
3106 return sds.busiest;
3107ret:
3108 *imbalance = 0;
3109 return NULL;
3110}
3111
3112/*
3113 * find_busiest_queue - find the busiest runqueue among the cpus in group.
3114 */
3115static struct rq *
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10003116find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
3117 enum cpu_idle_type idle, unsigned long imbalance,
3118 const struct cpumask *cpus)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003119{
3120 struct rq *busiest = NULL, *rq;
3121 unsigned long max_load = 0;
3122 int i;
3123
3124 for_each_cpu(i, sched_group_cpus(group)) {
3125 unsigned long power = power_of(i);
3126 unsigned long capacity = DIV_ROUND_CLOSEST(power, SCHED_LOAD_SCALE);
3127 unsigned long wl;
3128
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10003129 if (!capacity)
3130 capacity = fix_small_capacity(sd, group);
3131
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003132 if (!cpumask_test_cpu(i, cpus))
3133 continue;
3134
3135 rq = cpu_rq(i);
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01003136 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003137
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01003138 /*
3139 * When comparing with imbalance, use weighted_cpuload()
3140 * which is not scaled with the cpu power.
3141 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003142 if (capacity && rq->nr_running == 1 && wl > imbalance)
3143 continue;
3144
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01003145 /*
 3146		 * For the load comparisons with the other cpus, consider
3147 * the weighted_cpuload() scaled with the cpu power, so that
3148 * the load can be moved away from the cpu that is potentially
3149 * running at a lower capacity.
3150 */
3151 wl = (wl * SCHED_LOAD_SCALE) / power;
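		/*
		 * e.g. with power == 512 (half of SCHED_LOAD_SCALE) the
		 * scaling above turns a raw load of 1024 into 2048, so a cpu
		 * running at half capacity looks twice as loaded in the
		 * max_load comparison below.
		 */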
3152
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003153 if (wl > max_load) {
3154 max_load = wl;
3155 busiest = rq;
3156 }
3157 }
3158
3159 return busiest;
3160}
3161
3162/*
3163 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
 3164 * it works so long as it is large enough.
3165 */
3166#define MAX_PINNED_INTERVAL 512
3167
3168/* Working cpumask for load_balance and load_balance_newidle. */
3169static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
3170
Michael Neuling532cb4c2010-06-08 14:57:02 +10003171static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle,
3172 int busiest_cpu, int this_cpu)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01003173{
3174 if (idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10003175
3176 /*
3177 * ASYM_PACKING needs to force migrate tasks from busy but
3178 * higher numbered CPUs in order to pack all tasks in the
3179 * lowest numbered CPUs.
3180 */
3181 if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
3182 return 1;
3183
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01003184 /*
 3185		 * The only task running on a non-idle cpu can be moved to this
 3186		 * cpu in an attempt to completely free up the other CPU
 3187		 * package.
3188 *
3189 * The package power saving logic comes from
 3190		 * find_busiest_group(). If there is no imbalance, then
3191 * f_b_g() will return NULL. However when sched_mc={1,2} then
3192 * f_b_g() will select a group from which a running task may be
3193 * pulled to this cpu in order to make the other package idle.
3194 * If there is no opportunity to make a package idle and if
 3195		 * there is no imbalance, then f_b_g() will return NULL and no
3196 * action will be taken in load_balance_newidle().
3197 *
3198 * Under normal task pull operation due to imbalance, there
3199 * will be more than one task in the source run queue and
3200 * move_tasks() will succeed. ld_moved will be true and this
3201 * active balance code will not be triggered.
3202 */
3203 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3204 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3205 return 0;
3206
3207 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
3208 return 0;
3209 }
3210
3211 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
3212}
3213
Tejun Heo969c7922010-05-06 18:49:21 +02003214static int active_load_balance_cpu_stop(void *data);
3215
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003216/*
3217 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3218 * tasks if there is an imbalance.
3219 */
3220static int load_balance(int this_cpu, struct rq *this_rq,
3221 struct sched_domain *sd, enum cpu_idle_type idle,
3222 int *balance)
3223{
3224 int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
3225 struct sched_group *group;
3226 unsigned long imbalance;
3227 struct rq *busiest;
3228 unsigned long flags;
3229 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
3230
3231 cpumask_copy(cpus, cpu_active_mask);
3232
3233 /*
3234 * When power savings policy is enabled for the parent domain, idle
3235 * sibling can pick up load irrespective of busy siblings. In this case,
3236 * let the state of idle sibling percolate up as CPU_IDLE, instead of
3237 * portraying it as CPU_NOT_IDLE.
3238 */
3239 if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
3240 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3241 sd_idle = 1;
3242
3243 schedstat_inc(sd, lb_count[idle]);
3244
3245redo:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003246 group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
3247 cpus, balance);
3248
3249 if (*balance == 0)
3250 goto out_balanced;
3251
3252 if (!group) {
3253 schedstat_inc(sd, lb_nobusyg[idle]);
3254 goto out_balanced;
3255 }
3256
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10003257 busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003258 if (!busiest) {
3259 schedstat_inc(sd, lb_nobusyq[idle]);
3260 goto out_balanced;
3261 }
3262
3263 BUG_ON(busiest == this_rq);
3264
3265 schedstat_add(sd, lb_imbalance[idle], imbalance);
3266
3267 ld_moved = 0;
3268 if (busiest->nr_running > 1) {
3269 /*
3270 * Attempt to move tasks. If find_busiest_group has found
3271 * an imbalance but busiest->nr_running <= 1, the group is
3272 * still unbalanced. ld_moved simply stays zero, so it is
3273 * correctly treated as an imbalance.
3274 */
3275 local_irq_save(flags);
3276 double_rq_lock(this_rq, busiest);
3277 ld_moved = move_tasks(this_rq, this_cpu, busiest,
3278 imbalance, sd, idle, &all_pinned);
3279 double_rq_unlock(this_rq, busiest);
3280 local_irq_restore(flags);
3281
3282 /*
3283 * some other cpu did the load balance for us.
3284 */
3285 if (ld_moved && this_cpu != smp_processor_id())
3286 resched_cpu(this_cpu);
3287
3288 /* All tasks on this runqueue were pinned by CPU affinity */
3289 if (unlikely(all_pinned)) {
3290 cpumask_clear_cpu(cpu_of(busiest), cpus);
3291 if (!cpumask_empty(cpus))
3292 goto redo;
3293 goto out_balanced;
3294 }
3295 }
3296
3297 if (!ld_moved) {
3298 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07003299 /*
3300 * Increment the failure counter only on periodic balance.
3301 * We do not want newidle balance, which can be very
 3302		 * frequent, to pollute the failure counter, causing
3303 * excessive cache_hot migrations and active balances.
3304 */
3305 if (idle != CPU_NEWLY_IDLE)
3306 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003307
Michael Neuling532cb4c2010-06-08 14:57:02 +10003308 if (need_active_balance(sd, sd_idle, idle, cpu_of(busiest),
3309 this_cpu)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003310 raw_spin_lock_irqsave(&busiest->lock, flags);
3311
Tejun Heo969c7922010-05-06 18:49:21 +02003312 /* don't kick the active_load_balance_cpu_stop,
3313 * if the curr task on busiest cpu can't be
3314 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003315 */
3316 if (!cpumask_test_cpu(this_cpu,
3317 &busiest->curr->cpus_allowed)) {
3318 raw_spin_unlock_irqrestore(&busiest->lock,
3319 flags);
3320 all_pinned = 1;
3321 goto out_one_pinned;
3322 }
3323
Tejun Heo969c7922010-05-06 18:49:21 +02003324 /*
3325 * ->active_balance synchronizes accesses to
3326 * ->active_balance_work. Once set, it's cleared
3327 * only after active load balance is finished.
3328 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003329 if (!busiest->active_balance) {
3330 busiest->active_balance = 1;
3331 busiest->push_cpu = this_cpu;
3332 active_balance = 1;
3333 }
3334 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02003335
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003336 if (active_balance)
Tejun Heo969c7922010-05-06 18:49:21 +02003337 stop_one_cpu_nowait(cpu_of(busiest),
3338 active_load_balance_cpu_stop, busiest,
3339 &busiest->active_balance_work);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003340
3341 /*
3342 * We've kicked active balancing, reset the failure
3343 * counter.
3344 */
3345 sd->nr_balance_failed = sd->cache_nice_tries+1;
3346 }
3347 } else
3348 sd->nr_balance_failed = 0;
3349
3350 if (likely(!active_balance)) {
3351 /* We were unbalanced, so reset the balancing interval */
3352 sd->balance_interval = sd->min_interval;
3353 } else {
3354 /*
3355 * If we've begun active balancing, start to back off. This
3356 * case may not be covered by the all_pinned logic if there
3357 * is only 1 task on the busy runqueue (because we don't call
3358 * move_tasks).
3359 */
3360 if (sd->balance_interval < sd->max_interval)
3361 sd->balance_interval *= 2;
3362 }
3363
3364 if (!ld_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3365 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3366 ld_moved = -1;
3367
3368 goto out;
3369
3370out_balanced:
3371 schedstat_inc(sd, lb_balanced[idle]);
3372
3373 sd->nr_balance_failed = 0;
3374
3375out_one_pinned:
3376 /* tune up the balancing interval */
3377 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3378 (sd->balance_interval < sd->max_interval))
3379 sd->balance_interval *= 2;
3380
3381 if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
3382 !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
3383 ld_moved = -1;
3384 else
3385 ld_moved = 0;
3386out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003387 return ld_moved;
3388}
3389
3390/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003391 * idle_balance is called by schedule() if this_cpu is about to become
3392 * idle. Attempts to pull tasks from other CPUs.
3393 */
3394static void idle_balance(int this_cpu, struct rq *this_rq)
3395{
3396 struct sched_domain *sd;
3397 int pulled_task = 0;
3398 unsigned long next_balance = jiffies + HZ;
3399
3400 this_rq->idle_stamp = this_rq->clock;
3401
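	/*
	 * If this cpu's average idle period is shorter than the cost of a
	 * task migration, pulling something over is unlikely to pay off, so
	 * skip newly-idle balancing altogether.
	 */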
3402 if (this_rq->avg_idle < sysctl_sched_migration_cost)
3403 return;
3404
Peter Zijlstraf492e122009-12-23 15:29:42 +01003405 /*
3406 * Drop the rq->lock, but keep IRQ/preempt disabled.
3407 */
3408 raw_spin_unlock(&this_rq->lock);
3409
Paul Turnerc66eaf62010-11-15 15:47:07 -08003410 update_shares(this_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003411 for_each_domain(this_cpu, sd) {
3412 unsigned long interval;
Peter Zijlstraf492e122009-12-23 15:29:42 +01003413 int balance = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003414
3415 if (!(sd->flags & SD_LOAD_BALANCE))
3416 continue;
3417
Peter Zijlstraf492e122009-12-23 15:29:42 +01003418 if (sd->flags & SD_BALANCE_NEWIDLE) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003419 /* If we've pulled tasks over stop searching: */
Peter Zijlstraf492e122009-12-23 15:29:42 +01003420 pulled_task = load_balance(this_cpu, this_rq,
3421 sd, CPU_NEWLY_IDLE, &balance);
3422 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003423
3424 interval = msecs_to_jiffies(sd->balance_interval);
3425 if (time_after(next_balance, sd->last_balance + interval))
3426 next_balance = sd->last_balance + interval;
Nikhil Raod5ad1402010-11-17 11:42:04 -08003427 if (pulled_task) {
3428 this_rq->idle_stamp = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003429 break;
Nikhil Raod5ad1402010-11-17 11:42:04 -08003430 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003431 }
Peter Zijlstraf492e122009-12-23 15:29:42 +01003432
3433 raw_spin_lock(&this_rq->lock);
3434
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003435 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
3436 /*
3437 * We are going idle. next_balance may be set based on
3438 * a busy processor. So reset next_balance.
3439 */
3440 this_rq->next_balance = next_balance;
3441 }
3442}
3443
3444/*
Tejun Heo969c7922010-05-06 18:49:21 +02003445 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
3446 * running tasks off the busiest CPU onto idle CPUs. It requires at
3447 * least 1 task to be running on each physical CPU where possible, and
3448 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003449 */
Tejun Heo969c7922010-05-06 18:49:21 +02003450static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003451{
Tejun Heo969c7922010-05-06 18:49:21 +02003452 struct rq *busiest_rq = data;
3453 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003454 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02003455 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003456 struct sched_domain *sd;
Tejun Heo969c7922010-05-06 18:49:21 +02003457
3458 raw_spin_lock_irq(&busiest_rq->lock);
3459
3460 /* make sure the requested cpu hasn't gone down in the meantime */
3461 if (unlikely(busiest_cpu != smp_processor_id() ||
3462 !busiest_rq->active_balance))
3463 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003464
3465 /* Is there any task to move? */
3466 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02003467 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003468
3469 /*
3470 * This condition is "impossible", if it occurs
3471 * we need to fix it. Originally reported by
3472 * Bjorn Helgaas on a 128-cpu setup.
3473 */
3474 BUG_ON(busiest_rq == target_rq);
3475
3476 /* move a task from busiest_rq to target_rq */
3477 double_lock_balance(busiest_rq, target_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003478
3479 /* Search for an sd spanning us and the target CPU. */
3480 for_each_domain(target_cpu, sd) {
3481 if ((sd->flags & SD_LOAD_BALANCE) &&
3482 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
3483 break;
3484 }
3485
3486 if (likely(sd)) {
3487 schedstat_inc(sd, alb_count);
3488
3489 if (move_one_task(target_rq, target_cpu, busiest_rq,
3490 sd, CPU_IDLE))
3491 schedstat_inc(sd, alb_pushed);
3492 else
3493 schedstat_inc(sd, alb_failed);
3494 }
3495 double_unlock_balance(busiest_rq, target_rq);
Tejun Heo969c7922010-05-06 18:49:21 +02003496out_unlock:
3497 busiest_rq->active_balance = 0;
3498 raw_spin_unlock_irq(&busiest_rq->lock);
3499 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003500}
3501
3502#ifdef CONFIG_NO_HZ
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003503
3504static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);
3505
3506static void trigger_sched_softirq(void *data)
3507{
3508 raise_softirq_irqoff(SCHED_SOFTIRQ);
3509}
3510
3511static inline void init_sched_softirq_csd(struct call_single_data *csd)
3512{
3513 csd->func = trigger_sched_softirq;
3514 csd->info = NULL;
3515 csd->flags = 0;
3516 csd->priv = 0;
3517}
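/*
 * nohz_balancer_kick() sends the kicker's csd above to the chosen idle load
 * balancer; the IPI handler simply raises SCHED_SOFTIRQ there, so that
 * run_rebalance_domains() ends up doing the idle rebalance on that cpu.
 */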
3518
3519/*
3520 * idle load balancing details
3521 * - One of the idle CPUs nominates itself as idle load_balancer, while
3522 * entering idle.
3523 * - This idle load balancer CPU will also go into tickless mode when
3524 * it is idle, just like all other idle CPUs
 3525 * - When one of the busy CPUs notices that idle rebalancing may be
 3526 *   needed, it kicks the idle load balancer, which then does idle
3527 * load balancing for all the idle CPUs.
3528 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003529static struct {
3530 atomic_t load_balancer;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003531 atomic_t first_pick_cpu;
3532 atomic_t second_pick_cpu;
3533 cpumask_var_t idle_cpus_mask;
3534 cpumask_var_t grp_idle_mask;
3535 unsigned long next_balance; /* in jiffy units */
3536} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003537
3538int get_nohz_load_balancer(void)
3539{
3540 return atomic_read(&nohz.load_balancer);
3541}
3542
3543#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3544/**
3545 * lowest_flag_domain - Return lowest sched_domain containing flag.
3546 * @cpu: The cpu whose lowest level of sched domain is to
3547 * be returned.
3548 * @flag: The flag to check for the lowest sched_domain
3549 * for the given cpu.
3550 *
3551 * Returns the lowest sched_domain of a cpu which contains the given flag.
3552 */
3553static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
3554{
3555 struct sched_domain *sd;
3556
3557 for_each_domain(cpu, sd)
3558 if (sd && (sd->flags & flag))
3559 break;
3560
3561 return sd;
3562}
3563
3564/**
3565 * for_each_flag_domain - Iterates over sched_domains containing the flag.
3566 * @cpu: The cpu whose domains we're iterating over.
3567 * @sd: variable holding the value of the power_savings_sd
3568 * for cpu.
3569 * @flag: The flag to filter the sched_domains to be iterated.
3570 *
3571 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
3572 * set, starting from the lowest sched_domain to the highest.
3573 */
3574#define for_each_flag_domain(cpu, sd, flag) \
3575 for (sd = lowest_flag_domain(cpu, flag); \
3576 (sd && (sd->flags & flag)); sd = sd->parent)
3577
3578/**
3579 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
3580 * @ilb_group: group to be checked for semi-idleness
3581 *
3582 * Returns: 1 if the group is semi-idle. 0 otherwise.
3583 *
 3584 * We define a sched_group to be semi-idle if it has at least one idle CPU
 3585 * and at least one non-idle CPU. This helper function checks if the given
3586 * sched_group is semi-idle or not.
3587 */
3588static inline int is_semi_idle_group(struct sched_group *ilb_group)
3589{
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003590 cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003591 sched_group_cpus(ilb_group));
3592
3593 /*
 3594	 * A sched_group is semi-idle when it has at least one busy cpu
 3595	 * and at least one idle cpu.
3596 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003597 if (cpumask_empty(nohz.grp_idle_mask))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003598 return 0;
3599
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003600 if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003601 return 0;
3602
3603 return 1;
3604}
3605/**
3606 * find_new_ilb - Finds the optimum idle load balancer for nomination.
3607 * @cpu: The cpu which is nominating a new idle_load_balancer.
3608 *
3609 * Returns: Returns the id of the idle load balancer if it exists,
3610 * Else, returns >= nr_cpu_ids.
3611 *
3612 * This algorithm picks the idle load balancer such that it belongs to a
 3613 * semi-idle powersavings sched_domain. The idea is to try to avoid
 3614 * completely idle packages/cores just for the purpose of idle load balancing
 3615 * when there are other idle cpus which are better suited for that job.
3616 */
3617static int find_new_ilb(int cpu)
3618{
3619 struct sched_domain *sd;
3620 struct sched_group *ilb_group;
3621
3622 /*
3623 * Have idle load balancer selection from semi-idle packages only
3624 * when power-aware load balancing is enabled
3625 */
3626 if (!(sched_smt_power_savings || sched_mc_power_savings))
3627 goto out_done;
3628
3629 /*
3630 * Optimize for the case when we have no idle CPUs or only one
3631 * idle CPU. Don't walk the sched_domain hierarchy in such cases
3632 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003633 if (cpumask_weight(nohz.idle_cpus_mask) < 2)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003634 goto out_done;
3635
3636 for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
3637 ilb_group = sd->groups;
3638
3639 do {
3640 if (is_semi_idle_group(ilb_group))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003641 return cpumask_first(nohz.grp_idle_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003642
3643 ilb_group = ilb_group->next;
3644
3645 } while (ilb_group != sd->groups);
3646 }
3647
3648out_done:
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003649 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003650}
3651#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
3652static inline int find_new_ilb(int call_cpu)
3653{
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003654 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003655}
3656#endif
3657
3658/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003659 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
 3660 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
3661 * CPU (if there is one).
3662 */
3663static void nohz_balancer_kick(int cpu)
3664{
3665 int ilb_cpu;
3666
3667 nohz.next_balance++;
3668
3669 ilb_cpu = get_nohz_load_balancer();
3670
3671 if (ilb_cpu >= nr_cpu_ids) {
3672 ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
3673 if (ilb_cpu >= nr_cpu_ids)
3674 return;
3675 }
3676
3677 if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
3678 struct call_single_data *cp;
3679
3680 cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
3681 cp = &per_cpu(remote_sched_softirq_cb, cpu);
3682 __smp_call_function_single(ilb_cpu, cp, 0);
3683 }
3684 return;
3685}
3686
3687/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003688 * This routine will try to nominate the ilb (idle load balancing)
 3689 * owner among the cpus whose ticks are stopped. The ilb owner will do the idle
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003690 * load balancing on behalf of all those cpus.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003691 *
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003692 * When the ilb owner becomes busy, we will not have a new ilb owner until some
3693 * idle CPU wakes up and goes back to idle or some busy CPU tries to kick
3694 * idle load balancing by kicking one of the idle CPUs.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003695 *
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003696 * Ticks are stopped for the ilb owner as well, with a busy CPU kicking this
 3697 * ilb owner CPU in the future (when there is a need for idle load balancing on
3698 * behalf of all idle CPUs).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003699 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003700void select_nohz_load_balancer(int stop_tick)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003701{
3702 int cpu = smp_processor_id();
3703
3704 if (stop_tick) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003705 if (!cpu_active(cpu)) {
3706 if (atomic_read(&nohz.load_balancer) != cpu)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003707 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003708
3709 /*
3710 * If we are going offline and still the leader,
3711 * give up!
3712 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003713 if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3714 nr_cpu_ids) != cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003715 BUG();
3716
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003717 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003718 }
3719
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003720 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003721
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003722 if (atomic_read(&nohz.first_pick_cpu) == cpu)
3723 atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
3724 if (atomic_read(&nohz.second_pick_cpu) == cpu)
3725 atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003726
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003727 if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003728 int new_ilb;
3729
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003730 /* make me the ilb owner */
3731 if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
3732 cpu) != nr_cpu_ids)
3733 return;
3734
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003735 /*
3736 * Check to see if there is a more power-efficient
3737 * ilb.
3738 */
3739 new_ilb = find_new_ilb(cpu);
3740 if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003741 atomic_set(&nohz.load_balancer, nr_cpu_ids);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003742 resched_cpu(new_ilb);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003743 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003744 }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003745 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003746 }
3747 } else {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003748 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
3749 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003750
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003751 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003752
3753 if (atomic_read(&nohz.load_balancer) == cpu)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003754 if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3755 nr_cpu_ids) != cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003756 BUG();
3757 }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003758 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003759}
3760#endif
3761
3762static DEFINE_SPINLOCK(balancing);
3763
3764/*
3765 * It checks each scheduling domain to see if it is due to be balanced,
3766 * and initiates a balancing operation if so.
3767 *
3768 * Balancing parameters are set up in arch_init_sched_domains.
3769 */
3770static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3771{
3772 int balance = 1;
3773 struct rq *rq = cpu_rq(cpu);
3774 unsigned long interval;
3775 struct sched_domain *sd;
3776 /* Earliest time when we have to do rebalance again */
3777 unsigned long next_balance = jiffies + 60*HZ;
3778 int update_next_balance = 0;
3779 int need_serialize;
3780
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003781 update_shares(cpu);
3782
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003783 for_each_domain(cpu, sd) {
3784 if (!(sd->flags & SD_LOAD_BALANCE))
3785 continue;
3786
3787 interval = sd->balance_interval;
3788 if (idle != CPU_IDLE)
3789 interval *= sd->busy_factor;
3790
3791 /* scale ms to jiffies */
3792 interval = msecs_to_jiffies(interval);
3793 if (unlikely(!interval))
3794 interval = 1;
3795 if (interval > HZ*NR_CPUS/10)
3796 interval = HZ*NR_CPUS/10;
3797
3798 need_serialize = sd->flags & SD_SERIALIZE;
3799
3800 if (need_serialize) {
3801 if (!spin_trylock(&balancing))
3802 goto out;
3803 }
3804
3805 if (time_after_eq(jiffies, sd->last_balance + interval)) {
3806 if (load_balance(cpu, rq, sd, idle, &balance)) {
3807 /*
3808 * We've pulled tasks over so either we're no
3809 * longer idle, or one of our SMT siblings is
3810 * not idle.
3811 */
3812 idle = CPU_NOT_IDLE;
3813 }
3814 sd->last_balance = jiffies;
3815 }
3816 if (need_serialize)
3817 spin_unlock(&balancing);
3818out:
3819 if (time_after(next_balance, sd->last_balance + interval)) {
3820 next_balance = sd->last_balance + interval;
3821 update_next_balance = 1;
3822 }
3823
3824 /*
3825 * Stop the load balance at this level. There is another
3826 * CPU in our sched group which is doing load balancing more
3827 * actively.
3828 */
3829 if (!balance)
3830 break;
3831 }
3832
3833 /*
3834 * next_balance will be updated only when there is a need.
 3835	 * When the cpu is attached to a null domain, for example, it will not be
3836 * updated.
3837 */
3838 if (likely(update_next_balance))
3839 rq->next_balance = next_balance;
3840}
3841
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003842#ifdef CONFIG_NO_HZ
3843/*
 3844 * In the CONFIG_NO_HZ case, the idle balance kickee will do the
3845 * rebalancing for all the cpus for whom scheduler ticks are stopped.
3846 */
3847static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
3848{
3849 struct rq *this_rq = cpu_rq(this_cpu);
3850 struct rq *rq;
3851 int balance_cpu;
3852
3853 if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
3854 return;
3855
3856 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
3857 if (balance_cpu == this_cpu)
3858 continue;
3859
3860 /*
3861 * If this cpu gets work to do, stop the load balancing
3862 * work being done for other cpus. Next load
3863 * balancing owner will pick it up.
3864 */
3865 if (need_resched()) {
3866 this_rq->nohz_balance_kick = 0;
3867 break;
3868 }
3869
3870 raw_spin_lock_irq(&this_rq->lock);
Suresh Siddha5343bdb2010-07-09 15:19:54 +02003871 update_rq_clock(this_rq);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003872 update_cpu_load(this_rq);
3873 raw_spin_unlock_irq(&this_rq->lock);
3874
3875 rebalance_domains(balance_cpu, CPU_IDLE);
3876
3877 rq = cpu_rq(balance_cpu);
3878 if (time_after(this_rq->next_balance, rq->next_balance))
3879 this_rq->next_balance = rq->next_balance;
3880 }
3881 nohz.next_balance = this_rq->next_balance;
3882 this_rq->nohz_balance_kick = 0;
3883}
3884
3885/*
3886 * Current heuristic for kicking the idle load balancer
 3887 * - first_pick_cpu is one of the busy CPUs. It will kick
3888 * idle load balancer when it has more than one process active. This
3889 * eliminates the need for idle load balancing altogether when we have
3890 * only one running process in the system (common case).
 3891 * - If there is more than one busy CPU, the idle load balancer may have
3892 * to run for active_load_balance to happen (i.e., two busy CPUs are
3893 * SMT or core siblings and can run better if they move to different
3894 * physical CPUs). So, second_pick_cpu is the second of the busy CPUs
3895 * which will kick idle load balancer as soon as it has any load.
3896 */
3897static inline int nohz_kick_needed(struct rq *rq, int cpu)
3898{
3899 unsigned long now = jiffies;
3900 int ret;
3901 int first_pick_cpu, second_pick_cpu;
3902
3903 if (time_before(now, nohz.next_balance))
3904 return 0;
3905
Suresh Siddhaf6c3f162010-09-13 11:02:21 -07003906 if (rq->idle_at_tick)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003907 return 0;
3908
3909 first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
3910 second_pick_cpu = atomic_read(&nohz.second_pick_cpu);
3911
3912 if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
3913 second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
3914 return 0;
3915
3916 ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
3917 if (ret == nr_cpu_ids || ret == cpu) {
3918 atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
3919 if (rq->nr_running > 1)
3920 return 1;
3921 } else {
3922 ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
3923 if (ret == nr_cpu_ids || ret == cpu) {
3924 if (rq->nr_running)
3925 return 1;
3926 }
3927 }
3928 return 0;
3929}
3930#else
3931static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
3932#endif
3933
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003934/*
3935 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003936 * Also triggered for nohz idle balancing (with nohz_balance_kick set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003937 */
3938static void run_rebalance_domains(struct softirq_action *h)
3939{
3940 int this_cpu = smp_processor_id();
3941 struct rq *this_rq = cpu_rq(this_cpu);
3942 enum cpu_idle_type idle = this_rq->idle_at_tick ?
3943 CPU_IDLE : CPU_NOT_IDLE;
3944
3945 rebalance_domains(this_cpu, idle);
3946
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003947 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003948 * If this cpu has a pending nohz_balance_kick, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003949 * balancing on behalf of the other idle cpus whose ticks are
3950 * stopped.
3951 */
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003952 nohz_idle_balance(this_cpu, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003953}
3954
3955static inline int on_null_domain(int cpu)
3956{
Paul E. McKenney90a65012010-02-28 08:32:18 -08003957 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003958}
3959
3960/*
3961 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003962 */
3963static inline void trigger_load_balance(struct rq *rq, int cpu)
3964{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003965 /* Don't need to rebalance while attached to NULL domain */
3966 if (time_after_eq(jiffies, rq->next_balance) &&
3967 likely(!on_null_domain(cpu)))
3968 raise_softirq(SCHED_SOFTIRQ);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07003969#ifdef CONFIG_NO_HZ
3970 else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
3971 nohz_balancer_kick(cpu);
3972#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003973}
3974
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01003975static void rq_online_fair(struct rq *rq)
3976{
3977 update_sysctl();
3978}
3979
3980static void rq_offline_fair(struct rq *rq)
3981{
3982 update_sysctl();
3983}
3984
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01003985#else /* CONFIG_SMP */
3986
3987/*
3988 * on UP we do not need to balance between CPUs:
3989 */
3990static inline void idle_balance(int cpu, struct rq *rq)
3991{
3992}
3993
Dhaval Giani55e12e52008-06-24 23:39:43 +05303994#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02003995
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003996/*
3997 * scheduler tick hitting a task of our scheduling class:
3998 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003999static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004000{
4001 struct cfs_rq *cfs_rq;
4002 struct sched_entity *se = &curr->se;
4003
4004 for_each_sched_entity(se) {
4005 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004006 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004007 }
4008}
4009
4010/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01004011 * called on fork with the child task as argument from the parent's context
4012 * - child not yet on the tasklist
4013 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004014 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01004015static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004016{
Peter Zijlstracd29fe62009-11-27 17:32:46 +01004017 struct cfs_rq *cfs_rq = task_cfs_rq(current);
Ingo Molnar429d43b2007-10-15 17:00:03 +02004018 struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02004019 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01004020 struct rq *rq = this_rq();
4021 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004022
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004023 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01004024
Peter Zijlstra861d0342010-08-19 13:31:43 +02004025 update_rq_clock(rq);
4026
Paul E. McKenneyb0a0f662010-10-06 17:32:51 -07004027 if (unlikely(task_cpu(p) != this_cpu)) {
4028 rcu_read_lock();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01004029 __set_task_cpu(p, this_cpu);
Paul E. McKenneyb0a0f662010-10-06 17:32:51 -07004030 rcu_read_unlock();
4031 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004032
Ting Yang7109c442007-08-28 12:53:24 +02004033 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01004034
Mike Galbraithb5d9d732009-09-08 11:12:28 +02004035 if (curr)
4036 se->vruntime = curr->vruntime;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02004037 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02004038
Peter Zijlstracd29fe62009-11-27 17:32:46 +01004039 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02004040 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02004041 * Upon rescheduling, sched_class::put_prev_task() will place
4042 * 'current' within the tree based on its new key value.
4043 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02004044 swap(curr->vruntime, se->vruntime);
Bharata B Raoaec0a512008-08-28 14:42:49 +05304045 resched_task(rq->curr);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02004046 }
4047
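	/*
	 * Make the child's vruntime relative to this runqueue's
	 * min_vruntime; it is re-normalized against whichever runqueue the
	 * child is eventually enqueued on, so the value stays meaningful
	 * across a cross-cpu placement at wakeup.
	 */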
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004048 se->vruntime -= cfs_rq->min_vruntime;
4049
Thomas Gleixner05fa7852009-11-17 14:28:38 +01004050 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004051}
4052
Steven Rostedtcb469842008-01-25 21:08:22 +01004053/*
4054 * Priority of the task has changed. Check to see if we preempt
4055 * the current task.
4056 */
4057static void prio_changed_fair(struct rq *rq, struct task_struct *p,
4058 int oldprio, int running)
4059{
4060 /*
4061 * Reschedule if we are currently running on this runqueue and
4062 * our priority decreased, or if we are not currently running on
4063 * this runqueue and our priority is higher than the current's
4064 */
4065 if (running) {
4066 if (p->prio > oldprio)
4067 resched_task(rq->curr);
4068 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02004069 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004070}
4071
4072/*
4073 * We switched to the sched_fair class.
4074 */
4075static void switched_to_fair(struct rq *rq, struct task_struct *p,
4076 int running)
4077{
4078 /*
4079 * We were most likely switched from sched_rt, so
4080 * kick off the schedule if running, otherwise just see
4081 * if we can still preempt the current task.
4082 */
4083 if (running)
4084 resched_task(rq->curr);
4085 else
Peter Zijlstra15afe092008-09-20 23:38:02 +02004086 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01004087}
4088
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004089/* Account for a task changing its policy or group.
4090 *
4091 * This routine is mostly called to set cfs_rq->curr field when a task
4092 * migrates between groups/classes.
4093 */
4094static void set_curr_task_fair(struct rq *rq)
4095{
4096 struct sched_entity *se = &rq->curr->se;
4097
4098 for_each_sched_entity(se)
4099 set_next_entity(cfs_rq_of(se), se);
4100}
4101
Peter Zijlstra810b3812008-02-29 15:21:01 -05004102#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02004103static void task_move_group_fair(struct task_struct *p, int on_rq)
Peter Zijlstra810b3812008-02-29 15:21:01 -05004104{
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02004105 /*
4106 * If the task was not on the rq at the time of this cgroup movement
4107 * it must have been asleep, sleeping tasks keep their ->vruntime
4108 * absolute on their old rq until wakeup (needed for the fair sleeper
4109 * bonus in place_entity()).
4110 *
4111 * If it was on the rq, we've just 'preempted' it, which does convert
4112 * ->vruntime to a relative base.
4113 *
4114 * Make sure both cases convert their relative position when migrating
4115 * to another cgroup's rq. This does somewhat interfere with the
4116 * fair sleeper stuff for the first placement, but who cares.
4117 */
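	/*
	 * Hence: for a sleeping (!on_rq) task, make ->vruntime relative
	 * to the old cfs_rq, switch to the new group's cfs_rq with
	 * set_task_rq(), then re-base onto the new cfs_rq's
	 * min_vruntime. A queued task already holds a relative
	 * ->vruntime and only needs the group switch.
	 */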
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004118 if (!on_rq)
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02004119 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
4120 set_task_rq(p, task_cpu(p));
4121 if (!on_rq)
4122 p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
Peter Zijlstra810b3812008-02-29 15:21:01 -05004123}
4124#endif
4125
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07004126static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00004127{
4128 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00004129 unsigned int rr_interval = 0;
4130
4131 /*
4132 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
4133 * idle runqueue:
4134 */
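	/*
	 * Otherwise report this task's share of the latency period, as
	 * computed by sched_slice(), converted to jiffies.
	 */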
Peter Williams0d721ce2009-09-21 01:31:53 +00004135 if (rq->cfs.load.weight)
4136 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
Peter Williams0d721ce2009-09-21 01:31:53 +00004137
4138 return rr_interval;
4139}
4140
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004141/*
4142 * All the scheduling class methods:
4143 */
Ingo Molnar5522d5d2007-10-15 17:00:12 +02004144static const struct sched_class fair_sched_class = {
4145 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004146 .enqueue_task = enqueue_task_fair,
4147 .dequeue_task = dequeue_task_fair,
4148 .yield_task = yield_task_fair,
4149
Ingo Molnar2e09bf52007-10-15 17:00:05 +02004150 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004151
4152 .pick_next_task = pick_next_task_fair,
4153 .put_prev_task = put_prev_task_fair,
4154
Peter Williams681f3e62007-10-24 18:23:51 +02004155#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08004156 .select_task_rq = select_task_rq_fair,
4157
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01004158 .rq_online = rq_online_fair,
4159 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004160
4161 .task_waking = task_waking_fair,
Peter Williams681f3e62007-10-24 18:23:51 +02004162#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004163
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02004164 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004165 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01004166 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01004167
4168 .prio_changed = prio_changed_fair,
4169 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05004170
Peter Williams0d721ce2009-09-21 01:31:53 +00004171 .get_rr_interval = get_rr_interval_fair,
4172
Peter Zijlstra810b3812008-02-29 15:21:01 -05004173#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02004174 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05004175#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004176};
4177
4178#ifdef CONFIG_SCHED_DEBUG
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02004179static void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004180{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004181 struct cfs_rq *cfs_rq;
4182
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01004183 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02004184 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02004185 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01004186 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004187}
4188#endif