/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
        = SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: in the case that the slice exceeds the runtime remaining (either due
 * to consumption or the quota being specified to be smaller than the slice)
 * we will always only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
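
/*
 * Illustrative example (not part of the original source): with
 * cpu.cfs_period_us = 100000 and cpu.cfs_quota_us = 20000, a busy cfs_rq
 * refills its local pool in chunks of min(5ms, remaining quota), i.e. at
 * most four 5ms slices per 100ms period before it is throttled.
 */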
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
        lw->weight += inc;
        lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
        lw->weight -= dec;
        lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
        lw->weight = w;
        lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static int get_update_sysctl_factor(void)
{
        unsigned int cpus = min_t(int, num_online_cpus(), 8);
        unsigned int factor;

        switch (sysctl_sched_tunable_scaling) {
        case SCHED_TUNABLESCALING_NONE:
                factor = 1;
                break;
        case SCHED_TUNABLESCALING_LINEAR:
                factor = cpus;
                break;
        case SCHED_TUNABLESCALING_LOG:
        default:
                factor = 1 + ilog2(cpus);
                break;
        }

        return factor;
}
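
/*
 * Illustrative example (not part of the original source): with the default
 * SCHED_TUNABLESCALING_LOG policy and 8 (or more) online CPUs, the factor
 * is 1 + ilog2(8) = 4, so update_sysctl() yields:
 *
 *   sysctl_sched_latency            = 4 * 6ms    = 24ms
 *   sysctl_sched_min_granularity    = 4 * 0.75ms = 3ms
 *   sysctl_sched_wakeup_granularity = 4 * 1ms    = 4ms
 *
 * The CPU count is capped at 8, so larger machines use the same factor.
 */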

static void update_sysctl(void)
{
        unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
        (sysctl_##name = (factor) * normalized_sysctl_##name)
        SET_SYSCTL(sched_min_granularity);
        SET_SYSCTL(sched_latency);
        SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
        update_sysctl();
}

#define WMULT_CONST     (~0U)
#define WMULT_SHIFT     32

static void __update_inv_weight(struct load_weight *lw)
{
        unsigned long w;

        if (likely(lw->inv_weight))
                return;

        w = scale_load_down(lw->weight);

        if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
                lw->inv_weight = 1;
        else if (unlikely(!w))
                lw->inv_weight = WMULT_CONST;
        else
                lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight =< lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
        u64 fact = scale_load_down(weight);
        int shift = WMULT_SHIFT;

        __update_inv_weight(lw);

        if (unlikely(fact >> 32)) {
                while (fact >> 32) {
                        fact >>= 1;
                        shift--;
                }
        }

        /* hint to use a 32x32->64 mul */
        fact = (u64)(u32)fact * lw->inv_weight;

        while (fact >> 32) {
                fact >>= 1;
                shift--;
        }

        return mul_u64_u32_shr(delta_exec, fact, shift);
}

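/*
 * Illustrative example (not part of the original source): charging 6ms of
 * wall time to a nice-0 task (weight 1024) on a runqueue with total weight
 * 3072. __update_inv_weight() gives inv_weight = 0xffffffff / 3072 ~= 1398101,
 * fact = 1024 * 1398101 ~= 1431655424 (still fits in 32 bits, shift stays 32):
 *
 *   __calc_delta(6000000, 1024, &lw) ~= (6000000 * 1431655424) >> 32
 *                                    ~= 2000000
 *
 * which matches delta_exec * weight / lw.weight = 6ms * 1024 / 3072 = 2ms.
 */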

const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
        WARN_ON_ONCE(!entity_is_task(se));
#endif
        return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
                                       int force_update);

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (!cfs_rq->on_list) {
                /*
                 * Ensure we either appear before our parent (if already
                 * enqueued) or force our parent to appear after us when it is
                 * enqueued. The fact that we always enqueue bottom-up
                 * reduces this to two cases.
                 */
                if (cfs_rq->tg->parent &&
                    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
                        list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                } else {
                        list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
                                &rq_of(cfs_rq)->leaf_cfs_rq_list);
                }

                cfs_rq->on_list = 1;
                /* We should have no load, but we need to update last_decay. */
                update_cfs_rq_blocked_load(cfs_rq, 0);
        }
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
        if (cfs_rq->on_list) {
                list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
                cfs_rq->on_list = 0;
        }
}

/* Iterate thr' all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        if (se->cfs_rq == pse->cfs_rq)
                return 1;

        return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
        int depth = 0;

        for_each_sched_entity(se)
                depth++;

        return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
        int se_depth, pse_depth;

        /*
         * The preemption test can be made between sibling entities that are in
         * the same cfs_rq, i.e. that have a common parent. Walk up the
         * hierarchy of both tasks until we find their ancestors that are
         * siblings of a common parent.
         */

        /* First walk up until both entities are at same depth */
        se_depth = depth_se(*se);
        pse_depth = depth_se(*pse);

        while (se_depth > pse_depth) {
                se_depth--;
                *se = parent_entity(*se);
        }

        while (pse_depth > se_depth) {
                pse_depth--;
                *pse = parent_entity(*pse);
        }

        while (!is_same_group(*se, *pse)) {
                *se = parent_entity(*se);
                *pse = parent_entity(*pse);
        }
}

#else   /* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)      1

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
        return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
        return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - max_vruntime);
        if (delta > 0)
                max_vruntime = vruntime;

        return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
        s64 delta = (s64)(vruntime - min_vruntime);
        if (delta < 0)
                min_vruntime = vruntime;

        return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
                                struct sched_entity *b)
{
        return (s64)(a->vruntime - b->vruntime) < 0;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
        u64 vruntime = cfs_rq->min_vruntime;

        if (cfs_rq->curr)
                vruntime = cfs_rq->curr->vruntime;

        if (cfs_rq->rb_leftmost) {
                struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
                                                   struct sched_entity,
                                                   run_node);

                if (!cfs_rq->curr)
                        vruntime = se->vruntime;
                else
                        vruntime = min_vruntime(vruntime, se->vruntime);
        }

        /* ensure we never gain time by being placed backwards. */
        cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
        smp_wmb();
        cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (entity_before(se, entry)) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node) {
                struct rb_node *next_node;

                next_node = rb_next(&se->run_node);
                cfs_rq->rb_leftmost = next_node;
        }

        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *left = cfs_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
        struct rb_node *next = rb_next(&se->run_node);

        if (!next)
                return NULL;

        return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
        struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

        if (!last)
                return NULL;

        return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *lenp,
                loff_t *ppos)
{
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        int factor = get_update_sysctl_factor();

        if (ret || !write)
                return ret;

        sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
                                        sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
        (normalized_sysctl_##name = sysctl_##name / (factor))
        WRT_SYSCTL(sched_min_granularity);
        WRT_SYSCTL(sched_latency);
        WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

        return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

        return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
        u64 period = sysctl_sched_latency;
        unsigned long nr_latency = sched_nr_latency;

        if (unlikely(nr_running > nr_latency)) {
                period = sysctl_sched_min_granularity;
                period *= nr_running;
        }

        return period;
}
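
/*
 * Illustrative example (not part of the original source), ignoring the
 * ilog(ncpus) scaling of the tunables: with sysctl_sched_latency = 6ms,
 * sched_nr_latency = 8 and sysctl_sched_min_granularity = 0.75ms:
 *
 *   4 runnable tasks  -> period = 6ms
 *   16 runnable tasks -> period = 16 * 0.75ms = 12ms
 *
 * i.e. once more than 8 tasks are runnable the period stretches so that no
 * slice drops below the minimum granularity.
 */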

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

        for_each_sched_entity(se) {
                struct load_weight *load;
                struct load_weight lw;

                cfs_rq = cfs_rq_of(se);
                load = &cfs_rq->load;

                if (unlikely(!se->on_rq)) {
                        lw = cfs_rq->load;

                        update_load_add(&lw, se->load.weight);
                        load = &lw;
                }
                slice = __calc_delta(slice, se->load.weight, load);
        }
        return slice;
}

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
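
/*
 * Illustrative example (not part of the original source): three runnable
 * nice-0 tasks (weight 1024 each, runqueue weight 3072) and a 6ms period:
 *
 *   sched_slice()  = 6ms * 1024/3072 = 2ms of wall time per task
 *   sched_vslice() = 2ms * 1024/1024 = 2ms of vruntime
 *
 * A heavier (lower nice) task would get a proportionally larger wall-time
 * slice but the same vruntime slice, since its vruntime advances more
 * slowly per unit of wall time.
 */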

#ifdef CONFIG_SMP
static unsigned long task_h_load(struct task_struct *p);

static inline void __update_task_entity_contrib(struct sched_entity *se);

/* Give a new task starting runnable values so it is weighted as fully loaded in its infancy */
void init_task_runnable_average(struct task_struct *p)
{
        u32 slice;

        p->se.avg.decay_count = 0;
        slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
        p->se.avg.runnable_avg_sum = slice;
        p->se.avg.runnable_avg_period = slice;
        __update_task_entity_contrib(&p->se);
}
#else
void init_task_runnable_average(struct task_struct *p)
{
}
#endif

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq->curr;
        u64 now = rq_clock_task(rq_of(cfs_rq));
        u64 delta_exec;

        if (unlikely(!curr))
                return;

        delta_exec = now - curr->exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;

        curr->exec_start = now;

        schedstat_set(curr->statistics.exec_max,
                      max(delta_exec, curr->statistics.exec_max));

        curr->sum_exec_runtime += delta_exec;
        schedstat_add(cfs_rq, exec_clock, delta_exec);

        curr->vruntime += calc_delta_fair(delta_exec, curr);
        update_min_vruntime(cfs_rq);

        if (entity_is_task(curr)) {
                struct task_struct *curtask = task_of(curr);

                trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
                cpuacct_charge(curtask, delta_exec);
                account_group_exec_runtime(curtask, delta_exec);
        }

        account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
        schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
        schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
        if (entity_is_task(se)) {
                trace_sched_stat_wait(task_of(se),
                        rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
        }
#endif
        schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
        unsigned long rss = 0;
        unsigned long nr_scan_pages;

        /*
         * Calculations are based on RSS, as non-present and empty pages are
         * skipped by the PTE scanner and NUMA hinting faults should be trapped
         * based on resident pages.
         */
        nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
        rss = get_mm_rss(p->mm);
        if (!rss)
                rss = nr_scan_pages;

        rss = round_up(rss, nr_scan_pages);
        return rss / nr_scan_pages;
}

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
        unsigned int scan, floor;
        unsigned int windows = 1;

        if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
                windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
        floor = 1000 / windows;

        scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
        return max_t(unsigned int, floor, scan);
}

static unsigned int task_scan_max(struct task_struct *p)
{
        unsigned int smin = task_scan_min(p);
        unsigned int smax;

        /* Watch for min being lower than max due to floor calculations */
        smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
        return max(smin, smax);
}
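
/*
 * Illustrative example (not part of the original source), assuming 4K pages:
 * with scan_size = 256MB one scan window covers 65536 pages, so a task with
 * a 1GB RSS has task_nr_scan_windows() = 4 and:
 *
 *   floor           = 1000ms / (2560/256)     = 100ms
 *   task_scan_min() = max(100ms, 1000ms / 4)  = 250ms
 *   task_scan_max() = max(250ms, 60000ms / 4) = 15000ms
 *
 * A larger RSS means more windows and thus a shorter minimum scan period
 * (more frequent scanning), clamped by the 100ms floor derived from
 * MAX_SCAN_WINDOW.
 */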

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
        rq->nr_numa_running += (p->numa_preferred_nid != -1);
        rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
        rq->nr_numa_running -= (p->numa_preferred_nid != -1);
        rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

struct numa_group {
        atomic_t refcount;

        spinlock_t lock; /* nr_tasks, tasks */
        int nr_tasks;
        pid_t gid;
        struct list_head task_list;

        struct rcu_head rcu;
        unsigned long total_faults;
        unsigned long faults[0];
};

pid_t task_numa_group_id(struct task_struct *p)
{
        return p->numa_group ? p->numa_group->gid : 0;
}

static inline int task_faults_idx(int nid, int priv)
{
        return 2 * nid + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
        if (!p->numa_faults_memory)
                return 0;

        return p->numa_faults_memory[task_faults_idx(nid, 0)] +
                p->numa_faults_memory[task_faults_idx(nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
        if (!p->numa_group)
                return 0;

        return p->numa_group->faults[task_faults_idx(nid, 0)] +
                p->numa_group->faults[task_faults_idx(nid, 1)];
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid)
{
        unsigned long total_faults;

        if (!p->numa_faults_memory)
                return 0;

        total_faults = p->total_numa_faults;

        if (!total_faults)
                return 0;

        return 1000 * task_faults(p, nid) / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid)
{
        if (!p->numa_group || !p->numa_group->total_faults)
                return 0;

        return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
}

static unsigned long weighted_cpuload(const int cpu);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long power_of(int cpu);
static long effective_load(struct task_group *tg, int cpu, long wl, long wg);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
        unsigned long nr_running;
        unsigned long load;

        /* Total compute capacity of CPUs on a node */
        unsigned long power;

        /* Approximate capacity in terms of runnable tasks on a node */
        unsigned long capacity;
        int has_capacity;
};

/*
 * XXX borrowed from update_sg_lb_stats
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
        int cpu, cpus = 0;

        memset(ns, 0, sizeof(*ns));
        for_each_cpu(cpu, cpumask_of_node(nid)) {
                struct rq *rq = cpu_rq(cpu);

                ns->nr_running += rq->nr_running;
                ns->load += weighted_cpuload(cpu);
                ns->power += power_of(cpu);

                cpus++;
        }

        /*
         * If we raced with hotplug and there are no CPUs left in our mask
         * the @ns structure is NULL'ed and task_numa_compare() will
         * not find this node attractive.
         *
         * We'll either bail at !has_capacity, or we'll detect a huge imbalance
         * and bail there.
         */
        if (!cpus)
                return;

        ns->load = (ns->load * SCHED_POWER_SCALE) / ns->power;
        ns->capacity = DIV_ROUND_CLOSEST(ns->power, SCHED_POWER_SCALE);
        ns->has_capacity = (ns->nr_running < ns->capacity);
}

struct task_numa_env {
        struct task_struct *p;

        int src_cpu, src_nid;
        int dst_cpu, dst_nid;

        struct numa_stats src_stats, dst_stats;

        int imbalance_pct;

        struct task_struct *best_task;
        long best_imp;
        int best_cpu;
};

static void task_numa_assign(struct task_numa_env *env,
                             struct task_struct *p, long imp)
{
        if (env->best_task)
                put_task_struct(env->best_task);
        if (p)
                get_task_struct(p);

        env->best_task = p;
        env->best_imp = imp;
        env->best_cpu = env->dst_cpu;
}

/*
 * This checks if the overall compute and NUMA accesses of the system would
 * be improved if the source task was migrated to the target dst_cpu, taking
 * into account that it might be best if the task running on the dst_cpu is
 * exchanged with the source task.
 */
static void task_numa_compare(struct task_numa_env *env,
                              long taskimp, long groupimp)
{
        struct rq *src_rq = cpu_rq(env->src_cpu);
        struct rq *dst_rq = cpu_rq(env->dst_cpu);
        struct task_struct *cur;
        long dst_load, src_load;
        long load;
        long imp = (groupimp > 0) ? groupimp : taskimp;

        rcu_read_lock();
        cur = ACCESS_ONCE(dst_rq->curr);
        if (cur->pid == 0) /* idle */
                cur = NULL;

        /*
         * "imp" is the fault differential for the source task between the
         * source and destination node. Calculate the total differential for
         * the source task and potential destination task. The more negative
         * the value is, the more remote accesses would be expected to be
         * incurred if the tasks were swapped.
         */
        if (cur) {
                /* Skip this swap candidate if it cannot move to the source cpu */
                if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
                        goto unlock;

                /*
                 * If dst and source tasks are in the same NUMA group, or not
                 * in any group, then look only at task weights.
                 */
                if (cur->numa_group == env->p->numa_group) {
                        imp = taskimp + task_weight(cur, env->src_nid) -
                              task_weight(cur, env->dst_nid);
                        /*
                         * Add some hysteresis to prevent swapping the
                         * tasks within a group over tiny differences.
                         */
                        if (cur->numa_group)
                                imp -= imp/16;
                } else {
                        /*
                         * Compare the group weights. If a task is all by
                         * itself (not part of a group), use the task weight
                         * instead.
                         */
                        if (env->p->numa_group)
                                imp = groupimp;
                        else
                                imp = taskimp;

                        if (cur->numa_group)
                                imp += group_weight(cur, env->src_nid) -
                                       group_weight(cur, env->dst_nid);
                        else
                                imp += task_weight(cur, env->src_nid) -
                                       task_weight(cur, env->dst_nid);
                }
        }

        if (imp < env->best_imp)
                goto unlock;

        if (!cur) {
                /* Is there capacity at our destination? */
                if (env->src_stats.has_capacity &&
                    !env->dst_stats.has_capacity)
                        goto unlock;

                goto balance;
        }

        /* Balance doesn't matter much if we're running a task per cpu */
        if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
                goto assign;

        /*
         * In the overloaded case, try and keep the load balanced.
         */
balance:
        dst_load = env->dst_stats.load;
        src_load = env->src_stats.load;

        /* XXX missing power terms */
        load = task_h_load(env->p);
        dst_load += load;
        src_load -= load;

        if (cur) {
                load = task_h_load(cur);
                dst_load -= load;
                src_load += load;
        }

        /* make src_load the smaller */
        if (dst_load < src_load)
                swap(dst_load, src_load);

        if (src_load * env->imbalance_pct < dst_load * 100)
                goto unlock;

assign:
        task_numa_assign(env, cur, imp);
unlock:
        rcu_read_unlock();
}
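
/*
 * Illustrative example (not part of the original source): suppose that after
 * accounting for the proposed move the smaller load is src_load = 900 and
 * the larger is dst_load = 1100, with imbalance_pct = 112. Then
 *
 *   src_load * imbalance_pct = 100800 < 110000 = dst_load * 100
 *
 * so the candidate is rejected: the move would leave the two nodes more
 * than 12% out of balance.
 */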

static void task_numa_find_cpu(struct task_numa_env *env,
                               long taskimp, long groupimp)
{
        int cpu;

        for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
                /* Skip this CPU if the source task cannot migrate */
                if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
                        continue;

                env->dst_cpu = cpu;
                task_numa_compare(env, taskimp, groupimp);
        }
}

static int task_numa_migrate(struct task_struct *p)
{
        struct task_numa_env env = {
                .p = p,

                .src_cpu = task_cpu(p),
                .src_nid = task_node(p),

                .imbalance_pct = 112,

                .best_task = NULL,
                .best_imp = 0,
                .best_cpu = -1
        };
        struct sched_domain *sd;
        unsigned long taskweight, groupweight;
        int nid, ret;
        long taskimp, groupimp;

        /*
         * Pick the lowest SD_NUMA domain, as that would have the smallest
         * imbalance and would be the first to start moving tasks about.
         *
         * And we want to avoid any moving of tasks about, as that would create
         * random movement of tasks -- counter the numa conditions we're trying
         * to satisfy here.
         */
        rcu_read_lock();
        sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
        if (sd)
                env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
        rcu_read_unlock();

        /*
         * Cpusets can break the scheduler domain tree into smaller
         * balance domains, some of which do not cross NUMA boundaries.
         * Tasks that are "trapped" in such domains cannot be migrated
         * elsewhere, so there is no point in (re)trying.
         */
        if (unlikely(!sd)) {
                p->numa_preferred_nid = task_node(p);
                return -EINVAL;
        }

        taskweight = task_weight(p, env.src_nid);
        groupweight = group_weight(p, env.src_nid);
        update_numa_stats(&env.src_stats, env.src_nid);
        env.dst_nid = p->numa_preferred_nid;
        taskimp = task_weight(p, env.dst_nid) - taskweight;
        groupimp = group_weight(p, env.dst_nid) - groupweight;
        update_numa_stats(&env.dst_stats, env.dst_nid);

        /* If the preferred nid has capacity, try to use it. */
        if (env.dst_stats.has_capacity)
                task_numa_find_cpu(&env, taskimp, groupimp);

        /* No space available on the preferred nid. Look elsewhere. */
        if (env.best_cpu == -1) {
                for_each_online_node(nid) {
                        if (nid == env.src_nid || nid == p->numa_preferred_nid)
                                continue;

                        /* Only consider nodes where both task and groups benefit */
                        taskimp = task_weight(p, nid) - taskweight;
                        groupimp = group_weight(p, nid) - groupweight;
                        if (taskimp < 0 && groupimp < 0)
                                continue;

                        env.dst_nid = nid;
                        update_numa_stats(&env.dst_stats, env.dst_nid);
                        task_numa_find_cpu(&env, taskimp, groupimp);
                }
        }

        /* No better CPU than the current one was found. */
        if (env.best_cpu == -1)
                return -EAGAIN;

        sched_setnuma(p, env.dst_nid);

        /*
         * Reset the scan period if the task is being rescheduled on an
         * alternative node to recheck if the task is now properly placed.
         */
        p->numa_scan_period = task_scan_min(p);

        if (env.best_task == NULL) {
                int ret = migrate_task_to(p, env.best_cpu);
                return ret;
        }

        ret = migrate_swap(p, env.best_task);
        put_task_struct(env.best_task);
        return ret;
}

/* Attempt to migrate a task to a CPU on the preferred node. */
static void numa_migrate_preferred(struct task_struct *p)
{
        /* This task has no NUMA fault statistics yet */
        if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
                return;

        /* Periodically retry migrating the task to the preferred node */
        p->numa_migrate_retry = jiffies + HZ;

        /* Success if task is already running on preferred CPU */
        if (task_node(p) == p->numa_preferred_nid)
                return;

        /* Otherwise, try migrate to a CPU on the preferred node */
        task_numa_migrate(p);
}

/*
 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
 * increments. The more local the fault statistics are, the higher the scan
 * period will be for the next scan window. If the local/remote ratio is below
 * NUMA_PERIOD_THRESHOLD (where the range of the ratio is 1..NUMA_PERIOD_SLOTS)
 * the scan period will decrease.
 */
#define NUMA_PERIOD_SLOTS 10
#define NUMA_PERIOD_THRESHOLD 3

/*
 * Increase the scan period (slow down scanning) if the majority of
 * our memory is already on our local node, or if the majority of
 * the page accesses are shared with other processes.
 * Otherwise, decrease the scan period.
 */
static void update_task_scan_period(struct task_struct *p,
                        unsigned long shared, unsigned long private)
{
        unsigned int period_slot;
        int ratio;
        int diff;

        unsigned long remote = p->numa_faults_locality[0];
        unsigned long local = p->numa_faults_locality[1];

        /*
         * If there were no record hinting faults then either the task is
         * completely idle or all activity is in areas that are not of interest
         * to automatic numa balancing. Scan slower.
         */
        if (local + shared == 0) {
                p->numa_scan_period = min(p->numa_scan_period_max,
                                          p->numa_scan_period << 1);

                p->mm->numa_next_scan = jiffies +
                        msecs_to_jiffies(p->numa_scan_period);

                return;
        }

        /*
         * Prepare to scale scan period relative to the current period.
         *       == NUMA_PERIOD_THRESHOLD scan period stays the same
         *       <  NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
         *       >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
         */
        period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
        ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
        if (ratio >= NUMA_PERIOD_THRESHOLD) {
                int slot = ratio - NUMA_PERIOD_THRESHOLD;
                if (!slot)
                        slot = 1;
                diff = slot * period_slot;
        } else {
                diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;

                /*
                 * Scale scan rate increases based on sharing. There is an
                 * inverse relationship between the degree of sharing and
                 * the adjustment made to the scanning period. Broadly
                 * speaking the intent is that there is little point
                 * scanning faster if shared accesses dominate as it may
                 * simply bounce migrations uselessly.
                 */
                ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
                diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
        }

        p->numa_scan_period = clamp(p->numa_scan_period + diff,
                                    task_scan_min(p), task_scan_max(p));
        memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
}

static void task_numa_placement(struct task_struct *p)
{
        int seq, nid, max_nid = -1, max_group_nid = -1;
        unsigned long max_faults = 0, max_group_faults = 0;
        unsigned long fault_types[2] = { 0, 0 };
        spinlock_t *group_lock = NULL;

        seq = ACCESS_ONCE(p->mm->numa_scan_seq);
        if (p->numa_scan_seq == seq)
                return;
        p->numa_scan_seq = seq;
        p->numa_scan_period_max = task_scan_max(p);

        /* If the task is part of a group prevent parallel updates to group stats */
        if (p->numa_group) {
                group_lock = &p->numa_group->lock;
                spin_lock(group_lock);
        }

        /* Find the node with the highest number of faults */
        for_each_online_node(nid) {
                unsigned long faults = 0, group_faults = 0;
                int priv, i;

                for (priv = 0; priv < 2; priv++) {
                        long diff;

                        i = task_faults_idx(nid, priv);
                        diff = -p->numa_faults_memory[i];

                        /* Decay existing window, copy faults since last scan */
                        p->numa_faults_memory[i] >>= 1;
                        p->numa_faults_memory[i] += p->numa_faults_buffer_memory[i];
                        fault_types[priv] += p->numa_faults_buffer_memory[i];
                        p->numa_faults_buffer_memory[i] = 0;

                        faults += p->numa_faults_memory[i];
                        diff += p->numa_faults_memory[i];
                        p->total_numa_faults += diff;
                        if (p->numa_group) {
                                /* safe because we can only change our own group */
                                p->numa_group->faults[i] += diff;
                                p->numa_group->total_faults += diff;
                                group_faults += p->numa_group->faults[i];
                        }
                }

                if (faults > max_faults) {
                        max_faults = faults;
                        max_nid = nid;
                }

                if (group_faults > max_group_faults) {
                        max_group_faults = group_faults;
                        max_group_nid = nid;
                }
        }

        update_task_scan_period(p, fault_types[0], fault_types[1]);

        if (p->numa_group) {
                /*
                 * If the preferred task and group nids are different,
                 * iterate over the nodes again to find the best place.
                 */
                if (max_nid != max_group_nid) {
                        unsigned long weight, max_weight = 0;

                        for_each_online_node(nid) {
                                weight = task_weight(p, nid) + group_weight(p, nid);
                                if (weight > max_weight) {
                                        max_weight = weight;
                                        max_nid = nid;
                                }
                        }
                }

                spin_unlock(group_lock);
        }

        /* Preferred node as the node with the most faults */
        if (max_faults && max_nid != p->numa_preferred_nid) {
                /* Update the preferred nid and migrate task if possible */
                sched_setnuma(p, max_nid);
                numa_migrate_preferred(p);
        }
}

static inline int get_numa_group(struct numa_group *grp)
{
        return atomic_inc_not_zero(&grp->refcount);
}

static inline void put_numa_group(struct numa_group *grp)
{
        if (atomic_dec_and_test(&grp->refcount))
                kfree_rcu(grp, rcu);
}

static void task_numa_group(struct task_struct *p, int cpupid, int flags,
                            int *priv)
{
        struct numa_group *grp, *my_grp;
        struct task_struct *tsk;
        bool join = false;
        int cpu = cpupid_to_cpu(cpupid);
        int i;

        if (unlikely(!p->numa_group)) {
                unsigned int size = sizeof(struct numa_group) +
Mel Gorman989348b2013-10-07 11:29:40 +01001456 2*nr_node_ids*sizeof(unsigned long);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001457
1458 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1459 if (!grp)
1460 return;
1461
1462 atomic_set(&grp->refcount, 1);
1463 spin_lock_init(&grp->lock);
1464 INIT_LIST_HEAD(&grp->task_list);
Mel Gormane29cf082013-10-07 11:29:22 +01001465 grp->gid = p->pid;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001466
1467 for (i = 0; i < 2*nr_node_ids; i++)
Rik van Rielff1df892014-01-27 17:03:41 -05001468 grp->faults[i] = p->numa_faults_memory[i];
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001469
Mel Gorman989348b2013-10-07 11:29:40 +01001470 grp->total_faults = p->total_numa_faults;
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001471
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001472 list_add(&p->numa_entry, &grp->task_list);
1473 grp->nr_tasks++;
1474 rcu_assign_pointer(p->numa_group, grp);
1475 }
1476
1477 rcu_read_lock();
1478 tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1479
1480 if (!cpupid_match_pid(tsk, cpupid))
Peter Zijlstra33547812013-10-09 10:24:48 +02001481 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001482
1483 grp = rcu_dereference(tsk->numa_group);
1484 if (!grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001485 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001486
1487 my_grp = p->numa_group;
1488 if (grp == my_grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001489 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001490
1491 /*
1492	 * Only join the other group if it's bigger; if we're the bigger group,
1493 * the other task will join us.
1494 */
1495 if (my_grp->nr_tasks > grp->nr_tasks)
Peter Zijlstra33547812013-10-09 10:24:48 +02001496 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001497
1498 /*
1499 * Tie-break on the grp address.
1500 */
1501 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001502 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001503
Rik van Rieldabe1d92013-10-07 11:29:34 +01001504 /* Always join threads in the same process. */
1505 if (tsk->mm == current->mm)
1506 join = true;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001507
Rik van Rieldabe1d92013-10-07 11:29:34 +01001508 /* Simple filter to avoid false positives due to PID collisions */
1509 if (flags & TNF_SHARED)
1510 join = true;
1511
Mel Gorman3e6a9412013-10-07 11:29:35 +01001512 /* Update priv based on whether false sharing was detected */
1513 *priv = !join;
1514
Rik van Rieldabe1d92013-10-07 11:29:34 +01001515 if (join && !get_numa_group(grp))
Peter Zijlstra33547812013-10-09 10:24:48 +02001516 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001517
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001518 rcu_read_unlock();
1519
1520 if (!join)
1521 return;
1522
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001523 double_lock(&my_grp->lock, &grp->lock);
1524
Mel Gorman989348b2013-10-07 11:29:40 +01001525 for (i = 0; i < 2*nr_node_ids; i++) {
Rik van Rielff1df892014-01-27 17:03:41 -05001526 my_grp->faults[i] -= p->numa_faults_memory[i];
1527 grp->faults[i] += p->numa_faults_memory[i];
Mel Gorman989348b2013-10-07 11:29:40 +01001528 }
1529 my_grp->total_faults -= p->total_numa_faults;
1530 grp->total_faults += p->total_numa_faults;
1531
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001532 list_move(&p->numa_entry, &grp->task_list);
1533 my_grp->nr_tasks--;
1534 grp->nr_tasks++;
1535
1536 spin_unlock(&my_grp->lock);
1537 spin_unlock(&grp->lock);
1538
1539 rcu_assign_pointer(p->numa_group, grp);
1540
1541 put_numa_group(my_grp);
Peter Zijlstra33547812013-10-09 10:24:48 +02001542 return;
1543
1544no_join:
1545 rcu_read_unlock();
1546 return;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001547}
1548
1549void task_numa_free(struct task_struct *p)
1550{
1551 struct numa_group *grp = p->numa_group;
1552 int i;
Rik van Rielff1df892014-01-27 17:03:41 -05001553 void *numa_faults = p->numa_faults_memory;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001554
1555 if (grp) {
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001556 spin_lock(&grp->lock);
Mel Gorman989348b2013-10-07 11:29:40 +01001557 for (i = 0; i < 2*nr_node_ids; i++)
Rik van Rielff1df892014-01-27 17:03:41 -05001558 grp->faults[i] -= p->numa_faults_memory[i];
Mel Gorman989348b2013-10-07 11:29:40 +01001559 grp->total_faults -= p->total_numa_faults;
1560
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001561 list_del(&p->numa_entry);
1562 grp->nr_tasks--;
1563 spin_unlock(&grp->lock);
1564 rcu_assign_pointer(p->numa_group, NULL);
1565 put_numa_group(grp);
1566 }
1567
Rik van Rielff1df892014-01-27 17:03:41 -05001568 p->numa_faults_memory = NULL;
1569 p->numa_faults_buffer_memory = NULL;
Rik van Riel82727012013-10-07 11:29:28 +01001570 kfree(numa_faults);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001571}
1572
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001573/*
1574 * Got a PROT_NONE fault for a page on @node.
1575 */
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001576void task_numa_fault(int last_cpupid, int node, int pages, int flags)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001577{
1578 struct task_struct *p = current;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001579 bool migrated = flags & TNF_MIGRATED;
Mel Gormanac8e8952013-10-07 11:29:03 +01001580 int priv;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001581
Dave Kleikamp10e84b92013-07-31 13:53:35 -07001582 if (!numabalancing_enabled)
Mel Gorman1a687c22012-11-22 11:16:36 +00001583 return;
1584
Mel Gorman9ff1d9f2013-10-07 11:29:04 +01001585 /* for example, ksmd faulting in a user's mm */
1586 if (!p->mm)
1587 return;
1588
Rik van Riel82727012013-10-07 11:29:28 +01001589 /* Do not worry about placement if exiting */
1590 if (p->state == TASK_DEAD)
1591 return;
1592
Mel Gormanf809ca92013-10-07 11:28:57 +01001593 /* Allocate buffer to track faults on a per-node basis */
Rik van Rielff1df892014-01-27 17:03:41 -05001594 if (unlikely(!p->numa_faults_memory)) {
1595 int size = sizeof(*p->numa_faults_memory) * 2 * nr_node_ids;
Mel Gormanf809ca92013-10-07 11:28:57 +01001596
Mel Gorman745d6142013-10-07 11:28:59 +01001597 /* numa_faults_memory and numa_faults_buffer_memory share the allocation */
Rik van Rielff1df892014-01-27 17:03:41 -05001598 p->numa_faults_memory = kzalloc(size * 2, GFP_KERNEL|__GFP_NOWARN);
1599 if (!p->numa_faults_memory)
Mel Gormanf809ca92013-10-07 11:28:57 +01001600 return;
Mel Gorman745d6142013-10-07 11:28:59 +01001601
Rik van Rielff1df892014-01-27 17:03:41 -05001602 BUG_ON(p->numa_faults_buffer_memory);
1603 p->numa_faults_buffer_memory = p->numa_faults_memory + (2 * nr_node_ids);
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001604 p->total_numa_faults = 0;
Rik van Riel04bb2f92013-10-07 11:29:36 +01001605 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
Mel Gormanf809ca92013-10-07 11:28:57 +01001606 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001607
Mel Gormanfb003b82012-11-15 09:01:14 +00001608 /*
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001609 * First accesses are treated as private, otherwise consider accesses
1610 * to be private if the accessing pid has not changed
1611 */
1612 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
1613 priv = 1;
1614 } else {
1615 priv = cpupid_match_pid(p, last_cpupid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001616 if (!priv && !(flags & TNF_NO_GROUP))
Mel Gorman3e6a9412013-10-07 11:29:35 +01001617 task_numa_group(p, last_cpupid, flags, &priv);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001618 }
1619
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001620 task_numa_placement(p);
Mel Gormanf809ca92013-10-07 11:28:57 +01001621
Rik van Riel2739d3e2013-10-07 11:29:41 +01001622 /*
1623	 * Retry task to preferred node migration periodically, in case it
1624	 * previously failed, or the scheduler moved us.
1625 */
1626 if (time_after(jiffies, p->numa_migrate_retry))
Mel Gorman6b9a7462013-10-07 11:29:11 +01001627 numa_migrate_preferred(p);
1628
Ingo Molnarb32e86b2013-10-07 11:29:30 +01001629 if (migrated)
1630 p->numa_pages_migrated += pages;
1631
Rik van Rielff1df892014-01-27 17:03:41 -05001632 p->numa_faults_buffer_memory[task_faults_idx(node, priv)] += pages;
Rik van Riel04bb2f92013-10-07 11:29:36 +01001633 p->numa_faults_locality[!!(flags & TNF_FAULT_LOCAL)] += pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001634}
1635
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001636static void reset_ptenuma_scan(struct task_struct *p)
1637{
1638 ACCESS_ONCE(p->mm->numa_scan_seq)++;
1639 p->mm->numa_scan_offset = 0;
1640}
1641
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001642/*
1643 * The expensive part of numa migration is done from task_work context.
1644 * Triggered from task_tick_numa().
1645 */
1646void task_numa_work(struct callback_head *work)
1647{
1648 unsigned long migrate, next_scan, now = jiffies;
1649 struct task_struct *p = current;
1650 struct mm_struct *mm = p->mm;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001651 struct vm_area_struct *vma;
Mel Gorman9f406042012-11-14 18:34:32 +00001652 unsigned long start, end;
Mel Gorman598f0ec2013-10-07 11:28:55 +01001653 unsigned long nr_pte_updates = 0;
Mel Gorman9f406042012-11-14 18:34:32 +00001654 long pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001655
1656 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
1657
1658 work->next = work; /* protect against double add */
1659 /*
1660 * Who cares about NUMA placement when they're dying.
1661 *
1662 * NOTE: make sure not to dereference p->mm before this check,
1663 * exit_task_work() happens _after_ exit_mm() so we could be called
1664 * without p->mm even though we still had it when we enqueued this
1665 * work.
1666 */
1667 if (p->flags & PF_EXITING)
1668 return;
1669
Mel Gorman930aa172013-10-07 11:29:37 +01001670 if (!mm->numa_next_scan) {
Mel Gorman7e8d16b2013-10-07 11:28:54 +01001671 mm->numa_next_scan = now +
1672 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
Mel Gormanb8593bf2012-11-21 01:18:23 +00001673 }
1674
1675 /*
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001676 * Enforce maximal scan/migration frequency..
1677 */
1678 migrate = mm->numa_next_scan;
1679 if (time_before(now, migrate))
1680 return;
1681
Mel Gorman598f0ec2013-10-07 11:28:55 +01001682 if (p->numa_scan_period == 0) {
1683 p->numa_scan_period_max = task_scan_max(p);
1684 p->numa_scan_period = task_scan_min(p);
1685 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001686
Mel Gormanfb003b82012-11-15 09:01:14 +00001687 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001688 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
1689 return;
1690
Mel Gormane14808b2012-11-19 10:59:15 +00001691 /*
Peter Zijlstra19a78d12013-10-07 11:28:51 +01001692 * Delay this task enough that another task of this mm will likely win
1693 * the next time around.
1694 */
1695 p->node_stamp += 2 * TICK_NSEC;
1696
Mel Gorman9f406042012-11-14 18:34:32 +00001697 start = mm->numa_scan_offset;
1698 pages = sysctl_numa_balancing_scan_size;
1699 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
1700 if (!pages)
1701 return;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001702
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001703 down_read(&mm->mmap_sem);
Mel Gorman9f406042012-11-14 18:34:32 +00001704 vma = find_vma(mm, start);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001705 if (!vma) {
1706 reset_ptenuma_scan(p);
Mel Gorman9f406042012-11-14 18:34:32 +00001707 start = 0;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001708 vma = mm->mmap;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001709 }
Mel Gorman9f406042012-11-14 18:34:32 +00001710 for (; vma; vma = vma->vm_next) {
Mel Gormanfc3147242013-10-07 11:29:09 +01001711 if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001712 continue;
1713
Mel Gorman4591ce4f2013-10-07 11:29:13 +01001714 /*
1715 * Shared library pages mapped by multiple processes are not
1716 * migrated as it is expected they are cache replicated. Avoid
1717 * hinting faults in read-only file-backed mappings or the vdso
1718 * as migrating the pages will be of marginal benefit.
1719 */
1720 if (!vma->vm_mm ||
1721 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1722 continue;
1723
Mel Gorman3c67f472013-12-18 17:08:40 -08001724 /*
1725 * Skip inaccessible VMAs to avoid any confusion between
1726 * PROT_NONE and NUMA hinting ptes
1727 */
1728 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
1729 continue;
1730
Mel Gorman9f406042012-11-14 18:34:32 +00001731 do {
1732 start = max(start, vma->vm_start);
1733 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
1734 end = min(end, vma->vm_end);
Mel Gorman598f0ec2013-10-07 11:28:55 +01001735 nr_pte_updates += change_prot_numa(vma, start, end);
1736
1737 /*
1738 * Scan sysctl_numa_balancing_scan_size but ensure that
1739 * at least one PTE is updated so that unused virtual
1740 * address space is quickly skipped.
1741 */
1742 if (nr_pte_updates)
1743 pages -= (end - start) >> PAGE_SHIFT;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001744
Mel Gorman9f406042012-11-14 18:34:32 +00001745 start = end;
1746 if (pages <= 0)
1747 goto out;
1748 } while (end != vma->vm_end);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001749 }
1750
Mel Gorman9f406042012-11-14 18:34:32 +00001751out:
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001752 /*
Peter Zijlstrac69307d2013-10-07 11:28:41 +01001753 * It is possible to reach the end of the VMA list but the last few
1754	 * VMAs are not guaranteed to be vma_migratable. If they are not, we
1755 * would find the !migratable VMA on the next scan but not reset the
1756 * scanner to the start so check it now.
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001757 */
1758 if (vma)
Mel Gorman9f406042012-11-14 18:34:32 +00001759 mm->numa_scan_offset = start;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001760 else
1761 reset_ptenuma_scan(p);
1762 up_read(&mm->mmap_sem);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001763}
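
/*
 * Scan-window arithmetic above, for a concrete feel (assuming 4KiB pages
 * and a 256MB scan size; the real value comes from
 * sysctl_numa_balancing_scan_size):
 *
 *   pages = 256 << (20 - 12) = 65536 PTEs per pass
 *
 * Each pass walks VMAs from numa_scan_offset, marks up to that many PTEs
 * prot_numa via change_prot_numa(), and remembers where it stopped so the
 * next pass resumes there; ranges that update no PTEs skip ahead without
 * charging the budget.
 */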
1764
1765/*
1766 * Drive the periodic memory faults..
1767 */
1768void task_tick_numa(struct rq *rq, struct task_struct *curr)
1769{
1770 struct callback_head *work = &curr->numa_work;
1771 u64 period, now;
1772
1773 /*
1774 * We don't care about NUMA placement if we don't have memory.
1775 */
1776 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
1777 return;
1778
1779 /*
1780 * Using runtime rather than walltime has the dual advantage that
1781 * we (mostly) drive the selection from busy threads and that the
1782 * task needs to have done some actual work before we bother with
1783 * NUMA placement.
1784 */
1785 now = curr->se.sum_exec_runtime;
1786 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
1787
1788 if (now - curr->node_stamp > period) {
Peter Zijlstra4b96a292012-10-25 14:16:47 +02001789 if (!curr->node_stamp)
Mel Gorman598f0ec2013-10-07 11:28:55 +01001790 curr->numa_scan_period = task_scan_min(curr);
Peter Zijlstra19a78d12013-10-07 11:28:51 +01001791 curr->node_stamp += period;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001792
1793 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
1794 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
1795 task_work_add(curr, work, true);
1796 }
1797 }
1798}
1799#else
1800static void task_tick_numa(struct rq *rq, struct task_struct *curr)
1801{
1802}
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001803
1804static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
1805{
1806}
1807
1808static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
1809{
1810}
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001811#endif /* CONFIG_NUMA_BALANCING */
1812
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001813static void
1814account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1815{
1816 update_load_add(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001817 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02001818 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01001819#ifdef CONFIG_SMP
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001820 if (entity_is_task(se)) {
1821 struct rq *rq = rq_of(cfs_rq);
1822
1823 account_numa_enqueue(rq, task_of(se));
1824 list_add(&se->group_node, &rq->cfs_tasks);
1825 }
Peter Zijlstra367456c2012-02-20 21:49:09 +01001826#endif
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001827 cfs_rq->nr_running++;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001828}
1829
1830static void
1831account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
1832{
1833 update_load_sub(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02001834 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02001835 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001836 if (entity_is_task(se)) {
1837 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
Bharata B Raob87f1722008-09-25 09:53:54 +05301838 list_del_init(&se->group_node);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001839 }
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001840 cfs_rq->nr_running--;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02001841}
1842
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001843#ifdef CONFIG_FAIR_GROUP_SCHED
1844# ifdef CONFIG_SMP
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001845static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
1846{
1847 long tg_weight;
1848
1849 /*
1850 * Use this CPU's actual weight instead of the last load_contribution
1851 * to gain a more accurate current total weight. See
1852 * update_cfs_rq_load_contribution().
1853 */
Alex Shibf5b9862013-06-20 10:18:54 +08001854 tg_weight = atomic_long_read(&tg->load_avg);
Paul Turner82958362012-10-04 13:18:31 +02001855 tg_weight -= cfs_rq->tg_load_contrib;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001856 tg_weight += cfs_rq->load.weight;
1857
1858 return tg_weight;
1859}
1860
Paul Turner6d5ab292011-01-21 20:45:01 -08001861static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001862{
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001863 long tg_weight, load, shares;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001864
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001865 tg_weight = calc_tg_weight(tg, cfs_rq);
Paul Turner6d5ab292011-01-21 20:45:01 -08001866 load = cfs_rq->load.weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001867
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001868 shares = (tg->shares * load);
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02001869 if (tg_weight)
1870 shares /= tg_weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001871
1872 if (shares < MIN_SHARES)
1873 shares = MIN_SHARES;
1874 if (shares > tg->shares)
1875 shares = tg->shares;
1876
1877 return shares;
1878}
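
/*
 * Rough numerical sketch of the shares calculation above (illustrative
 * values only): a group with tg->shares = 1024 has two nice-0 tasks
 * (2048 weight, ignoring load-resolution scaling) queued on this CPU and
 * roughly 2048 weight queued on the rest of the machine, so
 * tg_weight ~= 4096 and
 *
 *   shares = 1024 * 2048 / 4096 = 512
 *
 * i.e. the group entity on this CPU gets about half the group's shares,
 * matching the half of the group's load it carries. The clamps keep the
 * result between MIN_SHARES and tg->shares.
 */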
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001879# else /* CONFIG_SMP */
Paul Turner6d5ab292011-01-21 20:45:01 -08001880static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001881{
1882 return tg->shares;
1883}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001884# endif /* CONFIG_SMP */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001885static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
1886 unsigned long weight)
1887{
Paul Turner19e5eeb2010-12-15 19:10:18 -08001888 if (se->on_rq) {
1889 /* commit outstanding execution time */
1890 if (cfs_rq->curr == se)
1891 update_curr(cfs_rq);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001892 account_entity_dequeue(cfs_rq, se);
Paul Turner19e5eeb2010-12-15 19:10:18 -08001893 }
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001894
1895 update_load_set(&se->load, weight);
1896
1897 if (se->on_rq)
1898 account_entity_enqueue(cfs_rq, se);
1899}
1900
Paul Turner82958362012-10-04 13:18:31 +02001901static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
1902
Paul Turner6d5ab292011-01-21 20:45:01 -08001903static void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001904{
1905 struct task_group *tg;
1906 struct sched_entity *se;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001907 long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001908
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001909 tg = cfs_rq->tg;
1910 se = tg->se[cpu_of(rq_of(cfs_rq))];
Paul Turner64660c82011-07-21 09:43:36 -07001911 if (!se || throttled_hierarchy(cfs_rq))
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001912 return;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08001913#ifndef CONFIG_SMP
1914 if (likely(se->load.weight == tg->shares))
1915 return;
1916#endif
Paul Turner6d5ab292011-01-21 20:45:01 -08001917 shares = calc_cfs_shares(cfs_rq, tg);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001918
1919 reweight_entity(cfs_rq_of(se), se, shares);
1920}
1921#else /* CONFIG_FAIR_GROUP_SCHED */
Paul Turner6d5ab292011-01-21 20:45:01 -08001922static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08001923{
1924}
1925#endif /* CONFIG_FAIR_GROUP_SCHED */
1926
Alex Shi141965c2013-06-26 13:05:39 +08001927#ifdef CONFIG_SMP
Paul Turner9d85f212012-10-04 13:18:29 +02001928/*
Paul Turner5b51f2f2012-10-04 13:18:32 +02001929 * We choose a half-life close to 1 scheduling period.
1930 * Note: The tables below are dependent on this value.
1931 */
1932#define LOAD_AVG_PERIOD 32
1933#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
1934#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
1935
1936/* Precomputed fixed inverse multiplies for multiplication by y^n */
1937static const u32 runnable_avg_yN_inv[] = {
1938 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
1939 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
1940 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
1941 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
1942 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
1943 0x85aac367, 0x82cd8698,
1944};
1945
1946/*
1947 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
1948 * over-estimates when re-combining.
1949 */
1950static const u32 runnable_avg_yN_sum[] = {
1951 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
1952 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
1953 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
1954};
1955
1956/*
Paul Turner9d85f212012-10-04 13:18:29 +02001957 * Approximate:
1958 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
1959 */
1960static __always_inline u64 decay_load(u64 val, u64 n)
1961{
Paul Turner5b51f2f2012-10-04 13:18:32 +02001962 unsigned int local_n;
1963
1964 if (!n)
1965 return val;
1966 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
1967 return 0;
1968
1969 /* after bounds checking we can collapse to 32-bit */
1970 local_n = n;
1971
1972 /*
1973 * As y^PERIOD = 1/2, we can combine
1974	 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
1975	 * With a look-up table which covers y^n (n < PERIOD)
1976 *
1977 * To achieve constant time decay_load.
1978 */
1979 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
1980 val >>= local_n / LOAD_AVG_PERIOD;
1981 local_n %= LOAD_AVG_PERIOD;
Paul Turner9d85f212012-10-04 13:18:29 +02001982 }
1983
Paul Turner5b51f2f2012-10-04 13:18:32 +02001984 val *= runnable_avg_yN_inv[local_n];
1985 /* We don't use SRR here since we always want to round down. */
1986 return val >> 32;
1987}
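
/*
 * A couple of illustrative data points for decay_load() (values follow
 * directly from the table above, rounding down):
 *
 *   decay_load(1024, 32) -> 1024 is halved once (y^32 = 1/2), giving ~512
 *   decay_load(1024, 16) -> 1024 * runnable_avg_yN_inv[16] >> 32
 *                           = 1024 * ~0.7071 ~= 724   (y^16 = sqrt(1/2))
 *   decay_load(x, n)     -> 0 for any n > 63 * LOAD_AVG_PERIOD
 */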
1988
1989/*
1990 * For updates fully spanning n periods, the contribution to runnable
1991 * average will be: \Sum 1024*y^n
1992 *
1993 * We can compute this reasonably efficiently by combining:
1994 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
1995 */
1996static u32 __compute_runnable_contrib(u64 n)
1997{
1998 u32 contrib = 0;
1999
2000 if (likely(n <= LOAD_AVG_PERIOD))
2001 return runnable_avg_yN_sum[n];
2002 else if (unlikely(n >= LOAD_AVG_MAX_N))
2003 return LOAD_AVG_MAX;
2004
2005	 /* Compute \Sum y^n combining precomputed values for y^i, \Sum y^j */
2006 do {
2007 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2008 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2009
2010 n -= LOAD_AVG_PERIOD;
2011 } while (n > LOAD_AVG_PERIOD);
2012
2013 contrib = decay_load(contrib, n);
2014 return contrib + runnable_avg_yN_sum[n];
Paul Turner9d85f212012-10-04 13:18:29 +02002015}
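
/*
 * Sketch of how the decomposition above plays out (illustrative):
 *
 *   n = 8  : fits in the table, returns runnable_avg_yN_sum[8] = 7437,
 *            i.e. 1024 * (y + y^2 + ... + y^8)
 *   n = 40 : one loop iteration folds in a full 32-period block, leaving
 *            n = 8; the result is decay_load(runnable_avg_yN_sum[32], 8)
 *            + runnable_avg_yN_sum[8], i.e. the newest 8 periods at full
 *            precomputed weight plus the older block decayed by y^8.
 */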
2016
2017/*
2018 * We can represent the historical contribution to runnable average as the
2019 * coefficients of a geometric series. To do this we sub-divide our runnable
2020 * history into segments of approximately 1ms (1024us); label the segment that
2021 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2022 *
2023 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2024 * p0 p1 p2
2025 * (now) (~1ms ago) (~2ms ago)
2026 *
2027 * Let u_i denote the fraction of p_i that the entity was runnable.
2028 *
2029 * We then designate the fractions u_i as our co-efficients, yielding the
2030 * following representation of historical load:
2031 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2032 *
2033	 * We choose y based on the width of a reasonable scheduling period, fixing:
2034 * y^32 = 0.5
2035 *
2036 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2037 * approximately half as much as the contribution to load within the last ms
2038 * (u_0).
2039 *
2040 * When a period "rolls over" and we have new u_0`, multiplying the previous
2041 * sum again by y is sufficient to update:
2042 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2043 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2044 */
2045static __always_inline int __update_entity_runnable_avg(u64 now,
2046 struct sched_avg *sa,
2047 int runnable)
2048{
Paul Turner5b51f2f2012-10-04 13:18:32 +02002049 u64 delta, periods;
2050 u32 runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02002051 int delta_w, decayed = 0;
2052
2053 delta = now - sa->last_runnable_update;
2054 /*
2055 * This should only happen when time goes backwards, which it
2056 * unfortunately does during sched clock init when we swap over to TSC.
2057 */
2058 if ((s64)delta < 0) {
2059 sa->last_runnable_update = now;
2060 return 0;
2061 }
2062
2063 /*
2064 * Use 1024ns as the unit of measurement since it's a reasonable
2065 * approximation of 1us and fast to compute.
2066 */
2067 delta >>= 10;
2068 if (!delta)
2069 return 0;
2070 sa->last_runnable_update = now;
2071
2072 /* delta_w is the amount already accumulated against our next period */
2073 delta_w = sa->runnable_avg_period % 1024;
2074 if (delta + delta_w >= 1024) {
2075 /* period roll-over */
2076 decayed = 1;
2077
2078 /*
2079 * Now that we know we're crossing a period boundary, figure
2080 * out how much from delta we need to complete the current
2081 * period and accrue it.
2082 */
2083 delta_w = 1024 - delta_w;
Paul Turner5b51f2f2012-10-04 13:18:32 +02002084 if (runnable)
2085 sa->runnable_avg_sum += delta_w;
2086 sa->runnable_avg_period += delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02002087
Paul Turner5b51f2f2012-10-04 13:18:32 +02002088 delta -= delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02002089
Paul Turner5b51f2f2012-10-04 13:18:32 +02002090 /* Figure out how many additional periods this update spans */
2091 periods = delta / 1024;
2092 delta %= 1024;
2093
2094 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2095 periods + 1);
2096 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
2097 periods + 1);
2098
2099 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2100 runnable_contrib = __compute_runnable_contrib(periods);
2101 if (runnable)
2102 sa->runnable_avg_sum += runnable_contrib;
2103 sa->runnable_avg_period += runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02002104 }
2105
2106 /* Remainder of delta accrued against u_0` */
2107 if (runnable)
2108 sa->runnable_avg_sum += delta;
2109 sa->runnable_avg_period += delta;
2110
2111 return decayed;
2112}
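
/*
 * Example trace of the rollover handling above (illustrative numbers, in
 * ~1us units after the >> 10): suppose delta = 2800 and the current period
 * already holds delta_w = 300.
 *
 *   1) delta_w = 1024 - 300 = 724 completes the current period and is
 *      accrued (to runnable_avg_sum too if runnable); delta becomes 2076.
 *   2) periods = 2076 / 1024 = 2 whole periods, remainder delta = 28.
 *   3) both sums are decayed by y^(periods + 1) = y^3 and
 *      __compute_runnable_contrib(2) = 1982 is added for the two full
 *      periods.
 *   4) the remaining 28 is accrued against the new current period u_0`.
 */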
2113
Paul Turner9ee474f2012-10-04 13:18:30 +02002114/* Synchronize an entity's decay with its parenting cfs_rq.*/
Paul Turneraff3e492012-10-04 13:18:30 +02002115static inline u64 __synchronize_entity_decay(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02002116{
2117 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2118 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2119
2120 decays -= se->avg.decay_count;
2121 if (!decays)
Paul Turneraff3e492012-10-04 13:18:30 +02002122 return 0;
Paul Turner9ee474f2012-10-04 13:18:30 +02002123
2124 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2125 se->avg.decay_count = 0;
Paul Turneraff3e492012-10-04 13:18:30 +02002126
2127 return decays;
Paul Turner9ee474f2012-10-04 13:18:30 +02002128}
2129
Paul Turnerc566e8e2012-10-04 13:18:30 +02002130#ifdef CONFIG_FAIR_GROUP_SCHED
2131static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2132 int force_update)
2133{
2134 struct task_group *tg = cfs_rq->tg;
Alex Shibf5b9862013-06-20 10:18:54 +08002135 long tg_contrib;
Paul Turnerc566e8e2012-10-04 13:18:30 +02002136
2137 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2138 tg_contrib -= cfs_rq->tg_load_contrib;
2139
Alex Shibf5b9862013-06-20 10:18:54 +08002140 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2141 atomic_long_add(tg_contrib, &tg->load_avg);
Paul Turnerc566e8e2012-10-04 13:18:30 +02002142 cfs_rq->tg_load_contrib += tg_contrib;
2143 }
2144}
Paul Turner8165e142012-10-04 13:18:31 +02002145
Paul Turnerbb17f652012-10-04 13:18:31 +02002146/*
2147 * Aggregate cfs_rq runnable averages into an equivalent task_group
2148 * representation for computing load contributions.
2149 */
2150static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2151 struct cfs_rq *cfs_rq)
2152{
2153 struct task_group *tg = cfs_rq->tg;
2154 long contrib;
2155
2156 /* The fraction of a cpu used by this cfs_rq */
Michal Nazarewicz85b088e2013-11-10 20:42:01 +01002157 contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
Paul Turnerbb17f652012-10-04 13:18:31 +02002158 sa->runnable_avg_period + 1);
2159 contrib -= cfs_rq->tg_runnable_contrib;
2160
2161 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2162 atomic_add(contrib, &tg->runnable_avg);
2163 cfs_rq->tg_runnable_contrib += contrib;
2164 }
2165}
2166
Paul Turner8165e142012-10-04 13:18:31 +02002167static inline void __update_group_entity_contrib(struct sched_entity *se)
2168{
2169 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2170 struct task_group *tg = cfs_rq->tg;
Paul Turnerbb17f652012-10-04 13:18:31 +02002171 int runnable_avg;
2172
Paul Turner8165e142012-10-04 13:18:31 +02002173 u64 contrib;
2174
2175 contrib = cfs_rq->tg_load_contrib * tg->shares;
Alex Shibf5b9862013-06-20 10:18:54 +08002176 se->avg.load_avg_contrib = div_u64(contrib,
2177 atomic_long_read(&tg->load_avg) + 1);
Paul Turnerbb17f652012-10-04 13:18:31 +02002178
2179 /*
2180 * For group entities we need to compute a correction term in the case
2181 * that they are consuming <1 cpu so that we would contribute the same
2182 * load as a task of equal weight.
2183 *
2184 * Explicitly co-ordinating this measurement would be expensive, but
2185	 * fortunately the sum of each cpu's contribution forms a usable
2186 * lower-bound on the true value.
2187 *
2188 * Consider the aggregate of 2 contributions. Either they are disjoint
2189	 * (and the sum represents the true value) or they overlap and we are
2190 * understating by the aggregate of their overlap.
2191 *
2192 * Extending this to N cpus, for a given overlap, the maximum amount we
2193	 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
2194 * cpus that overlap for this interval and w_i is the interval width.
2195 *
2196	 * On a small machine, the first term is well-bounded, which bounds the
2197	 * total error since w_i is a subset of the period. Whereas on a
2198	 * larger machine, while this first term can be larger, if w_i is of
2199	 * consequential size it is guaranteed to see n_i*w_i quickly converge to
2200 * our upper bound of 1-cpu.
2201 */
2202 runnable_avg = atomic_read(&tg->runnable_avg);
2203 if (runnable_avg < NICE_0_LOAD) {
2204 se->avg.load_avg_contrib *= runnable_avg;
2205 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2206 }
Paul Turner8165e142012-10-04 13:18:31 +02002207}
Paul Turnerc566e8e2012-10-04 13:18:30 +02002208#else
2209static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2210 int force_update) {}
Paul Turnerbb17f652012-10-04 13:18:31 +02002211static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2212 struct cfs_rq *cfs_rq) {}
Paul Turner8165e142012-10-04 13:18:31 +02002213static inline void __update_group_entity_contrib(struct sched_entity *se) {}
Paul Turnerc566e8e2012-10-04 13:18:30 +02002214#endif
2215
Paul Turner8165e142012-10-04 13:18:31 +02002216static inline void __update_task_entity_contrib(struct sched_entity *se)
2217{
2218 u32 contrib;
2219
2220 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2221 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2222 contrib /= (se->avg.runnable_avg_period + 1);
2223 se->avg.load_avg_contrib = scale_load(contrib);
2224}
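
/*
 * Ballpark example (ignoring the load-resolution scaling): a nice-0 task
 * with load.weight = 1024 whose decayed history shows it runnable about
 * half the time, e.g. runnable_avg_sum ~= 23871 against
 * runnable_avg_period ~= LOAD_AVG_MAX = 47742, ends up with
 * load_avg_contrib ~= 1024 * 23871 / 47743 ~= 512, i.e. roughly half a
 * CPU's worth of nice-0 load.
 */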
2225
Paul Turner2dac7542012-10-04 13:18:30 +02002226/* Compute the current contribution to load_avg by se, return any delta */
2227static long __update_entity_load_avg_contrib(struct sched_entity *se)
2228{
2229 long old_contrib = se->avg.load_avg_contrib;
2230
Paul Turner8165e142012-10-04 13:18:31 +02002231 if (entity_is_task(se)) {
2232 __update_task_entity_contrib(se);
2233 } else {
Paul Turnerbb17f652012-10-04 13:18:31 +02002234 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
Paul Turner8165e142012-10-04 13:18:31 +02002235 __update_group_entity_contrib(se);
2236 }
Paul Turner2dac7542012-10-04 13:18:30 +02002237
2238 return se->avg.load_avg_contrib - old_contrib;
2239}
2240
Paul Turner9ee474f2012-10-04 13:18:30 +02002241static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2242 long load_contrib)
2243{
2244 if (likely(load_contrib < cfs_rq->blocked_load_avg))
2245 cfs_rq->blocked_load_avg -= load_contrib;
2246 else
2247 cfs_rq->blocked_load_avg = 0;
2248}
2249
Paul Turnerf1b17282012-10-04 13:18:31 +02002250static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2251
Paul Turner9d85f212012-10-04 13:18:29 +02002252/* Update a sched_entity's runnable average */
Paul Turner9ee474f2012-10-04 13:18:30 +02002253static inline void update_entity_load_avg(struct sched_entity *se,
2254 int update_cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02002255{
Paul Turner2dac7542012-10-04 13:18:30 +02002256 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2257 long contrib_delta;
Paul Turnerf1b17282012-10-04 13:18:31 +02002258 u64 now;
Paul Turner2dac7542012-10-04 13:18:30 +02002259
Paul Turnerf1b17282012-10-04 13:18:31 +02002260 /*
2261 * For a group entity we need to use their owned cfs_rq_clock_task() in
2262 * case they are the parent of a throttled hierarchy.
2263 */
2264 if (entity_is_task(se))
2265 now = cfs_rq_clock_task(cfs_rq);
2266 else
2267 now = cfs_rq_clock_task(group_cfs_rq(se));
2268
2269 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
Paul Turner2dac7542012-10-04 13:18:30 +02002270 return;
2271
2272 contrib_delta = __update_entity_load_avg_contrib(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02002273
2274 if (!update_cfs_rq)
2275 return;
2276
Paul Turner2dac7542012-10-04 13:18:30 +02002277 if (se->on_rq)
2278 cfs_rq->runnable_load_avg += contrib_delta;
Paul Turner9ee474f2012-10-04 13:18:30 +02002279 else
2280 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2281}
2282
2283/*
2284 * Decay the load contributed by all blocked children and account this so that
2285	 * their contribution may be appropriately discounted when they wake up.
2286 */
Paul Turneraff3e492012-10-04 13:18:30 +02002287static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02002288{
Paul Turnerf1b17282012-10-04 13:18:31 +02002289 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02002290 u64 decays;
2291
2292 decays = now - cfs_rq->last_decay;
Paul Turneraff3e492012-10-04 13:18:30 +02002293 if (!decays && !force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02002294 return;
2295
Alex Shi25099402013-06-20 10:18:55 +08002296 if (atomic_long_read(&cfs_rq->removed_load)) {
2297 unsigned long removed_load;
2298 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
Paul Turneraff3e492012-10-04 13:18:30 +02002299 subtract_blocked_load_contrib(cfs_rq, removed_load);
2300 }
Paul Turner9ee474f2012-10-04 13:18:30 +02002301
Paul Turneraff3e492012-10-04 13:18:30 +02002302 if (decays) {
2303 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2304 decays);
2305 atomic64_add(decays, &cfs_rq->decay_counter);
2306 cfs_rq->last_decay = now;
2307 }
Paul Turnerc566e8e2012-10-04 13:18:30 +02002308
2309 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
Paul Turner9d85f212012-10-04 13:18:29 +02002310}
Ben Segall18bf2802012-10-04 12:51:20 +02002311
2312static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2313{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002314 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
Paul Turnerbb17f652012-10-04 13:18:31 +02002315 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
Ben Segall18bf2802012-10-04 12:51:20 +02002316}
Paul Turner2dac7542012-10-04 13:18:30 +02002317
2318/* Add the load generated by se into cfs_rq's child load-average */
2319static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002320 struct sched_entity *se,
2321 int wakeup)
Paul Turner2dac7542012-10-04 13:18:30 +02002322{
Paul Turneraff3e492012-10-04 13:18:30 +02002323 /*
2324	 * We track migrations using entity decay_count <= 0; on a wake-up
2325 * migration we use a negative decay count to track the remote decays
2326 * accumulated while sleeping.
Alex Shia75cdaa2013-06-20 10:18:47 +08002327 *
2328 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
2329 * are seen by enqueue_entity_load_avg() as a migration with an already
2330 * constructed load_avg_contrib.
Paul Turneraff3e492012-10-04 13:18:30 +02002331 */
2332 if (unlikely(se->avg.decay_count <= 0)) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002333 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
Paul Turneraff3e492012-10-04 13:18:30 +02002334 if (se->avg.decay_count) {
2335 /*
2336 * In a wake-up migration we have to approximate the
2337 * time sleeping. This is because we can't synchronize
2338 * clock_task between the two cpus, and it is not
2339 * guaranteed to be read-safe. Instead, we can
2340 * approximate this using our carried decays, which are
2341 * explicitly atomically readable.
2342 */
2343 se->avg.last_runnable_update -= (-se->avg.decay_count)
2344 << 20;
2345 update_entity_load_avg(se, 0);
2346 /* Indicate that we're now synchronized and on-rq */
2347 se->avg.decay_count = 0;
2348 }
Paul Turner9ee474f2012-10-04 13:18:30 +02002349 wakeup = 0;
2350 } else {
Vincent Guittot93906752014-01-22 08:45:34 +01002351 __synchronize_entity_decay(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02002352 }
2353
Paul Turneraff3e492012-10-04 13:18:30 +02002354 /* migrated tasks did not contribute to our blocked load */
2355 if (wakeup) {
Paul Turner9ee474f2012-10-04 13:18:30 +02002356 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turneraff3e492012-10-04 13:18:30 +02002357 update_entity_load_avg(se, 0);
2358 }
Paul Turner9ee474f2012-10-04 13:18:30 +02002359
Paul Turner2dac7542012-10-04 13:18:30 +02002360 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02002361 /* we force update consideration on load-balancer moves */
2362 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
Paul Turner2dac7542012-10-04 13:18:30 +02002363}
2364
Paul Turner9ee474f2012-10-04 13:18:30 +02002365/*
2366 * Remove se's load from this cfs_rq child load-average, if the entity is
2367 * transitioning to a blocked state we track its projected decay using
2368 * blocked_load_avg.
2369 */
Paul Turner2dac7542012-10-04 13:18:30 +02002370static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002371 struct sched_entity *se,
2372 int sleep)
Paul Turner2dac7542012-10-04 13:18:30 +02002373{
Paul Turner9ee474f2012-10-04 13:18:30 +02002374 update_entity_load_avg(se, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002375 /* we force update consideration on load-balancer moves */
2376 update_cfs_rq_blocked_load(cfs_rq, !sleep);
Paul Turner9ee474f2012-10-04 13:18:30 +02002377
Paul Turner2dac7542012-10-04 13:18:30 +02002378 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
Paul Turner9ee474f2012-10-04 13:18:30 +02002379 if (sleep) {
2380 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2381 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2382 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
Paul Turner2dac7542012-10-04 13:18:30 +02002383}
Vincent Guittot642dbc32013-04-18 18:34:26 +02002384
2385/*
2386 * Update the rq's load with the elapsed running time before entering
2387	 * idle. If the last scheduled task is not a CFS task, idle_enter will
2388 * be the only way to update the runnable statistic.
2389 */
2390void idle_enter_fair(struct rq *this_rq)
2391{
2392 update_rq_runnable_avg(this_rq, 1);
2393}
2394
2395/*
2396 * Update the rq's load with the elapsed idle time before a task is
2397	 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
2398 * be the only way to update the runnable statistic.
2399 */
2400void idle_exit_fair(struct rq *this_rq)
2401{
2402 update_rq_runnable_avg(this_rq, 0);
2403}
2404
Paul Turner9d85f212012-10-04 13:18:29 +02002405#else
Paul Turner9ee474f2012-10-04 13:18:30 +02002406static inline void update_entity_load_avg(struct sched_entity *se,
2407 int update_cfs_rq) {}
Ben Segall18bf2802012-10-04 12:51:20 +02002408static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Paul Turner2dac7542012-10-04 13:18:30 +02002409static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002410 struct sched_entity *se,
2411 int wakeup) {}
Paul Turner2dac7542012-10-04 13:18:30 +02002412static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002413 struct sched_entity *se,
2414 int sleep) {}
Paul Turneraff3e492012-10-04 13:18:30 +02002415static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2416 int force_update) {}
Paul Turner9d85f212012-10-04 13:18:29 +02002417#endif
2418
Ingo Molnar2396af62007-08-09 11:16:48 +02002419static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002420{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002421#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02002422 struct task_struct *tsk = NULL;
2423
2424 if (entity_is_task(se))
2425 tsk = task_of(se);
2426
Lucas De Marchi41acab82010-03-10 23:37:45 -03002427 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002428 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002429
2430 if ((s64)delta < 0)
2431 delta = 0;
2432
Lucas De Marchi41acab82010-03-10 23:37:45 -03002433 if (unlikely(delta > se->statistics.sleep_max))
2434 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002435
Peter Zijlstra8c79a042012-01-30 14:51:37 +01002436 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03002437 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01002438
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002439 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02002440 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002441 trace_sched_stat_sleep(tsk, delta);
2442 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002443 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03002444 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002445 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002446
2447 if ((s64)delta < 0)
2448 delta = 0;
2449
Lucas De Marchi41acab82010-03-10 23:37:45 -03002450 if (unlikely(delta > se->statistics.block_max))
2451 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002452
Peter Zijlstra8c79a042012-01-30 14:51:37 +01002453 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03002454 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02002455
Peter Zijlstrae4143142009-07-23 20:13:26 +02002456 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07002457 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002458 se->statistics.iowait_sum += delta;
2459 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002460 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07002461 }
2462
Andrew Vaginb781a602011-11-28 12:03:35 +03002463 trace_sched_stat_blocked(tsk, delta);
2464
Peter Zijlstrae4143142009-07-23 20:13:26 +02002465 /*
2466 * Blocking time is in units of nanosecs, so shift by
2467 * 20 to get a milliseconds-range estimation of the
2468 * amount of time that the task spent sleeping:
2469 */
2470 if (unlikely(prof_on == SLEEP_PROFILING)) {
2471 profile_hits(SLEEP_PROFILING,
2472 (void *)get_wchan(tsk),
2473 delta >> 20);
2474 }
2475 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02002476 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002477 }
2478#endif
2479}
2480
Peter Zijlstraddc97292007-10-15 17:00:10 +02002481static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2482{
2483#ifdef CONFIG_SCHED_DEBUG
2484 s64 d = se->vruntime - cfs_rq->min_vruntime;
2485
2486 if (d < 0)
2487 d = -d;
2488
2489 if (d > 3*sysctl_sched_latency)
2490 schedstat_inc(cfs_rq, nr_spread_over);
2491#endif
2492}
2493
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002494static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002495place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2496{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02002497 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02002498
Peter Zijlstra2cb86002007-11-09 22:39:37 +01002499 /*
2500 * The 'current' period is already promised to the current tasks,
2501 * however the extra weight of the new task will slow them down a
2502	 * little; place the new task so that it fits in the slot that
2503 * stays open at the end.
2504 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02002505 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02002506 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002507
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002508 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01002509 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002510 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02002511
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002512 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002513 * Halve their sleep time's effect, to allow
2514 * for a gentler effect of sleepers:
2515 */
2516 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2517 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02002518
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002519 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002520 }
2521
Mike Galbraithb5d9d732009-09-08 11:12:28 +02002522 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05302523 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002524}
2525
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002526static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2527
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002528static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002529enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002530{
2531 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002532 * Update the normalized vruntime before updating min_vruntime
Kamalesh Babulal0fc576d2013-06-27 11:24:18 +05302533 * through calling update_curr().
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002534 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002535 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002536 se->vruntime += cfs_rq->min_vruntime;
2537
2538 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002539 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002540 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002541 update_curr(cfs_rq);
Paul Turnerf269ae02012-10-04 13:18:31 +02002542 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002543 account_entity_enqueue(cfs_rq, se);
2544 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002545
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002546 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002547 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02002548 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02002549 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002550
Ingo Molnard2417e52007-08-09 11:16:47 +02002551 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02002552 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002553 if (se != cfs_rq->curr)
2554 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002555 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08002556
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002557 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08002558 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002559 check_enqueue_throttle(cfs_rq);
2560 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002561}
2562
Rik van Riel2c13c9192011-02-01 09:48:37 -05002563static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01002564{
Rik van Riel2c13c9192011-02-01 09:48:37 -05002565 for_each_sched_entity(se) {
2566 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2567 if (cfs_rq->last == se)
2568 cfs_rq->last = NULL;
2569 else
2570 break;
2571 }
2572}
Peter Zijlstra2002c692008-11-11 11:52:33 +01002573
Rik van Riel2c13c9192011-02-01 09:48:37 -05002574static void __clear_buddies_next(struct sched_entity *se)
2575{
2576 for_each_sched_entity(se) {
2577 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2578 if (cfs_rq->next == se)
2579 cfs_rq->next = NULL;
2580 else
2581 break;
2582 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01002583}
2584
Rik van Rielac53db52011-02-01 09:51:03 -05002585static void __clear_buddies_skip(struct sched_entity *se)
2586{
2587 for_each_sched_entity(se) {
2588 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2589 if (cfs_rq->skip == se)
2590 cfs_rq->skip = NULL;
2591 else
2592 break;
2593 }
2594}
2595
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01002596static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2597{
Rik van Riel2c13c9192011-02-01 09:48:37 -05002598 if (cfs_rq->last == se)
2599 __clear_buddies_last(se);
2600
2601 if (cfs_rq->next == se)
2602 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05002603
2604 if (cfs_rq->skip == se)
2605 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01002606}
2607
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002608static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07002609
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002610static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002611dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002612{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002613 /*
2614 * Update run-time statistics of the 'current'.
2615 */
2616 update_curr(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002617 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002618
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02002619 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002620 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02002621#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002622 if (entity_is_task(se)) {
2623 struct task_struct *tsk = task_of(se);
2624
2625 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002626 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002627 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002628 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002629 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02002630#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02002631 }
2632
Peter Zijlstra2002c692008-11-11 11:52:33 +01002633 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01002634
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002635 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002636 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002637 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002638 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002639
2640 /*
2641 * Normalize the entity after updating the min_vruntime because the
2642 * update can refer to the ->curr item and we need to reflect this
2643 * movement in our normalized position.
2644 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002645 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002646 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07002647
Paul Turnerd8b49862011-07-21 09:43:41 -07002648 /* return excess runtime on last dequeue */
2649 return_cfs_rq_runtime(cfs_rq);
2650
Peter Zijlstra1e876232011-05-17 16:21:10 -07002651 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002652 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002653}
2654
2655/*
2656 * Preempt the current task with a newly woken task if needed:
2657 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02002658static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002659check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002660{
Peter Zijlstra11697832007-09-05 14:32:49 +02002661 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002662 struct sched_entity *se;
2663 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02002664
Peter Zijlstra6d0f0ebd2007-10-15 17:00:05 +02002665 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02002666 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002667 if (delta_exec > ideal_runtime) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002668 resched_task(rq_of(cfs_rq)->curr);
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002669 /*
2670 * The current task ran long enough, ensure it doesn't get
2671 * re-elected due to buddy favours.
2672 */
2673 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002674 return;
2675 }
2676
2677 /*
2678 * Ensure that a task that missed wakeup preemption by a
2679 * narrow margin doesn't have to wait for a full slice.
2680 * This also mitigates buddy induced latencies under load.
2681 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002682 if (delta_exec < sysctl_sched_min_granularity)
2683 return;
2684
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002685 se = __pick_first_entity(cfs_rq);
2686 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02002687
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002688 if (delta < 0)
2689 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01002690
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002691 if (delta > ideal_runtime)
2692 resched_task(rq_of(cfs_rq)->curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002693}
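/*
 * Illustrative note (not part of fair.c): the check above fires for two
 * reasons, either curr exhausted its slice or it leads the leftmost entity's
 * vruntime by more than a slice. A minimal user-space sketch of the same
 * arithmetic; the numbers stand in for sched_slice() and the runqueue state
 * and are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ideal_runtime = 4000000;	/* assumed sched_slice(): 4ms */
	uint64_t delta_exec    = 4500000;	/* curr ran 4.5ms since last pick */
	int64_t  vdelta        = 2000000;	/* curr->vruntime - leftmost->vruntime */
	uint64_t min_gran      =  750000;	/* assumed min granularity: 0.75ms */

	if (delta_exec > ideal_runtime)
		printf("resched: slice exhausted\n");
	else if (delta_exec >= min_gran && vdelta > (int64_t)ideal_runtime)
		printf("resched: too far ahead in vruntime\n");
	else
		printf("keep running\n");
	return 0;
}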
2694
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002695static void
Ingo Molnar8494f412007-08-09 11:16:48 +02002696set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002697{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002698 /* 'current' is not kept within the tree. */
2699 if (se->on_rq) {
2700 /*
2701	 * Any task has to be enqueued before it gets to execute on
2702 * a CPU. So account for the time it spent waiting on the
2703 * runqueue.
2704 */
2705 update_stats_wait_end(cfs_rq, se);
2706 __dequeue_entity(cfs_rq, se);
2707 }
2708
Ingo Molnar79303e92007-08-09 11:16:47 +02002709 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02002710 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02002711#ifdef CONFIG_SCHEDSTATS
2712 /*
2713 * Track our maximum slice length, if the CPU's load is at
2714 * least twice that of our own weight (i.e. dont track it
2715 * when there are only lesser-weight tasks around):
2716 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02002717 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002718 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02002719 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2720 }
2721#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02002722 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002723}
2724
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02002725static int
2726wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2727
Rik van Rielac53db52011-02-01 09:51:03 -05002728/*
2729 * Pick the next process, keeping these things in mind, in this order:
2730 * 1) keep things fair between processes/task groups
2731 * 2) pick the "next" process, since someone really wants that to run
2732 * 3) pick the "last" process, for cache locality
2733 * 4) do not run the "skip" process, if something else is available
2734 */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01002735static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002736{
Rik van Rielac53db52011-02-01 09:51:03 -05002737 struct sched_entity *se = __pick_first_entity(cfs_rq);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002738 struct sched_entity *left = se;
Peter Zijlstraf4b67552008-11-04 21:25:07 +01002739
Rik van Rielac53db52011-02-01 09:51:03 -05002740 /*
2741 * Avoid running the skip buddy, if running something else can
2742 * be done without getting too unfair.
2743 */
2744 if (cfs_rq->skip == se) {
2745 struct sched_entity *second = __pick_next_entity(se);
2746 if (second && wakeup_preempt_entity(second, left) < 1)
2747 se = second;
2748 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002749
Mike Galbraithf685cea2009-10-23 23:09:22 +02002750 /*
2751 * Prefer last buddy, try to return the CPU to a preempted task.
2752 */
2753 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
2754 se = cfs_rq->last;
2755
Rik van Rielac53db52011-02-01 09:51:03 -05002756 /*
2757 * Someone really wants this to run. If it's not unfair, run it.
2758 */
2759 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
2760 se = cfs_rq->next;
2761
Mike Galbraithf685cea2009-10-23 23:09:22 +02002762 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01002763
2764 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002765}
2766
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002767static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
2768
Ingo Molnarab6cde22007-08-09 11:16:48 +02002769static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002770{
2771 /*
2772 * If still on the runqueue then deactivate_task()
2773 * was not called and update_curr() has to be done:
2774 */
2775 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002776 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002777
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002778 /* throttle cfs_rqs exceeding runtime */
2779 check_cfs_rq_runtime(cfs_rq);
2780
Peter Zijlstraddc97292007-10-15 17:00:10 +02002781 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002782 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02002783 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002784 /* Put 'current' back into the tree. */
2785 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02002786 /* in !on_rq case, update occurred at dequeue */
Paul Turner9ee474f2012-10-04 13:18:30 +02002787 update_entity_load_avg(prev, 1);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002788 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02002789 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002790}
2791
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002792static void
2793entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002794{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002795 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002796 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002797 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002798 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002799
Paul Turner43365bd2010-12-15 19:10:17 -08002800 /*
Paul Turner9d85f212012-10-04 13:18:29 +02002801 * Ensure that runnable average is periodically updated.
2802 */
Paul Turner9ee474f2012-10-04 13:18:30 +02002803 update_entity_load_avg(curr, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002804 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstrabf0bd942013-07-26 23:48:42 +02002805 update_cfs_shares(cfs_rq);
Paul Turner9d85f212012-10-04 13:18:29 +02002806
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002807#ifdef CONFIG_SCHED_HRTICK
2808 /*
2809 * queued ticks are scheduled to match the slice, so don't bother
2810 * validating it and just reschedule.
2811 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07002812 if (queued) {
2813 resched_task(rq_of(cfs_rq)->curr);
2814 return;
2815 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01002816 /*
2817 * don't let the period tick interfere with the hrtick preemption
2818 */
2819 if (!sched_feat(DOUBLE_TICK) &&
2820 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
2821 return;
2822#endif
2823
Yong Zhang2c2efae2011-07-29 16:20:33 +08002824 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002825 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002826}
2827
Paul Turnerab84d312011-07-21 09:43:28 -07002828
2829/**************************************************
2830 * CFS bandwidth control machinery
2831 */
2832
2833#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02002834
2835#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01002836static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02002837
2838static inline bool cfs_bandwidth_used(void)
2839{
Ingo Molnarc5905af2012-02-24 08:31:31 +01002840 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002841}
2842
Ben Segall1ee14e62013-10-16 11:16:12 -07002843void cfs_bandwidth_usage_inc(void)
Peter Zijlstra029632f2011-10-25 10:00:11 +02002844{
Ben Segall1ee14e62013-10-16 11:16:12 -07002845 static_key_slow_inc(&__cfs_bandwidth_used);
2846}
2847
2848void cfs_bandwidth_usage_dec(void)
2849{
2850 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02002851}
2852#else /* HAVE_JUMP_LABEL */
2853static bool cfs_bandwidth_used(void)
2854{
2855 return true;
2856}
2857
Ben Segall1ee14e62013-10-16 11:16:12 -07002858void cfs_bandwidth_usage_inc(void) {}
2859void cfs_bandwidth_usage_dec(void) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02002860#endif /* HAVE_JUMP_LABEL */
2861
Paul Turnerab84d312011-07-21 09:43:28 -07002862/*
2863 * default period for cfs group bandwidth.
2864 * default: 0.1s, units: nanoseconds
2865 */
2866static inline u64 default_cfs_period(void)
2867{
2868 return 100000000ULL;
2869}
Paul Turnerec12cb72011-07-21 09:43:30 -07002870
2871static inline u64 sched_cfs_bandwidth_slice(void)
2872{
2873 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
2874}
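/*
 * Illustrative note (not part of fair.c): runtime is pulled from the global
 * pool in slice-sized chunks, and a group's CPU share per period is simply
 * quota/period. A hedged user-space sketch; the 5ms slice mirrors the usual
 * sysctl_sched_cfs_bandwidth_slice default, but treat all numbers here as
 * assumptions.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t period_ns = 100000000ULL;	/* default_cfs_period(): 100ms */
	uint64_t quota_ns  =  50000000ULL;	/* example cpu.cfs_quota_us = 50000 */
	uint64_t slice_ns  =   5000000ULL;	/* assumed bandwidth slice: 5ms */

	/* fraction of one CPU this group may consume each period */
	printf("cpu share: %.2f\n", (double)quota_ns / (double)period_ns);
	/* how many slice-sized grabs fit into one period's quota */
	printf("slices per period: %llu\n",
	       (unsigned long long)(quota_ns / slice_ns));
	return 0;
}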
2875
Paul Turnera9cf55b2011-07-21 09:43:32 -07002876/*
2877 * Replenish runtime according to assigned quota and update expiration time.
2878 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
2879 * additional synchronization around rq->lock.
2880 *
2881 * requires cfs_b->lock
2882 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02002883void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07002884{
2885 u64 now;
2886
2887 if (cfs_b->quota == RUNTIME_INF)
2888 return;
2889
2890 now = sched_clock_cpu(smp_processor_id());
2891 cfs_b->runtime = cfs_b->quota;
2892 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
2893}
2894
Peter Zijlstra029632f2011-10-25 10:00:11 +02002895static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2896{
2897 return &tg->cfs_bandwidth;
2898}
2899
Paul Turnerf1b17282012-10-04 13:18:31 +02002900/* rq_clock_task() normalized against any time this cfs_rq has spent throttled */
2901static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
2902{
2903 if (unlikely(cfs_rq->throttle_count))
2904 return cfs_rq->throttled_clock_task;
2905
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002906 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02002907}
2908
Paul Turner85dac902011-07-21 09:43:33 -07002909/* returns 0 on failure to allocate runtime */
2910static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07002911{
2912 struct task_group *tg = cfs_rq->tg;
2913 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002914 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002915
2916 /* note: this is a positive sum as runtime_remaining <= 0 */
2917 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
2918
2919 raw_spin_lock(&cfs_b->lock);
2920 if (cfs_b->quota == RUNTIME_INF)
2921 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07002922 else {
Paul Turnera9cf55b2011-07-21 09:43:32 -07002923 /*
2924 * If the bandwidth pool has become inactive, then at least one
2925 * period must have elapsed since the last consumption.
2926		 * Refresh the global state and ensure the bandwidth timer becomes
2927 * active.
2928 */
2929 if (!cfs_b->timer_active) {
2930 __refill_cfs_bandwidth_runtime(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07002931 __start_cfs_bandwidth(cfs_b);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002932 }
Paul Turner58088ad2011-07-21 09:43:31 -07002933
2934 if (cfs_b->runtime > 0) {
2935 amount = min(cfs_b->runtime, min_amount);
2936 cfs_b->runtime -= amount;
2937 cfs_b->idle = 0;
2938 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002939 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07002940 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07002941 raw_spin_unlock(&cfs_b->lock);
2942
2943 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002944 /*
2945 * we may have advanced our local expiration to account for allowed
2946 * spread between our sched_clock and the one on which runtime was
2947 * issued.
2948 */
2949 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
2950 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07002951
2952 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002953}
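/*
 * Illustrative note (not part of fair.c): a cfs_rq asks the global pool for
 * enough runtime to cover its deficit plus one fresh slice, and keeps running
 * only if the grant pushes runtime_remaining positive. A minimal sketch with
 * invented values (2ms of local debt, a 5ms slice, 3ms left in the pool):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t runtime_remaining = -2000000;	/* local deficit: 2ms overdrawn */
	int64_t slice             =  5000000;	/* assumed bandwidth slice: 5ms */
	int64_t pool              =  3000000;	/* runtime left in global pool */

	/* note: a positive sum, as runtime_remaining <= 0 here */
	int64_t min_amount = slice - runtime_remaining;		 /* asks for 7ms */
	int64_t amount = pool < min_amount ? pool : min_amount;	 /* granted 3ms */

	runtime_remaining += amount;				 /* now +1ms */
	printf("requested %lld ns, granted %lld ns -> %s\n",
	       (long long)min_amount, (long long)amount,
	       runtime_remaining > 0 ? "keeps running" : "will throttle");
	return 0;
}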
2954
2955/*
2956 * Note: This depends on the synchronization provided by sched_clock and the
2957 * fact that rq->clock snapshots this value.
2958 */
2959static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2960{
2961 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07002962
2963 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002964 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07002965 return;
2966
2967 if (cfs_rq->runtime_remaining < 0)
2968 return;
2969
2970 /*
2971 * If the local deadline has passed we have to consider the
2972 * possibility that our sched_clock is 'fast' and the global deadline
2973 * has not truly expired.
2974 *
2975	 * Fortunately we can determine whether this is the case by checking
2976 * whether the global deadline has advanced.
2977 */
2978
2979 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
2980 /* extend local deadline, drift is bounded above by 2 ticks */
2981 cfs_rq->runtime_expires += TICK_NSEC;
2982 } else {
2983 /* global deadline is ahead, expiration has passed */
2984 cfs_rq->runtime_remaining = 0;
2985 }
Paul Turnerec12cb72011-07-21 09:43:30 -07002986}
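/*
 * Illustrative note (not part of fair.c): the check above separates a fast
 * local sched_clock from a genuinely elapsed period by looking at whether the
 * global deadline moved on. A minimal sketch with invented timestamps and an
 * assumed 1ms tick:
 */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_TICK_NSEC 1000000ULL	/* assumed tick length for the sketch */

int main(void)
{
	uint64_t local_expires  = 1000000000ULL;  /* cfs_rq->runtime_expires */
	uint64_t global_expires = 1100000000ULL;  /* cfs_b->runtime_expires  */
	int64_t  runtime_remaining = 500000;	  /* 0.5ms still unspent     */

	if ((int64_t)(local_expires - global_expires) >= 0) {
		/* clocks merely drifted: extend the local deadline */
		local_expires += EXAMPLE_TICK_NSEC;
		printf("deadline extended, %lld ns kept\n",
		       (long long)runtime_remaining);
	} else {
		/* the global deadline is ahead: the period really expired */
		runtime_remaining = 0;
		printf("runtime expired\n");
	}
	return 0;
}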
2987
Peter Zijlstra9dbdb152013-11-18 18:27:06 +01002988static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07002989{
Paul Turnera9cf55b2011-07-21 09:43:32 -07002990 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07002991 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07002992 expire_cfs_rq_runtime(cfs_rq);
2993
2994 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07002995 return;
2996
Paul Turner85dac902011-07-21 09:43:33 -07002997 /*
2998 * if we're unable to extend our runtime we resched so that the active
2999 * hierarchy can be throttled
3000 */
3001 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
3002 resched_task(rq_of(cfs_rq)->curr);
Paul Turnerec12cb72011-07-21 09:43:30 -07003003}
3004
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003005static __always_inline
Peter Zijlstra9dbdb152013-11-18 18:27:06 +01003006void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07003007{
Paul Turner56f570e2011-11-07 20:26:33 -08003008 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07003009 return;
3010
3011 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3012}
3013
Paul Turner85dac902011-07-21 09:43:33 -07003014static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3015{
Paul Turner56f570e2011-11-07 20:26:33 -08003016 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07003017}
3018
Paul Turner64660c82011-07-21 09:43:36 -07003019/* check whether cfs_rq, or any parent, is throttled */
3020static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3021{
Paul Turner56f570e2011-11-07 20:26:33 -08003022 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07003023}
3024
3025/*
3026 * Ensure that neither of the group entities corresponding to src_cpu or
3027 * dest_cpu are members of a throttled hierarchy when performing group
3028 * load-balance operations.
3029 */
3030static inline int throttled_lb_pair(struct task_group *tg,
3031 int src_cpu, int dest_cpu)
3032{
3033 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3034
3035 src_cfs_rq = tg->cfs_rq[src_cpu];
3036 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3037
3038 return throttled_hierarchy(src_cfs_rq) ||
3039 throttled_hierarchy(dest_cfs_rq);
3040}
3041
3042/* updated child weight may affect parent so we have to do this bottom up */
3043static int tg_unthrottle_up(struct task_group *tg, void *data)
3044{
3045 struct rq *rq = data;
3046 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3047
3048 cfs_rq->throttle_count--;
3049#ifdef CONFIG_SMP
3050 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02003051 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003052 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02003053 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07003054 }
3055#endif
3056
3057 return 0;
3058}
3059
3060static int tg_throttle_down(struct task_group *tg, void *data)
3061{
3062 struct rq *rq = data;
3063 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3064
Paul Turner82958362012-10-04 13:18:31 +02003065 /* group is entering throttled state, stop time */
3066 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003067 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07003068 cfs_rq->throttle_count++;
3069
3070 return 0;
3071}
3072
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003073static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07003074{
3075 struct rq *rq = rq_of(cfs_rq);
3076 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3077 struct sched_entity *se;
3078 long task_delta, dequeue = 1;
3079
3080 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3081
Paul Turnerf1b17282012-10-04 13:18:31 +02003082 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07003083 rcu_read_lock();
3084 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3085 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07003086
3087 task_delta = cfs_rq->h_nr_running;
3088 for_each_sched_entity(se) {
3089 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3090 /* throttled entity or throttle-on-deactivate */
3091 if (!se->on_rq)
3092 break;
3093
3094 if (dequeue)
3095 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3096 qcfs_rq->h_nr_running -= task_delta;
3097
3098 if (qcfs_rq->load.weight)
3099 dequeue = 0;
3100 }
3101
3102 if (!se)
3103 rq->nr_running -= task_delta;
3104
3105 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003106 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07003107 raw_spin_lock(&cfs_b->lock);
3108 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
Ben Segallf9f9ffc2013-10-16 11:16:32 -07003109 if (!cfs_b->timer_active)
3110 __start_cfs_bandwidth(cfs_b);
Paul Turner85dac902011-07-21 09:43:33 -07003111 raw_spin_unlock(&cfs_b->lock);
3112}
3113
Peter Zijlstra029632f2011-10-25 10:00:11 +02003114void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07003115{
3116 struct rq *rq = rq_of(cfs_rq);
3117 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3118 struct sched_entity *se;
3119 int enqueue = 1;
3120 long task_delta;
3121
Michael Wang22b958d2013-06-04 14:23:39 +08003122 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07003123
3124 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02003125
3126 update_rq_clock(rq);
3127
Paul Turner671fd9d2011-07-21 09:43:34 -07003128 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003129 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07003130 list_del_rcu(&cfs_rq->throttled_list);
3131 raw_spin_unlock(&cfs_b->lock);
3132
Paul Turner64660c82011-07-21 09:43:36 -07003133 /* update hierarchical throttle state */
3134 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3135
Paul Turner671fd9d2011-07-21 09:43:34 -07003136 if (!cfs_rq->load.weight)
3137 return;
3138
3139 task_delta = cfs_rq->h_nr_running;
3140 for_each_sched_entity(se) {
3141 if (se->on_rq)
3142 enqueue = 0;
3143
3144 cfs_rq = cfs_rq_of(se);
3145 if (enqueue)
3146 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3147 cfs_rq->h_nr_running += task_delta;
3148
3149 if (cfs_rq_throttled(cfs_rq))
3150 break;
3151 }
3152
3153 if (!se)
3154 rq->nr_running += task_delta;
3155
3156 /* determine whether we need to wake up potentially idle cpu */
3157 if (rq->curr == rq->idle && rq->cfs.nr_running)
3158 resched_task(rq->curr);
3159}
3160
3161static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3162 u64 remaining, u64 expires)
3163{
3164 struct cfs_rq *cfs_rq;
3165 u64 runtime = remaining;
3166
3167 rcu_read_lock();
3168 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3169 throttled_list) {
3170 struct rq *rq = rq_of(cfs_rq);
3171
3172 raw_spin_lock(&rq->lock);
3173 if (!cfs_rq_throttled(cfs_rq))
3174 goto next;
3175
3176 runtime = -cfs_rq->runtime_remaining + 1;
3177 if (runtime > remaining)
3178 runtime = remaining;
3179 remaining -= runtime;
3180
3181 cfs_rq->runtime_remaining += runtime;
3182 cfs_rq->runtime_expires = expires;
3183
3184 /* we check whether we're throttled above */
3185 if (cfs_rq->runtime_remaining > 0)
3186 unthrottle_cfs_rq(cfs_rq);
3187
3188next:
3189 raw_spin_unlock(&rq->lock);
3190
3191 if (!remaining)
3192 break;
3193 }
3194 rcu_read_unlock();
3195
3196 return remaining;
3197}
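/*
 * Illustrative note (not part of fair.c): each throttled cfs_rq is topped up
 * to just above zero, in list order, until the refilled pool runs dry. A
 * hedged sketch with two invented deficits and 10ms of fresh runtime:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t remaining = 10000000;			/* 10ms refilled this period */
	int64_t deficit[2] = { -3000000, -12000000 };	/* runtime_remaining of two
							 * throttled cfs_rqs */
	int i;

	for (i = 0; i < 2 && remaining; i++) {
		int64_t runtime = -deficit[i] + 1;	/* just enough to go positive */

		if (runtime > remaining)
			runtime = remaining;
		remaining -= runtime;
		deficit[i] += runtime;
		printf("cfs_rq%d gets %lld ns -> %s\n", i, (long long)runtime,
		       deficit[i] > 0 ? "unthrottled" : "still throttled");
	}
	printf("left in pool: %lld ns\n", (long long)remaining);
	return 0;
}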
3198
Paul Turner58088ad2011-07-21 09:43:31 -07003199/*
3200 * Responsible for refilling a task_group's bandwidth and unthrottling its
3201 * cfs_rqs as appropriate. If there has been no activity within the last
3202 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3203 * used to track this state.
3204 */
3205static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3206{
Paul Turner671fd9d2011-07-21 09:43:34 -07003207 u64 runtime, runtime_expires;
3208 int idle = 1, throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07003209
3210 raw_spin_lock(&cfs_b->lock);
3211 /* no need to continue the timer with no bandwidth constraint */
3212 if (cfs_b->quota == RUNTIME_INF)
3213 goto out_unlock;
3214
Paul Turner671fd9d2011-07-21 09:43:34 -07003215 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3216 /* idle depends on !throttled (for the case of a large deficit) */
3217 idle = cfs_b->idle && !throttled;
Nikhil Raoe8da1b12011-07-21 09:43:40 -07003218 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07003219
Paul Turnera9cf55b2011-07-21 09:43:32 -07003220 /* if we're going inactive then everything else can be deferred */
3221 if (idle)
3222 goto out_unlock;
3223
Ben Segall927b54f2013-10-16 11:16:22 -07003224 /*
3225 * if we have relooped after returning idle once, we need to update our
3226 * status as actually running, so that other cpus doing
3227 * __start_cfs_bandwidth will stop trying to cancel us.
3228 */
3229 cfs_b->timer_active = 1;
3230
Paul Turnera9cf55b2011-07-21 09:43:32 -07003231 __refill_cfs_bandwidth_runtime(cfs_b);
3232
Paul Turner671fd9d2011-07-21 09:43:34 -07003233 if (!throttled) {
3234 /* mark as potentially idle for the upcoming period */
3235 cfs_b->idle = 1;
3236 goto out_unlock;
3237 }
Paul Turner58088ad2011-07-21 09:43:31 -07003238
Nikhil Raoe8da1b12011-07-21 09:43:40 -07003239 /* account preceding periods in which throttling occurred */
3240 cfs_b->nr_throttled += overrun;
3241
Paul Turner671fd9d2011-07-21 09:43:34 -07003242 /*
3243 * There are throttled entities so we must first use the new bandwidth
3244 * to unthrottle them before making it generally available. This
3245 * ensures that all existing debts will be paid before a new cfs_rq is
3246 * allowed to run.
3247 */
3248 runtime = cfs_b->runtime;
3249 runtime_expires = cfs_b->runtime_expires;
3250 cfs_b->runtime = 0;
3251
3252 /*
3253 * This check is repeated as we are holding onto the new bandwidth
3254 * while we unthrottle. This can potentially race with an unthrottled
3255 * group trying to acquire new bandwidth from the global pool.
3256 */
3257 while (throttled && runtime > 0) {
3258 raw_spin_unlock(&cfs_b->lock);
3259 /* we can't nest cfs_b->lock while distributing bandwidth */
3260 runtime = distribute_cfs_runtime(cfs_b, runtime,
3261 runtime_expires);
3262 raw_spin_lock(&cfs_b->lock);
3263
3264 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
3265 }
3266
3267 /* return (any) remaining runtime */
3268 cfs_b->runtime = runtime;
3269 /*
3270 * While we are ensured activity in the period following an
3271 * unthrottle, this also covers the case in which the new bandwidth is
3272 * insufficient to cover the existing bandwidth deficit. (Forcing the
3273 * timer to remain active while there are any throttled entities.)
3274 */
3275 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07003276out_unlock:
3277 if (idle)
3278 cfs_b->timer_active = 0;
3279 raw_spin_unlock(&cfs_b->lock);
3280
3281 return idle;
3282}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003283
Paul Turnerd8b49862011-07-21 09:43:41 -07003284/* a cfs_rq won't donate quota below this amount */
3285static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3286/* minimum remaining period time to redistribute slack quota */
3287static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3288/* how long we wait to gather additional slack before distributing */
3289static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3290
Ben Segalldb06e782013-10-16 11:16:17 -07003291/*
3292 * Are we near the end of the current quota period?
3293 *
3294 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
3295 * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
3296 * migrate_hrtimers, base is never cleared, so we are fine.
3297 */
Paul Turnerd8b49862011-07-21 09:43:41 -07003298static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3299{
3300 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3301 u64 remaining;
3302
3303 /* if the call-back is running a quota refresh is already occurring */
3304 if (hrtimer_callback_running(refresh_timer))
3305 return 1;
3306
3307 /* is a quota refresh about to occur? */
3308 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3309 if (remaining < min_expire)
3310 return 1;
3311
3312 return 0;
3313}
3314
3315static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3316{
3317 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3318
3319 /* if there's a quota refresh soon don't bother with slack */
3320 if (runtime_refresh_within(cfs_b, min_left))
3321 return;
3322
3323 start_bandwidth_timer(&cfs_b->slack_timer,
3324 ns_to_ktime(cfs_bandwidth_slack_period));
3325}
3326
3327/* we know any runtime found here is valid as update_curr() precedes return */
3328static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3329{
3330 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3331 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3332
3333 if (slack_runtime <= 0)
3334 return;
3335
3336 raw_spin_lock(&cfs_b->lock);
3337 if (cfs_b->quota != RUNTIME_INF &&
3338 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3339 cfs_b->runtime += slack_runtime;
3340
3341 /* we are under rq->lock, defer unthrottling using a timer */
3342 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3343 !list_empty(&cfs_b->throttled_cfs_rq))
3344 start_cfs_slack_bandwidth(cfs_b);
3345 }
3346 raw_spin_unlock(&cfs_b->lock);
3347
3348 /* even if it's not valid for return we don't want to try again */
3349 cfs_rq->runtime_remaining -= slack_runtime;
3350}
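/*
 * Illustrative note (not part of fair.c): on its last dequeue a cfs_rq keeps
 * min_cfs_rq_runtime for itself and hands the rest back as slack for the
 * slack timer to redistribute. A minimal sketch with 3.5ms left locally and
 * an invented global pool:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t runtime_remaining  = 3500000;	/* 3.5ms left when going idle   */
	int64_t min_cfs_rq_runtime = 1000000;	/* quota a cfs_rq never donates */
	int64_t cfs_b_runtime      = 2000000;	/* global pool before return    */

	int64_t slack = runtime_remaining - min_cfs_rq_runtime;
	if (slack > 0) {
		cfs_b_runtime     += slack;	/* returned to the global pool */
		runtime_remaining -= slack;	/* local side keeps 1ms */
	}
	printf("donated %lld ns, pool now %lld ns, kept %lld ns\n",
	       (long long)slack, (long long)cfs_b_runtime,
	       (long long)runtime_remaining);
	return 0;
}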
3351
3352static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3353{
Paul Turner56f570e2011-11-07 20:26:33 -08003354 if (!cfs_bandwidth_used())
3355 return;
3356
Paul Turnerfccfdc62011-11-07 20:26:34 -08003357 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07003358 return;
3359
3360 __return_cfs_rq_runtime(cfs_rq);
3361}
3362
3363/*
3364 * This is done with a timer (instead of inline with bandwidth return) since
3365 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3366 */
3367static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3368{
3369 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3370 u64 expires;
3371
3372 /* confirm we're still not at a refresh boundary */
Paul Turnerd8b49862011-07-21 09:43:41 -07003373 raw_spin_lock(&cfs_b->lock);
Ben Segalldb06e782013-10-16 11:16:17 -07003374 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3375 raw_spin_unlock(&cfs_b->lock);
3376 return;
3377 }
3378
Paul Turnerd8b49862011-07-21 09:43:41 -07003379 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
3380 runtime = cfs_b->runtime;
3381 cfs_b->runtime = 0;
3382 }
3383 expires = cfs_b->runtime_expires;
3384 raw_spin_unlock(&cfs_b->lock);
3385
3386 if (!runtime)
3387 return;
3388
3389 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3390
3391 raw_spin_lock(&cfs_b->lock);
3392 if (expires == cfs_b->runtime_expires)
3393 cfs_b->runtime = runtime;
3394 raw_spin_unlock(&cfs_b->lock);
3395}
3396
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003397/*
3398 * When a group wakes up we want to make sure that its quota is not already
3399 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3400	 * runtime, as update_curr() throttling cannot trigger until it's on-rq.
3401 */
3402static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3403{
Paul Turner56f570e2011-11-07 20:26:33 -08003404 if (!cfs_bandwidth_used())
3405 return;
3406
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003407 /* an active group must be handled by the update_curr()->put() path */
3408 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3409 return;
3410
3411 /* ensure the group is not already throttled */
3412 if (cfs_rq_throttled(cfs_rq))
3413 return;
3414
3415 /* update runtime allocation */
3416 account_cfs_rq_runtime(cfs_rq, 0);
3417 if (cfs_rq->runtime_remaining <= 0)
3418 throttle_cfs_rq(cfs_rq);
3419}
3420
3421/* conditionally throttle active cfs_rq's from put_prev_entity() */
3422static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3423{
Paul Turner56f570e2011-11-07 20:26:33 -08003424 if (!cfs_bandwidth_used())
3425 return;
3426
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003427 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
3428 return;
3429
3430 /*
3431 * it's possible for a throttled entity to be forced into a running
3432	 * state (e.g. set_curr_task); in this case we're finished.
3433 */
3434 if (cfs_rq_throttled(cfs_rq))
3435 return;
3436
3437 throttle_cfs_rq(cfs_rq);
3438}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003439
Peter Zijlstra029632f2011-10-25 10:00:11 +02003440static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3441{
3442 struct cfs_bandwidth *cfs_b =
3443 container_of(timer, struct cfs_bandwidth, slack_timer);
3444 do_sched_cfs_slack_timer(cfs_b);
3445
3446 return HRTIMER_NORESTART;
3447}
3448
3449static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3450{
3451 struct cfs_bandwidth *cfs_b =
3452 container_of(timer, struct cfs_bandwidth, period_timer);
3453 ktime_t now;
3454 int overrun;
3455 int idle = 0;
3456
3457 for (;;) {
3458 now = hrtimer_cb_get_time(timer);
3459 overrun = hrtimer_forward(timer, now, cfs_b->period);
3460
3461 if (!overrun)
3462 break;
3463
3464 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3465 }
3466
3467 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3468}
3469
3470void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3471{
3472 raw_spin_lock_init(&cfs_b->lock);
3473 cfs_b->runtime = 0;
3474 cfs_b->quota = RUNTIME_INF;
3475 cfs_b->period = ns_to_ktime(default_cfs_period());
3476
3477 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3478 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3479 cfs_b->period_timer.function = sched_cfs_period_timer;
3480 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3481 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3482}
3483
3484static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3485{
3486 cfs_rq->runtime_enabled = 0;
3487 INIT_LIST_HEAD(&cfs_rq->throttled_list);
3488}
3489
3490/* requires cfs_b->lock, may release to reprogram timer */
3491void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3492{
3493 /*
3494 * The timer may be active because we're trying to set a new bandwidth
3495 * period or because we're racing with the tear-down path
3496 * (timer_active==0 becomes visible before the hrtimer call-back
3497 * terminates). In either case we ensure that it's re-programmed
3498 */
Ben Segall927b54f2013-10-16 11:16:22 -07003499 while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
3500 hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
3501 /* bounce the lock to allow do_sched_cfs_period_timer to run */
Peter Zijlstra029632f2011-10-25 10:00:11 +02003502 raw_spin_unlock(&cfs_b->lock);
Ben Segall927b54f2013-10-16 11:16:22 -07003503 cpu_relax();
Peter Zijlstra029632f2011-10-25 10:00:11 +02003504 raw_spin_lock(&cfs_b->lock);
3505 /* if someone else restarted the timer then we're done */
3506 if (cfs_b->timer_active)
3507 return;
3508 }
3509
3510 cfs_b->timer_active = 1;
3511 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3512}
3513
3514static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3515{
3516 hrtimer_cancel(&cfs_b->period_timer);
3517 hrtimer_cancel(&cfs_b->slack_timer);
3518}
3519
Arnd Bergmann38dc3342013-01-25 14:14:22 +00003520static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003521{
3522 struct cfs_rq *cfs_rq;
3523
3524 for_each_leaf_cfs_rq(rq, cfs_rq) {
3525 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3526
3527 if (!cfs_rq->runtime_enabled)
3528 continue;
3529
3530 /*
3531 * clock_task is not advancing so we just need to make sure
3532 * there's some valid quota amount
3533 */
3534 cfs_rq->runtime_remaining = cfs_b->quota;
3535 if (cfs_rq_throttled(cfs_rq))
3536 unthrottle_cfs_rq(cfs_rq);
3537 }
3538}
3539
3540#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02003541static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3542{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003543 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02003544}
3545
Peter Zijlstra9dbdb152013-11-18 18:27:06 +01003546static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003547static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
3548static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003549static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07003550
3551static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3552{
3553 return 0;
3554}
Paul Turner64660c82011-07-21 09:43:36 -07003555
3556static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3557{
3558 return 0;
3559}
3560
3561static inline int throttled_lb_pair(struct task_group *tg,
3562 int src_cpu, int dest_cpu)
3563{
3564 return 0;
3565}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003566
3567void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3568
3569#ifdef CONFIG_FAIR_GROUP_SCHED
3570static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07003571#endif
3572
Peter Zijlstra029632f2011-10-25 10:00:11 +02003573static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3574{
3575 return NULL;
3576}
3577static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07003578static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003579
3580#endif /* CONFIG_CFS_BANDWIDTH */
3581
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003582/**************************************************
3583 * CFS operations on tasks:
3584 */
3585
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003586#ifdef CONFIG_SCHED_HRTICK
3587static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
3588{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003589 struct sched_entity *se = &p->se;
3590 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3591
3592 WARN_ON(task_rq(p) != rq);
3593
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003594 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003595 u64 slice = sched_slice(cfs_rq, se);
3596 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
3597 s64 delta = slice - ran;
3598
3599 if (delta < 0) {
3600 if (rq->curr == p)
3601 resched_task(p);
3602 return;
3603 }
3604
3605 /*
3606 * Don't schedule slices shorter than 10000ns, that just
3607 * doesn't make sense. Rely on vruntime for fairness.
3608 */
Peter Zijlstra31656512008-07-18 18:01:23 +02003609 if (rq->curr != p)
Peter Zijlstra157124c2008-07-28 11:53:11 +02003610 delta = max_t(s64, 10000LL, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003611
Peter Zijlstra31656512008-07-18 18:01:23 +02003612 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003613 }
3614}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003615
3616/*
3617 * called from enqueue/dequeue and updates the hrtick when the
3618 * current task is from our class and nr_running is low enough
3619 * to matter.
3620 */
3621static void hrtick_update(struct rq *rq)
3622{
3623 struct task_struct *curr = rq->curr;
3624
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003625 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003626 return;
3627
3628 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3629 hrtick_start_fair(rq, curr);
3630}
Dhaval Giani55e12e52008-06-24 23:39:43 +05303631#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003632static inline void
3633hrtick_start_fair(struct rq *rq, struct task_struct *p)
3634{
3635}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003636
3637static inline void hrtick_update(struct rq *rq)
3638{
3639}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003640#endif
3641
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003642/*
3643 * The enqueue_task method is called before nr_running is
3644 * increased. Here we update the fair scheduling stats and
3645 * then put the task into the rbtree:
3646 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00003647static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003648enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003649{
3650 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003651 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003652
3653 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003654 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003655 break;
3656 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003657 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07003658
3659 /*
3660 * end evaluation on encountering a throttled cfs_rq
3661 *
3662 * note: in the case of encountering a throttled cfs_rq we will
3663 * post the final h_nr_running increment below.
3664 */
3665 if (cfs_rq_throttled(cfs_rq))
3666 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07003667 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07003668
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003669 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003670 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003671
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003672 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08003673 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07003674 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003675
Paul Turner85dac902011-07-21 09:43:33 -07003676 if (cfs_rq_throttled(cfs_rq))
3677 break;
3678
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003679 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003680 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003681 }
3682
Ben Segall18bf2802012-10-04 12:51:20 +02003683 if (!se) {
3684 update_rq_runnable_avg(rq, rq->nr_running);
Paul Turner85dac902011-07-21 09:43:33 -07003685 inc_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02003686 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003687 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003688}
3689
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003690static void set_next_buddy(struct sched_entity *se);
3691
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003692/*
3693 * The dequeue_task method is called before nr_running is
3694 * decreased. We remove the task from the rbtree and
3695 * update the fair scheduling stats:
3696 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003697static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003698{
3699 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003700 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003701 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003702
3703 for_each_sched_entity(se) {
3704 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003705 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07003706
3707 /*
3708 * end evaluation on encountering a throttled cfs_rq
3709 *
3710 * note: in the case of encountering a throttled cfs_rq we will
3711 * post the final h_nr_running decrement below.
3712 */
3713 if (cfs_rq_throttled(cfs_rq))
3714 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07003715 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003716
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003717 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003718 if (cfs_rq->load.weight) {
3719 /*
3720 * Bias pick_next to pick a task from this cfs_rq, as
3721 * p is sleeping when it is within its sched_slice.
3722 */
3723 if (task_sleep && parent_entity(se))
3724 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07003725
3726 /* avoid re-evaluating load for this entity */
3727 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003728 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003729 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003730 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003731 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003732
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003733 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08003734 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07003735 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003736
Paul Turner85dac902011-07-21 09:43:33 -07003737 if (cfs_rq_throttled(cfs_rq))
3738 break;
3739
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003740 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003741 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003742 }
3743
Ben Segall18bf2802012-10-04 12:51:20 +02003744 if (!se) {
Paul Turner85dac902011-07-21 09:43:33 -07003745 dec_nr_running(rq);
Ben Segall18bf2802012-10-04 12:51:20 +02003746 update_rq_runnable_avg(rq, 1);
3747 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003748 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003749}
3750
Gregory Haskinse7693a32008-01-25 21:08:09 +01003751#ifdef CONFIG_SMP
Peter Zijlstra029632f2011-10-25 10:00:11 +02003752/* Used instead of source_load when we know the type == 0 */
3753static unsigned long weighted_cpuload(const int cpu)
3754{
Alex Shib92486c2013-06-20 10:18:50 +08003755 return cpu_rq(cpu)->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003756}
3757
3758/*
3759 * Return a low guess at the load of a migration-source cpu weighted
3760 * according to the scheduling class and "nice" value.
3761 *
3762 * We want to under-estimate the load of migration sources, to
3763 * balance conservatively.
3764 */
3765static unsigned long source_load(int cpu, int type)
3766{
3767 struct rq *rq = cpu_rq(cpu);
3768 unsigned long total = weighted_cpuload(cpu);
3769
3770 if (type == 0 || !sched_feat(LB_BIAS))
3771 return total;
3772
3773 return min(rq->cpu_load[type-1], total);
3774}
3775
3776/*
3777 * Return a high guess at the load of a migration-target cpu weighted
3778 * according to the scheduling class and "nice" value.
3779 */
3780static unsigned long target_load(int cpu, int type)
3781{
3782 struct rq *rq = cpu_rq(cpu);
3783 unsigned long total = weighted_cpuload(cpu);
3784
3785 if (type == 0 || !sched_feat(LB_BIAS))
3786 return total;
3787
3788 return max(rq->cpu_load[type-1], total);
3789}
3790
3791static unsigned long power_of(int cpu)
3792{
3793 return cpu_rq(cpu)->cpu_power;
3794}
3795
3796static unsigned long cpu_avg_load_per_task(int cpu)
3797{
3798 struct rq *rq = cpu_rq(cpu);
3799 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Alex Shib92486c2013-06-20 10:18:50 +08003800 unsigned long load_avg = rq->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003801
3802 if (nr_running)
Alex Shib92486c2013-06-20 10:18:50 +08003803 return load_avg / nr_running;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003804
3805 return 0;
3806}
3807
Michael Wang62470412013-07-04 12:55:51 +08003808static void record_wakee(struct task_struct *p)
3809{
3810 /*
3811	 * Rough decay (wiping) for cost saving; don't worry
3812	 * about the boundary, a really active task won't care
3813	 * about the loss.
3814 */
3815 if (jiffies > current->wakee_flip_decay_ts + HZ) {
3816 current->wakee_flips = 0;
3817 current->wakee_flip_decay_ts = jiffies;
3818 }
3819
3820 if (current->last_wakee != p) {
3821 current->last_wakee = p;
3822 current->wakee_flips++;
3823 }
3824}
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003825
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003826static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003827{
3828 struct sched_entity *se = &p->se;
3829 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003830 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003831
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003832#ifndef CONFIG_64BIT
3833 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02003834
Peter Zijlstra3fe16982011-04-05 17:23:48 +02003835 do {
3836 min_vruntime_copy = cfs_rq->min_vruntime_copy;
3837 smp_rmb();
3838 min_vruntime = cfs_rq->min_vruntime;
3839 } while (min_vruntime != min_vruntime_copy);
3840#else
3841 min_vruntime = cfs_rq->min_vruntime;
3842#endif
3843
3844 se->vruntime -= min_vruntime;
Michael Wang62470412013-07-04 12:55:51 +08003845 record_wakee(p);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003846}
3847
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003848#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003849/*
3850 * effective_load() calculates the load change as seen from the root_task_group
3851 *
3852 * Adding load to a group doesn't make a group heavier, but can cause movement
3853 * of group shares between cpus. Assuming the shares were perfectly aligned one
3854 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003855 *
3856 * Calculate the effective load difference if @wl is added (subtracted) to @tg
3857 * on this @cpu and results in a total addition (subtraction) of @wg to the
3858 * total group weight.
3859 *
3860 * Given a runqueue weight distribution (rw_i) we can compute a shares
3861 * distribution (s_i) using:
3862 *
3863 * s_i = rw_i / \Sum rw_j (1)
3864 *
3865 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
3866 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
3867 * shares distribution (s_i):
3868 *
3869 * rw_i = { 2, 4, 1, 0 }
3870 * s_i = { 2/7, 4/7, 1/7, 0 }
3871 *
3872 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
3873 * task used to run on and the CPU the waker is running on), we need to
3874 * compute the effect of waking a task on either CPU and, in case of a sync
3875 * wakeup, compute the effect of the current task going to sleep.
3876 *
3877 * So for a change of @wl to the local @cpu with an overall group weight change
3878 * of @wl we can compute the new shares distribution (s'_i) using:
3879 *
3880 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
3881 *
3882 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
3883 * differences in waking a task to CPU 0. The additional task changes the
3884 * weight and shares distributions like:
3885 *
3886 * rw'_i = { 3, 4, 1, 0 }
3887 * s'_i = { 3/8, 4/8, 1/8, 0 }
3888 *
3889 * We can then compute the difference in effective weight by using:
3890 *
3891 * dw_i = S * (s'_i - s_i) (3)
3892 *
3893 * Where 'S' is the group weight as seen by its parent.
3894 *
3895 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
3896 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
3897 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02003898 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003899static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003900{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003901 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003902
Rik van Riel9722c2d2014-01-06 11:39:12 +00003903 if (!tg->parent) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02003904 return wl;
3905
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003906 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003907 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003908
Paul Turner977dda72011-01-14 17:57:50 -08003909 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003910
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003911 /*
3912 * W = @wg + \Sum rw_j
3913 */
3914 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003915
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003916 /*
3917 * w = rw_i + @wl
3918 */
3919 w = se->my_q->load.weight + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003920
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003921 /*
3922 * wl = S * s'_i; see (2)
3923 */
3924 if (W > 0 && w < W)
3925 wl = (w * tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08003926 else
3927 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02003928
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003929 /*
3930 * Per the above, wl is the new se->load.weight value; since
3931 * those are clipped to [MIN_SHARES, ...) do so now. See
3932 * calc_cfs_shares().
3933 */
Paul Turner977dda72011-01-14 17:57:50 -08003934 if (wl < MIN_SHARES)
3935 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003936
3937 /*
3938 * wl = dw_i = S * (s'_i - s_i); see (3)
3939 */
Paul Turner977dda72011-01-14 17:57:50 -08003940 wl -= se->load.weight;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02003941
3942 /*
3943 * Recursively apply this logic to all parent groups to compute
3944 * the final effective load change on the root group. Since
3945 * only the @tg group gets extra weight, all parent groups can
3946 * only redistribute existing shares. @wl is the shift in shares
3947 * resulting from this level per the above.
3948 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003949 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003950 }
3951
3952 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003953}
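/*
 * Illustrative note (not part of fair.c): the worked example in the comment
 * above (rw_i = { 2, 4, 1, 0 }, one extra task on CPU 0) can be reproduced
 * with a small user-space sketch of equations (1)-(3). The task weight of 1
 * and the group weight S = 1024 are assumptions made for the illustration.
 */
#include <stdio.h>

int main(void)
{
	double rw[4] = { 2, 4, 1, 0 };	/* per-cpu runqueue weights (rw_i) */
	double S = 1024;		/* group weight as seen by its parent */
	double wl = 1, wg = 1;		/* one extra task of weight 1 on CPU 0 */
	double W = 0, dw;
	int i;

	for (i = 0; i < 4; i++)
		W += rw[i];

	/* dw_0 = S * (s'_0 - s_0) = S * ((rw_0 + wl)/(W + wg) - rw_0/W) */
	dw = S * ((rw[0] + wl) / (W + wg) - rw[0] / W);
	printf("cpu0: %+f (5/56 of S)\n", dw);

	/* CPU 1 gains no weight, but its share of the group shrinks: -4/56 of S */
	dw = S * (rw[1] / (W + wg) - rw[1] / W);
	printf("cpu1: %+f (-4/56 of S)\n", dw);
	return 0;
}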
3954#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003955
Mel Gorman58d081b2013-10-07 11:29:10 +01003956static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003957{
Peter Zijlstra83378262008-06-27 13:41:37 +02003958 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003959}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02003960
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02003961#endif
3962
Michael Wang62470412013-07-04 12:55:51 +08003963static int wake_wide(struct task_struct *p)
3964{
Peter Zijlstra7d9ffa82013-07-04 12:56:46 +08003965 int factor = this_cpu_read(sd_llc_size);
Michael Wang62470412013-07-04 12:55:51 +08003966
3967 /*
3968	 * Yeah, it's the switching frequency: it could mean many wakees or
3969	 * rapid switching. Using the LLC size as the factor automatically
3970	 * adjusts the looseness, so a bigger node will lead to more pull.
3971 */
3972 if (p->wakee_flips > factor) {
3973 /*
3974		 * wakee is somewhat hot: it needs a certain amount of cpu
3975		 * resource, so if the waker is far hotter, prefer to leave
3976		 * it alone.
3977 */
3978 if (current->wakee_flips > (factor * p->wakee_flips))
3979 return 1;
3980 }
3981
3982 return 0;
3983}
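/*
 * Illustrative note (not part of fair.c): affine wakeups are declined when
 * the wakee flips often and the waker flips a factor more often still, the
 * factor standing in for this_cpu_read(sd_llc_size). A hedged user-space
 * model with invented flip counts and an assumed 8-CPU LLC:
 */
#include <stdio.h>

static int wake_wide_example(unsigned int waker_flips,
			     unsigned int wakee_flips,
			     unsigned int factor)
{
	if (wakee_flips > factor && waker_flips > factor * wakee_flips)
		return 1;	/* wake wide: skip the affine fast path */
	return 0;
}

int main(void)
{
	printf("%d\n", wake_wide_example(200, 20, 8));	/* 1: go wide */
	printf("%d\n", wake_wide_example(30, 4, 8));	/* 0: stay affine */
	return 0;
}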
3984
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003985static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003986{
Paul Turnere37b6a72011-01-21 20:44:59 -08003987 s64 this_load, load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003988 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003989 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02003990 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02003991 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02003992 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01003993
Michael Wang62470412013-07-04 12:55:51 +08003994 /*
3995 * If we wake multiple tasks be careful to not bounce
3996 * ourselves around too much.
3997 */
3998 if (wake_wide(p))
3999 return 0;
4000
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004001 idx = sd->wake_idx;
4002 this_cpu = smp_processor_id();
4003 prev_cpu = task_cpu(p);
4004 load = source_load(prev_cpu, idx);
4005 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004006
4007 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004008 * If sync wakeup then subtract the (maximum possible)
4009 * effect of the currently running task from the load
4010 * of the current CPU:
4011 */
Peter Zijlstra83378262008-06-27 13:41:37 +02004012 if (sync) {
4013 tg = task_group(current);
4014 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004015
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004016 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02004017 load += effective_load(tg, prev_cpu, 0, -weight);
4018 }
4019
4020 tg = task_group(p);
4021 weight = p->se.load.weight;
4022
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02004023 /*
4024 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004025 * due to the sync cause above having dropped this_load to 0, we'll
4026 * always have an imbalance, but there's really nothing you can do
4027 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02004028 *
4029 * Otherwise check if either cpus are near enough in load to allow this
4030 * task to be woken on this_cpu.
4031 */
Paul Turnere37b6a72011-01-21 20:44:59 -08004032 if (this_load > 0) {
4033 s64 this_eff_load, prev_eff_load;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004034
4035 this_eff_load = 100;
4036 this_eff_load *= power_of(prev_cpu);
4037 this_eff_load *= this_load +
4038 effective_load(tg, this_cpu, weight, weight);
4039
4040 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4041 prev_eff_load *= power_of(this_cpu);
4042 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4043
4044 balanced = this_eff_load <= prev_eff_load;
4045 } else
4046 balanced = true;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004047
4048 /*
4049 * If the currently running task will sleep within
4050 * a reasonable amount of time then attract this newly
4051 * woken task:
4052 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02004053 if (sync && balanced)
4054 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004055
Lucas De Marchi41acab82010-03-10 23:37:45 -03004056 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004057 tl_per_task = cpu_avg_load_per_task(this_cpu);
4058
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004059 if (balanced ||
4060 (this_load <= load &&
4061 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004062 /*
4063 * This domain has SD_WAKE_AFFINE and
4064 * p is cache cold in this domain, and
4065 * there is no bad imbalance.
4066 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004067 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03004068 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004069
4070 return 1;
4071 }
4072 return 0;
4073}
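/*
 * Worked example of the balance test above, with hypothetical numbers
 * and a task in the root group (where effective_load() simply returns
 * its wl argument): sd->imbalance_pct = 125, equal cpu power P on both
 * cpus, this_load = 400, load = 600, woken task weight = 100:
 *
 *	this_eff_load = 100 * P * (400 + 100)                   = 50000 * P
 *	prev_eff_load = (100 + (125 - 100) / 2) * P * (600 + 0) = 67200 * P
 *
 * this_eff_load <= prev_eff_load, so the wakeup counts as balanced and
 * may be pulled to this_cpu.
 */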
4074
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004075/*
4076 * find_idlest_group finds and returns the least busy CPU group within the
4077 * domain.
4078 */
4079static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02004080find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004081 int this_cpu, int sd_flag)
Gregory Haskinse7693a32008-01-25 21:08:09 +01004082{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07004083 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004084 unsigned long min_load = ULONG_MAX, this_load = 0;
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004085 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004086 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004087
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004088 if (sd_flag & SD_BALANCE_WAKE)
4089 load_idx = sd->wake_idx;
4090
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004091 do {
4092 unsigned long load, avg_load;
4093 int local_group;
4094 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004095
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004096 /* Skip over this group if it has no CPUs allowed */
4097 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004098 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004099 continue;
4100
4101 local_group = cpumask_test_cpu(this_cpu,
4102 sched_group_cpus(group));
4103
4104 /* Tally up the load of all CPUs in the group */
4105 avg_load = 0;
4106
4107 for_each_cpu(i, sched_group_cpus(group)) {
4108 /* Bias balancing toward cpus of our domain */
4109 if (local_group)
4110 load = source_load(i, load_idx);
4111 else
4112 load = target_load(i, load_idx);
4113
4114 avg_load += load;
4115 }
4116
4117 /* Adjust by relative CPU power of the group */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02004118 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004119
4120 if (local_group) {
4121 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004122 } else if (avg_load < min_load) {
4123 min_load = avg_load;
4124 idlest = group;
4125 }
4126 } while (group = group->next, group != sd->groups);
4127
4128 if (!idlest || 100*this_load < imbalance*min_load)
4129 return NULL;
4130 return idlest;
4131}
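/*
 * Note on the cut-off above (illustrative): with imbalance_pct = 125,
 * imbalance = 112, so we only leave the local group when
 * 100 * this_load >= 112 * min_load, i.e. the local group is at least
 * ~12% more loaded than the idlest remote group; otherwise NULL is
 * returned and the task stays local.
 */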
4132
4133/*
4134 * find_idlest_cpu - find the idlest cpu among the cpus in group.
4135 */
4136static int
4137find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4138{
4139 unsigned long load, min_load = ULONG_MAX;
4140 int idlest = -1;
4141 int i;
4142
4143 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004144 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004145 load = weighted_cpuload(i);
4146
4147 if (load < min_load || (load == min_load && i == this_cpu)) {
4148 min_load = load;
4149 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004150 }
4151 }
4152
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004153 return idlest;
4154}
Gregory Haskinse7693a32008-01-25 21:08:09 +01004155
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004156/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004157 * Try and locate an idle CPU in the sched_domain.
4158 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004159static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004160{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004161 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07004162 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004163 int i = task_cpu(p);
4164
4165 if (idle_cpu(target))
4166 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004167
4168 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004169	 * If the previous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004170 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004171 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4172 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004173
4174 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07004175	 * Otherwise, iterate the domains and find an eligible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004176 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01004177 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08004178 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07004179 sg = sd->groups;
4180 do {
4181 if (!cpumask_intersects(sched_group_cpus(sg),
4182 tsk_cpus_allowed(p)))
4183 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02004184
Linus Torvalds37407ea2012-09-16 12:29:43 -07004185 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004186 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07004187 goto next;
4188 }
4189
4190 target = cpumask_first_and(sched_group_cpus(sg),
4191 tsk_cpus_allowed(p));
4192 goto done;
4193next:
4194 sg = sg->next;
4195 } while (sg != sd->groups);
4196 }
4197done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004198 return target;
4199}
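/*
 * Illustrative behaviour (hypothetical topology): if target is idle it
 * is used as-is; if prev shares target's LLC and is idle, prev wins.
 * Otherwise the group scan above only accepts a group (e.g. an SMT
 * core) whose cpus are all idle, so a half-busy core is never picked
 * over the original target.
 */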
4200
4201/*
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004202 * select_task_rq_fair: pick a run-queue for a waking, forking or execing
 4203 * task, in domains that have the matching SD_BALANCE_* flag set
 4204 * (SD_BALANCE_WAKE, SD_BALANCE_FORK or SD_BALANCE_EXEC).
4205 *
4206 * Balance, ie. select the least loaded group.
4207 *
4208 * Returns the target CPU number, or the same CPU if no balancing is needed.
4209 *
4210 * preempt must be disabled.
4211 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01004212static int
Peter Zijlstraac66f542013-10-07 11:29:16 +01004213select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004214{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02004215 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004216 int cpu = smp_processor_id();
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004217 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004218 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02004219 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004220
Peter Zijlstra29baa742012-04-23 12:11:21 +02004221 if (p->nr_cpus_allowed == 1)
Mike Galbraith76854c72011-11-22 15:18:24 +01004222 return prev_cpu;
4223
Peter Zijlstra0763a662009-09-14 19:37:39 +02004224 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004225 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004226 want_affine = 1;
4227 new_cpu = prev_cpu;
4228 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01004229
Peter Zijlstradce840a2011-04-07 14:09:50 +02004230 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004231 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f428882009-12-16 18:04:34 +01004232 if (!(tmp->flags & SD_LOAD_BALANCE))
4233 continue;
4234
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004235 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004236 * If both cpu and prev_cpu are part of this domain,
4237 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01004238 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004239 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4240 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4241 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08004242 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004243 }
4244
Alex Shif03542a2012-07-26 08:55:34 +08004245 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02004246 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004247 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004248
Mike Galbraith8b911ac2010-03-11 17:17:16 +01004249 if (affine_sd) {
Alex Shif03542a2012-07-26 08:55:34 +08004250 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
Peter Zijlstradce840a2011-04-07 14:09:50 +02004251 prev_cpu = cpu;
4252
4253 new_cpu = select_idle_sibling(p, prev_cpu);
4254 goto unlock;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01004255 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02004256
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004257 while (sd) {
4258 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004259 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004260
Peter Zijlstra0763a662009-09-14 19:37:39 +02004261 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004262 sd = sd->child;
4263 continue;
4264 }
4265
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004266 group = find_idlest_group(sd, p, cpu, sd_flag);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004267 if (!group) {
4268 sd = sd->child;
4269 continue;
4270 }
4271
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02004272 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004273 if (new_cpu == -1 || new_cpu == cpu) {
4274 /* Now try balancing at a lower domain level of cpu */
4275 sd = sd->child;
4276 continue;
4277 }
4278
4279 /* Now try balancing at a lower domain level of new_cpu */
4280 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004281 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004282 sd = NULL;
4283 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004284 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004285 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02004286 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004287 sd = tmp;
4288 }
4289 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01004290 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02004291unlock:
4292 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01004293
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004294 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004295}
Paul Turner0a74bef2012-10-04 13:18:30 +02004296
4297/*
4298 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4299 * cfs_rq_of(p) references at time of call are still valid and identify the
4300 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
4301 * other assumptions, including the state of rq->lock, should be made.
4302 */
4303static void
4304migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4305{
Paul Turneraff3e492012-10-04 13:18:30 +02004306 struct sched_entity *se = &p->se;
4307 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4308
4309 /*
4310 * Load tracking: accumulate removed load so that it can be processed
4311 * when we next update owning cfs_rq under rq->lock. Tasks contribute
4312 * to blocked load iff they have a positive decay-count. It can never
4313 * be negative here since on-rq tasks have decay-count == 0.
4314 */
4315 if (se->avg.decay_count) {
4316 se->avg.decay_count = -__synchronize_entity_decay(se);
Alex Shi25099402013-06-20 10:18:55 +08004317 atomic_long_add(se->avg.load_avg_contrib,
4318 &cfs_rq->removed_load);
Paul Turneraff3e492012-10-04 13:18:30 +02004319 }
Paul Turner0a74bef2012-10-04 13:18:30 +02004320}
Gregory Haskinse7693a32008-01-25 21:08:09 +01004321#endif /* CONFIG_SMP */
4322
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004323static unsigned long
4324wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004325{
4326 unsigned long gran = sysctl_sched_wakeup_granularity;
4327
4328 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004329	 * Since it is curr that is running now, convert the gran from real-time
 4330	 * to virtual-time in its units.
Mike Galbraith13814d42010-03-11 17:17:04 +01004331 *
4332 * By using 'se' instead of 'curr' we penalize light tasks, so
4333 * they get preempted easier. That is, if 'se' < 'curr' then
4334 * the resulting gran will be larger, therefore penalizing the
4335 * lighter, if otoh 'se' > 'curr' then the resulting gran will
4336 * be smaller, again penalizing the lighter task.
4337 *
4338 * This is especially important for buddies when the leftmost
4339 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004340 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08004341 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004342}
4343
4344/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02004345 * Should 'se' preempt 'curr'.
4346 *
4347 * |s1
4348 * |s2
4349 * |s3
4350 * g
4351 * |<--->|c
4352 *
4353 * w(c, s1) = -1
4354 * w(c, s2) = 0
4355 * w(c, s3) = 1
4356 *
4357 */
4358static int
4359wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4360{
4361 s64 gran, vdiff = curr->vruntime - se->vruntime;
4362
4363 if (vdiff <= 0)
4364 return -1;
4365
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004366 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02004367 if (vdiff > gran)
4368 return 1;
4369
4370 return 0;
4371}
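/*
 * Worked example with hypothetical values: take a wakeup granularity of
 * 1ms and a nice-0 wakee 'se', so calc_delta_fair() leaves gran at 1ms
 * of vruntime.  With curr->vruntime - se->vruntime = 1.5ms the wakee
 * preempts (return 1); at 0.5ms it does not (return 0); a negative
 * difference returns -1.  A heavier wakee shrinks gran and therefore
 * preempts more easily; a lighter one inflates it.
 */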
4372
Peter Zijlstra02479092008-11-04 21:25:10 +01004373static void set_last_buddy(struct sched_entity *se)
4374{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004375 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4376 return;
4377
4378 for_each_sched_entity(se)
4379 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01004380}
4381
4382static void set_next_buddy(struct sched_entity *se)
4383{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004384 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4385 return;
4386
4387 for_each_sched_entity(se)
4388 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01004389}
4390
Rik van Rielac53db52011-02-01 09:51:03 -05004391static void set_skip_buddy(struct sched_entity *se)
4392{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004393 for_each_sched_entity(se)
4394 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05004395}
4396
Peter Zijlstra464b7522008-10-24 11:06:15 +02004397/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004398 * Preempt the current task with a newly woken task if needed:
4399 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02004400static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004401{
4402 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02004403 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01004404 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02004405 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004406 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01004407
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01004408 if (unlikely(se == pse))
4409 return;
4410
Paul Turner5238cdd2011-07-21 09:43:37 -07004411 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004412 * This is possible from callers such as move_task(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07004413	 * unconditionally check_preempt_curr() after an enqueue (which may have
 4414	 * led to a throttle). This both saves work and prevents false
4415 * next-buddy nomination below.
4416 */
4417 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4418 return;
4419
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004420 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02004421 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004422 next_buddy_marked = 1;
4423 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02004424
Bharata B Raoaec0a512008-08-28 14:42:49 +05304425 /*
4426 * We can come here with TIF_NEED_RESCHED already set from new task
4427 * wake up path.
Paul Turner5238cdd2011-07-21 09:43:37 -07004428 *
4429 * Note: this also catches the edge-case of curr being in a throttled
4430 * group (e.g. via set_curr_task), since update_curr() (in the
4431 * enqueue of curr) will have resulted in resched being set. This
4432 * prevents us from potentially nominating it as a false LAST_BUDDY
4433 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05304434 */
4435 if (test_tsk_need_resched(curr))
4436 return;
4437
Darren Harta2f5c9a2011-02-22 13:04:33 -08004438 /* Idle tasks are by definition preempted by non-idle tasks. */
4439 if (unlikely(curr->policy == SCHED_IDLE) &&
4440 likely(p->policy != SCHED_IDLE))
4441 goto preempt;
4442
Ingo Molnar91c234b2007-10-15 17:00:18 +02004443 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08004444 * Batch and idle tasks do not preempt non-idle tasks (their preemption
4445 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02004446 */
Ingo Molnar8ed92e512012-10-14 14:28:50 +02004447 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02004448 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004449
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004450 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07004451 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004452 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004453 if (wakeup_preempt_entity(se, pse) == 1) {
4454 /*
4455 * Bias pick_next to pick the sched entity that is
4456 * triggering this preemption.
4457 */
4458 if (!next_buddy_marked)
4459 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004460 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004461 }
Jupyung Leea65ac742009-11-17 18:51:40 +09004462
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004463 return;
4464
4465preempt:
4466 resched_task(curr);
4467 /*
4468 * Only set the backward buddy when the current task is still
4469 * on the rq. This can happen when a wakeup gets interleaved
4470 * with schedule on the ->pre_schedule() or idle_balance()
 4471	 * point, either of which can drop the rq lock.
4472 *
4473 * Also, during early boot the idle thread is in the fair class,
 4474	 * for obvious reasons it's a bad idea to schedule back to it.
4475 */
4476 if (unlikely(!se->on_rq || curr == rq->idle))
4477 return;
4478
4479 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4480 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004481}
4482
Ingo Molnarfb8d4722007-08-09 11:16:48 +02004483static struct task_struct *pick_next_task_fair(struct rq *rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004484{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004485 struct task_struct *p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004486 struct cfs_rq *cfs_rq = &rq->cfs;
4487 struct sched_entity *se;
4488
Tim Blechmann36ace272009-11-24 11:55:45 +01004489 if (!cfs_rq->nr_running)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004490 return NULL;
4491
4492 do {
Ingo Molnar9948f4b2007-08-09 11:16:48 +02004493 se = pick_next_entity(cfs_rq);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01004494 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004495 cfs_rq = group_cfs_rq(se);
4496 } while (cfs_rq);
4497
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004498 p = task_of(se);
Mike Galbraithb39e66e2011-11-22 15:20:07 +01004499 if (hrtick_enabled(rq))
4500 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004501
4502 return p;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004503}
4504
4505/*
4506 * Account for a descheduled task:
4507 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02004508static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004509{
4510 struct sched_entity *se = &prev->se;
4511 struct cfs_rq *cfs_rq;
4512
4513 for_each_sched_entity(se) {
4514 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02004515 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004516 }
4517}
4518
Rik van Rielac53db52011-02-01 09:51:03 -05004519/*
4520 * sched_yield() is very simple
4521 *
4522 * The magic of dealing with the ->skip buddy is in pick_next_entity.
4523 */
4524static void yield_task_fair(struct rq *rq)
4525{
4526 struct task_struct *curr = rq->curr;
4527 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4528 struct sched_entity *se = &curr->se;
4529
4530 /*
4531 * Are we the only task in the tree?
4532 */
4533 if (unlikely(rq->nr_running == 1))
4534 return;
4535
4536 clear_buddies(cfs_rq, se);
4537
4538 if (curr->policy != SCHED_BATCH) {
4539 update_rq_clock(rq);
4540 /*
4541 * Update run-time statistics of the 'current'.
4542 */
4543 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01004544 /*
4545 * Tell update_rq_clock() that we've just updated,
4546 * so we don't do microscopic update in schedule()
4547 * and double the fastpath cost.
4548 */
4549 rq->skip_clock_update = 1;
Rik van Rielac53db52011-02-01 09:51:03 -05004550 }
4551
4552 set_skip_buddy(se);
4553}
4554
Mike Galbraithd95f4122011-02-01 09:50:51 -05004555static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
4556{
4557 struct sched_entity *se = &p->se;
4558
Paul Turner5238cdd2011-07-21 09:43:37 -07004559 /* throttled hierarchies are not runnable */
4560 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05004561 return false;
4562
 4563	/* Tell the scheduler that we'd really like se to run next. */
4564 set_next_buddy(se);
4565
Mike Galbraithd95f4122011-02-01 09:50:51 -05004566 yield_task_fair(rq);
4567
4568 return true;
4569}
4570
Peter Williams681f3e62007-10-24 18:23:51 +02004571#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004572/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02004573 * Fair scheduling class load-balancing methods.
4574 *
4575 * BASICS
4576 *
4577 * The purpose of load-balancing is to achieve the same basic fairness the
4578 * per-cpu scheduler provides, namely provide a proportional amount of compute
4579 * time to each task. This is expressed in the following equation:
4580 *
4581 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
4582 *
4583 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
4584 * W_i,0 is defined as:
4585 *
4586 * W_i,0 = \Sum_j w_i,j (2)
4587 *
4588 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
4589 * is derived from the nice value as per prio_to_weight[].
4590 *
4591 * The weight average is an exponential decay average of the instantaneous
4592 * weight:
4593 *
4594 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
4595 *
4596 * P_i is the cpu power (or compute capacity) of cpu i, typically it is the
4597 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
4598 * can also include other factors [XXX].
4599 *
4600 * To achieve this balance we define a measure of imbalance which follows
4601 * directly from (1):
4602 *
4603 * imb_i,j = max{ avg(W/P), W_i/P_i } - min{ avg(W/P), W_j/P_j } (4)
4604 *
 4605 * We then move tasks around to minimize the imbalance. In the continuous
4606 * function space it is obvious this converges, in the discrete case we get
4607 * a few fun cases generally called infeasible weight scenarios.
4608 *
4609 * [XXX expand on:
4610 * - infeasible weights;
4611 * - local vs global optima in the discrete case. ]
4612 *
4613 *
4614 * SCHED DOMAINS
4615 *
4616 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
4617 * for all i,j solution, we create a tree of cpus that follows the hardware
4618 * topology where each level pairs two lower groups (or better). This results
4619 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
4620 * tree to only the first of the previous level and we decrease the frequency
4621 * of load-balance at each level inv. proportional to the number of cpus in
4622 * the groups.
4623 *
4624 * This yields:
4625 *
4626 * log_2 n 1 n
4627 * \Sum { --- * --- * 2^i } = O(n) (5)
4628 * i = 0 2^i 2^i
4629 * `- size of each group
4630 * | | `- number of cpus doing load-balance
4631 * | `- freq
4632 * `- sum over all levels
4633 *
4634 * Coupled with a limit on how many tasks we can migrate every balance pass,
4635 * this makes (5) the runtime complexity of the balancer.
4636 *
4637 * An important property here is that each CPU is still (indirectly) connected
4638 * to every other cpu in at most O(log n) steps:
4639 *
4640 * The adjacency matrix of the resulting graph is given by:
4641 *
4642 * log_2 n
4643 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
4644 * k = 0
4645 *
4646 * And you'll find that:
4647 *
4648 * A^(log_2 n)_i,j != 0 for all i,j (7)
4649 *
4650 * Showing there's indeed a path between every cpu in at most O(log n) steps.
4651 * The task movement gives a factor of O(m), giving a convergence complexity
4652 * of:
4653 *
4654 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
4655 *
4656 *
4657 * WORK CONSERVING
4658 *
4659 * In order to avoid CPUs going idle while there's still work to do, new idle
4660 * balancing is more aggressive and has the newly idle cpu iterate up the domain
4661 * tree itself instead of relying on other CPUs to bring it work.
4662 *
4663 * This adds some complexity to both (5) and (8) but it reduces the total idle
4664 * time.
4665 *
4666 * [XXX more?]
4667 *
4668 *
4669 * CGROUPS
4670 *
4671 * Cgroups make a horror show out of (2), instead of a simple sum we get:
4672 *
4673 * s_k,i
4674 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
4675 * S_k
4676 *
4677 * Where
4678 *
4679 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
4680 *
4681 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
4682 *
 4683 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
4684 * property.
4685 *
4686 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
4687 * rewrite all of this once again.]
4688 */
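/*
 * Illustrative instance of (5) above, for a hypothetical n = 8 cpus:
 *
 *	i=0: 8 cpus balance groups of size 1 every period:  1   * 8 * 1 = 8
 *	i=1: 4 cpus balance groups of size 2 at half rate:  1/2 * 4 * 2 = 4
 *	i=2: 2 cpus balance groups of size 4:               1/4 * 2 * 4 = 2
 *	i=3: 1 cpu  balances the full 8-cpu span:           1/8 * 1 * 8 = 1
 *
 * The per-level costs form a geometric series, 8 + 4 + 2 + 1 = 15 < 2n,
 * which is why the sum in (5) is O(n) rather than O(n^2).
 */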
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004689
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09004690static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4691
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01004692enum fbq_type { regular, remote, all };
4693
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004694#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01004695#define LBF_NEED_BREAK 0x02
Peter Zijlstra62633222013-08-19 12:41:09 +02004696#define LBF_DST_PINNED 0x04
4697#define LBF_SOME_PINNED 0x08
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004698
4699struct lb_env {
4700 struct sched_domain *sd;
4701
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004702 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05304703 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004704
4705 int dst_cpu;
4706 struct rq *dst_rq;
4707
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304708 struct cpumask *dst_grpmask;
4709 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004710 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004711 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08004712 /* The set of CPUs under consideration for load-balancing */
4713 struct cpumask *cpus;
4714
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004715 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004716
4717 unsigned int loop;
4718 unsigned int loop_break;
4719 unsigned int loop_max;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01004720
4721 enum fbq_type fbq_type;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004722};
4723
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004724/*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004725 * move_task - move a task from one runqueue to another runqueue.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004726 * Both runqueues must be locked.
4727 */
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004728static void move_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004729{
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004730 deactivate_task(env->src_rq, p, 0);
4731 set_task_cpu(p, env->dst_cpu);
4732 activate_task(env->dst_rq, p, 0);
4733 check_preempt_curr(env->dst_rq, p, 0);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004734}
4735
4736/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02004737 * Is this task likely cache-hot:
4738 */
4739static int
4740task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
4741{
4742 s64 delta;
4743
4744 if (p->sched_class != &fair_sched_class)
4745 return 0;
4746
4747 if (unlikely(p->policy == SCHED_IDLE))
4748 return 0;
4749
4750 /*
4751 * Buddy candidates are cache hot:
4752 */
4753 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
4754 (&p->se == cfs_rq_of(&p->se)->next ||
4755 &p->se == cfs_rq_of(&p->se)->last))
4756 return 1;
4757
4758 if (sysctl_sched_migration_cost == -1)
4759 return 1;
4760 if (sysctl_sched_migration_cost == 0)
4761 return 0;
4762
4763 delta = now - p->se.exec_start;
4764
4765 return delta < (s64)sysctl_sched_migration_cost;
4766}
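/*
 * Sketch of the decision above with hypothetical numbers: assuming the
 * common 0.5ms default for sysctl_sched_migration_cost, a fair task
 * whose exec_start is 0.2ms in the past is reported cache-hot
 * (delta < cost), while one last run 2ms ago is not; setting the sysctl
 * to -1 forces "always hot", 0 forces "never hot" (buddy candidates
 * excepted, as they are checked first).
 */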
4767
Mel Gorman3a7053b2013-10-07 11:29:00 +01004768#ifdef CONFIG_NUMA_BALANCING
4769/* Returns true if the destination node has incurred more faults */
4770static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
4771{
4772 int src_nid, dst_nid;
4773
Rik van Rielff1df892014-01-27 17:03:41 -05004774 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
Mel Gorman3a7053b2013-10-07 11:29:00 +01004775 !(env->sd->flags & SD_NUMA)) {
4776 return false;
4777 }
4778
4779 src_nid = cpu_to_node(env->src_cpu);
4780 dst_nid = cpu_to_node(env->dst_cpu);
4781
Mel Gorman83e1d2c2013-10-07 11:29:27 +01004782 if (src_nid == dst_nid)
Mel Gorman3a7053b2013-10-07 11:29:00 +01004783 return false;
4784
Mel Gorman83e1d2c2013-10-07 11:29:27 +01004785 /* Always encourage migration to the preferred node. */
4786 if (dst_nid == p->numa_preferred_nid)
4787 return true;
4788
Rik van Riel887c2902013-10-07 11:29:31 +01004789 /* If both task and group weight improve, this move is a winner. */
4790 if (task_weight(p, dst_nid) > task_weight(p, src_nid) &&
4791 group_weight(p, dst_nid) > group_weight(p, src_nid))
Mel Gorman3a7053b2013-10-07 11:29:00 +01004792 return true;
4793
4794 return false;
4795}
Mel Gorman7a0f3082013-10-07 11:29:01 +01004796
4797
4798static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
4799{
4800 int src_nid, dst_nid;
4801
4802 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
4803 return false;
4804
Rik van Rielff1df892014-01-27 17:03:41 -05004805 if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
Mel Gorman7a0f3082013-10-07 11:29:01 +01004806 return false;
4807
4808 src_nid = cpu_to_node(env->src_cpu);
4809 dst_nid = cpu_to_node(env->dst_cpu);
4810
Mel Gorman83e1d2c2013-10-07 11:29:27 +01004811 if (src_nid == dst_nid)
Mel Gorman7a0f3082013-10-07 11:29:01 +01004812 return false;
4813
Mel Gorman83e1d2c2013-10-07 11:29:27 +01004814 /* Migrating away from the preferred node is always bad. */
4815 if (src_nid == p->numa_preferred_nid)
4816 return true;
4817
Rik van Riel887c2902013-10-07 11:29:31 +01004818 /* If either task or group weight get worse, don't do it. */
4819 if (task_weight(p, dst_nid) < task_weight(p, src_nid) ||
4820 group_weight(p, dst_nid) < group_weight(p, src_nid))
Mel Gorman7a0f3082013-10-07 11:29:01 +01004821 return true;
4822
4823 return false;
4824}
4825
Mel Gorman3a7053b2013-10-07 11:29:00 +01004826#else
4827static inline bool migrate_improves_locality(struct task_struct *p,
4828 struct lb_env *env)
4829{
4830 return false;
4831}
Mel Gorman7a0f3082013-10-07 11:29:01 +01004832
4833static inline bool migrate_degrades_locality(struct task_struct *p,
4834 struct lb_env *env)
4835{
4836 return false;
4837}
Mel Gorman3a7053b2013-10-07 11:29:00 +01004838#endif
4839
Peter Zijlstra029632f2011-10-25 10:00:11 +02004840/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004841 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
4842 */
4843static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004844int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004845{
4846 int tsk_cache_hot = 0;
4847 /*
4848 * We do not migrate tasks that are:
Joonsoo Kimd3198082013-04-23 17:27:40 +09004849 * 1) throttled_lb_pair, or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004850 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09004851 * 3) running (obviously), or
4852 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004853 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09004854 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
4855 return 0;
4856
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004857 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004858 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304859
Lucas De Marchi41acab82010-03-10 23:37:45 -03004860 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304861
Peter Zijlstra62633222013-08-19 12:41:09 +02004862 env->flags |= LBF_SOME_PINNED;
4863
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304864 /*
4865 * Remember if this task can be migrated to any other cpu in
4866 * our sched_group. We may want to revisit it if we couldn't
4867 * meet load balance goals by pulling other tasks on src_cpu.
4868 *
4869 * Also avoid computing new_dst_cpu if we have already computed
4870 * one in current iteration.
4871 */
Peter Zijlstra62633222013-08-19 12:41:09 +02004872 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304873 return 0;
4874
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004875		/* Avoid re-selecting dst_cpu via env's cpus */
4876 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
4877 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
Peter Zijlstra62633222013-08-19 12:41:09 +02004878 env->flags |= LBF_DST_PINNED;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004879 env->new_dst_cpu = cpu;
4880 break;
4881 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304882 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09004883
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004884 return 0;
4885 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05304886
 4887	/* Record that we found at least one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004888 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004889
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004890 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03004891 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004892 return 0;
4893 }
4894
4895 /*
4896 * Aggressive migration if:
Mel Gorman3a7053b2013-10-07 11:29:00 +01004897 * 1) destination numa is preferred
4898 * 2) task is cache cold, or
4899 * 3) too many balance attempts have failed.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004900 */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02004901 tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq), env->sd);
Mel Gorman7a0f3082013-10-07 11:29:01 +01004902 if (!tsk_cache_hot)
4903 tsk_cache_hot = migrate_degrades_locality(p, env);
Mel Gorman3a7053b2013-10-07 11:29:00 +01004904
4905 if (migrate_improves_locality(p, env)) {
4906#ifdef CONFIG_SCHEDSTATS
4907 if (tsk_cache_hot) {
4908 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
4909 schedstat_inc(p, se.statistics.nr_forced_migrations);
4910 }
4911#endif
4912 return 1;
4913 }
4914
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004915 if (!tsk_cache_hot ||
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004916 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004917
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004918 if (tsk_cache_hot) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004919 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03004920 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004921 }
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004922
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004923 return 1;
4924 }
4925
Zhang Hang4e2dcb72013-04-10 14:04:55 +08004926 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
4927 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004928}
4929
Peter Zijlstra897c3952009-12-17 17:45:42 +01004930/*
4931 * move_one_task tries to move exactly one task from busiest to this_rq, as
4932 * part of active balancing operations within "domain".
4933 * Returns 1 if successful and 0 otherwise.
4934 *
4935 * Called with both runqueues locked.
4936 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004937static int move_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01004938{
4939 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004940
Peter Zijlstra367456c2012-02-20 21:49:09 +01004941 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01004942 if (!can_migrate_task(p, env))
4943 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004944
Peter Zijlstra367456c2012-02-20 21:49:09 +01004945 move_task(p, env);
4946 /*
4947 * Right now, this is only the second place move_task()
4948 * is called, so we can safely collect move_task()
4949 * stats here rather than inside move_task().
4950 */
4951 schedstat_inc(env->sd, lb_gained[env->idle]);
4952 return 1;
Peter Zijlstra897c3952009-12-17 17:45:42 +01004953 }
Peter Zijlstra897c3952009-12-17 17:45:42 +01004954 return 0;
4955}
4956
Peter Zijlstraeb953082012-04-17 13:38:40 +02004957static const unsigned int sched_nr_migrate_break = 32;
4958
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004959/*
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004960 * move_tasks tries to move up to imbalance weighted load from busiest to
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004961 * this_rq, as part of a balancing operation within domain "sd".
4962 * Returns 1 if successful and 0 otherwise.
4963 *
4964 * Called with both runqueues locked.
4965 */
4966static int move_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004967{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004968 struct list_head *tasks = &env->src_rq->cfs_tasks;
4969 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01004970 unsigned long load;
4971 int pulled = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004972
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004973 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004974 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004975
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004976 while (!list_empty(tasks)) {
4977 p = list_first_entry(tasks, struct task_struct, se.group_node);
4978
Peter Zijlstra367456c2012-02-20 21:49:09 +01004979 env->loop++;
4980 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004981 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004982 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004983
4984 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01004985 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02004986 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01004987 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01004988 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02004989 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004990
Joonsoo Kimd3198082013-04-23 17:27:40 +09004991 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01004992 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01004993
Peter Zijlstra367456c2012-02-20 21:49:09 +01004994 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01004995
Peter Zijlstraeb953082012-04-17 13:38:40 +02004996 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01004997 goto next;
4998
Peter Zijlstrabd939f42012-05-02 14:20:37 +02004999 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01005000 goto next;
5001
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005002 move_task(p, env);
Peter Zijlstraee00e662009-12-17 17:25:20 +01005003 pulled++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005004 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005005
5006#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01005007 /*
5008 * NEWIDLE balancing is a source of latency, so preemptible
5009 * kernels will stop after the first task is pulled to minimize
5010 * the critical section.
5011 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005012 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01005013 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005014#endif
5015
Peter Zijlstraee00e662009-12-17 17:25:20 +01005016 /*
5017 * We only want to steal up to the prescribed amount of
5018 * weighted load.
5019 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005020 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01005021 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005022
Peter Zijlstra367456c2012-02-20 21:49:09 +01005023 continue;
5024next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005025 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005026 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005027
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005028 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005029 * Right now, this is one of only two places move_task() is called,
5030 * so we can safely collect move_task() stats here rather than
5031 * inside move_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005032 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005033 schedstat_add(env->sd, lb_gained[env->idle], pulled);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005034
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005035 return pulled;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005036}
5037
Peter Zijlstra230059de2009-12-17 17:47:12 +01005038#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005039/*
5040 * update tg->load_weight by folding this cpu's load_avg
5041 */
Paul Turner48a16752012-10-04 13:18:31 +02005042static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005043{
Paul Turner48a16752012-10-04 13:18:31 +02005044 struct sched_entity *se = tg->se[cpu];
5045 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005046
Paul Turner48a16752012-10-04 13:18:31 +02005047 /* throttled entities do not contribute to load */
5048 if (throttled_hierarchy(cfs_rq))
5049 return;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005050
Paul Turneraff3e492012-10-04 13:18:30 +02005051 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005052
Paul Turner82958362012-10-04 13:18:31 +02005053 if (se) {
5054 update_entity_load_avg(se, 1);
5055 /*
5056 * We pivot on our runnable average having decayed to zero for
5057 * list removal. This generally implies that all our children
5058 * have also been removed (modulo rounding error or bandwidth
5059 * control); however, such cases are rare and we can fix these
5060 * at enqueue.
5061 *
5062 * TODO: fix up out-of-order children on enqueue.
5063 */
5064 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
5065 list_del_leaf_cfs_rq(cfs_rq);
5066 } else {
Paul Turner48a16752012-10-04 13:18:31 +02005067 struct rq *rq = rq_of(cfs_rq);
Paul Turner82958362012-10-04 13:18:31 +02005068 update_rq_runnable_avg(rq, rq->nr_running);
5069 }
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005070}
5071
Paul Turner48a16752012-10-04 13:18:31 +02005072static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005073{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005074 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02005075 struct cfs_rq *cfs_rq;
5076 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005077
Paul Turner48a16752012-10-04 13:18:31 +02005078 raw_spin_lock_irqsave(&rq->lock, flags);
5079 update_rq_clock(rq);
Peter Zijlstra9763b672011-07-13 13:09:25 +02005080 /*
5081 * Iterates the task_group tree in a bottom up fashion, see
5082 * list_add_leaf_cfs_rq() for details.
5083 */
Paul Turner64660c82011-07-21 09:43:36 -07005084 for_each_leaf_cfs_rq(rq, cfs_rq) {
Paul Turner48a16752012-10-04 13:18:31 +02005085 /*
5086 * Note: We may want to consider periodically releasing
 5087		 * rq->lock around these updates so that creating many task
5088 * groups does not result in continually extending hold time.
5089 */
5090 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
Paul Turner64660c82011-07-21 09:43:36 -07005091 }
Paul Turner48a16752012-10-04 13:18:31 +02005092
5093 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005094}
5095
Peter Zijlstra9763b672011-07-13 13:09:25 +02005096/*
Vladimir Davydov68520792013-07-15 17:49:19 +04005097 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
Peter Zijlstra9763b672011-07-13 13:09:25 +02005098 * This needs to be done in a top-down fashion because the load of a child
 5099 * group is a fraction of its parent's load.
5100 */
Vladimir Davydov68520792013-07-15 17:49:19 +04005101static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
Peter Zijlstra9763b672011-07-13 13:09:25 +02005102{
Vladimir Davydov68520792013-07-15 17:49:19 +04005103 struct rq *rq = rq_of(cfs_rq);
5104 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005105 unsigned long now = jiffies;
Vladimir Davydov68520792013-07-15 17:49:19 +04005106 unsigned long load;
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005107
Vladimir Davydov68520792013-07-15 17:49:19 +04005108 if (cfs_rq->last_h_load_update == now)
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005109 return;
5110
Vladimir Davydov68520792013-07-15 17:49:19 +04005111 cfs_rq->h_load_next = NULL;
5112 for_each_sched_entity(se) {
5113 cfs_rq = cfs_rq_of(se);
5114 cfs_rq->h_load_next = se;
5115 if (cfs_rq->last_h_load_update == now)
5116 break;
5117 }
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005118
Vladimir Davydov68520792013-07-15 17:49:19 +04005119 if (!se) {
Vladimir Davydov7e3115e2013-09-14 19:39:46 +04005120 cfs_rq->h_load = cfs_rq->runnable_load_avg;
Vladimir Davydov68520792013-07-15 17:49:19 +04005121 cfs_rq->last_h_load_update = now;
5122 }
5123
5124 while ((se = cfs_rq->h_load_next) != NULL) {
5125 load = cfs_rq->h_load;
5126 load = div64_ul(load * se->avg.load_avg_contrib,
5127 cfs_rq->runnable_load_avg + 1);
5128 cfs_rq = group_cfs_rq(se);
5129 cfs_rq->h_load = load;
5130 cfs_rq->last_h_load_update = now;
5131 }
Peter Zijlstra9763b672011-07-13 13:09:25 +02005132}
5133
Peter Zijlstra367456c2012-02-20 21:49:09 +01005134static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01005135{
Peter Zijlstra367456c2012-02-20 21:49:09 +01005136 struct cfs_rq *cfs_rq = task_cfs_rq(p);
Peter Zijlstra230059de2009-12-17 17:47:12 +01005137
Vladimir Davydov68520792013-07-15 17:49:19 +04005138 update_cfs_rq_h_load(cfs_rq);
Alex Shia003a252013-06-20 10:18:51 +08005139 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
5140 cfs_rq->runnable_load_avg + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01005141}
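/*
 * Hypothetical example of the propagation above: a root cfs_rq with
 * runnable_load_avg 2048 whose group se contributes 1024 gives the
 * child cfs_rq h_load = 2048 * 1024 / (2048 + 1) ~= 1023.  A task on
 * that child contributing 512 of the child's runnable_load_avg of 1024
 * then reports task_h_load() ~= 512 * 1023 / (1024 + 1) ~= 511, i.e.
 * about half of the group's share of the root's load.
 */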
5142#else
Paul Turner48a16752012-10-04 13:18:31 +02005143static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005144{
5145}
5146
Peter Zijlstra367456c2012-02-20 21:49:09 +01005147static unsigned long task_h_load(struct task_struct *p)
5148{
Alex Shia003a252013-06-20 10:18:51 +08005149 return p->se.avg.load_avg_contrib;
Peter Zijlstra230059de2009-12-17 17:47:12 +01005150}
5151#endif
5152
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005153/********** Helpers for find_busiest_group ************************/
5154/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005155 * sg_lb_stats - stats of a sched_group required for load_balancing
5156 */
5157struct sg_lb_stats {
5158 unsigned long avg_load; /*Avg load across the CPUs of the group */
5159 unsigned long group_load; /* Total load over the CPUs of the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005160 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005161 unsigned long load_per_task;
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005162 unsigned long group_power;
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005163 unsigned int sum_nr_running; /* Nr tasks running in the group */
5164 unsigned int group_capacity;
5165 unsigned int idle_cpus;
5166 unsigned int group_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005167 int group_imb; /* Is there an imbalance in the group ? */
Nikhil Raofab47622010-10-15 13:12:29 -07005168 int group_has_capacity; /* Is there extra capacity in the group? */
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005169#ifdef CONFIG_NUMA_BALANCING
5170 unsigned int nr_numa_running;
5171 unsigned int nr_preferred_running;
5172#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005173};
5174
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005175/*
5176 * sd_lb_stats - Structure to store the statistics of a sched_domain
5177 * during load balancing.
5178 */
5179struct sd_lb_stats {
5180 struct sched_group *busiest; /* Busiest group in this sd */
5181 struct sched_group *local; /* Local group in this sd */
5182 unsigned long total_load; /* Total load of all groups in sd */
5183 unsigned long total_pwr; /* Total power of all groups in sd */
5184 unsigned long avg_load; /* Average load across all groups in sd */
5185
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005186 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005187 struct sg_lb_stats local_stat; /* Statistics of the local group */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005188};
5189
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005190static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5191{
5192 /*
5193 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
5194 * local_stat because update_sg_lb_stats() does a full clear/assignment.
5195 * We must however clear busiest_stat::avg_load because
5196 * update_sd_pick_busiest() reads this before assignment.
5197 */
5198 *sds = (struct sd_lb_stats){
5199 .busiest = NULL,
5200 .local = NULL,
5201 .total_load = 0UL,
5202 .total_pwr = 0UL,
5203 .busiest_stat = {
5204 .avg_load = 0UL,
5205 },
5206 };
5207}
5208
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005209/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005210 * get_sd_load_idx - Obtain the load index for a given sched domain.
5211 * @sd: The sched_domain whose load_idx is to be obtained.
Kamalesh Babulaled1b7732013-10-13 23:06:15 +05305212 * @idle: The idle status of the CPU whose sd load_idx is being obtained.
Yacine Belkadie69f6182013-07-12 20:45:47 +02005213 *
5214 * Return: The load index.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005215 */
5216static inline int get_sd_load_idx(struct sched_domain *sd,
5217 enum cpu_idle_type idle)
5218{
5219 int load_idx;
5220
5221 switch (idle) {
5222 case CPU_NOT_IDLE:
5223 load_idx = sd->busy_idx;
5224 break;
5225
5226 case CPU_NEWLY_IDLE:
5227 load_idx = sd->newidle_idx;
5228 break;
5229 default:
5230 load_idx = sd->idle_idx;
5231 break;
5232 }
5233
5234 return load_idx;
5235}
5236
Li Zefan15f803c2013-03-05 16:07:11 +08005237static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005238{
Nikhil Rao1399fa72011-05-18 10:09:39 -07005239 return SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005240}
5241
5242unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
5243{
5244 return default_scale_freq_power(sd, cpu);
5245}
5246
Li Zefan15f803c2013-03-05 16:07:11 +08005247static unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005248{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02005249 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005250 unsigned long smt_gain = sd->smt_gain;
5251
5252 smt_gain /= weight;
5253
5254 return smt_gain;
5255}
5256
5257unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
5258{
5259 return default_scale_smt_power(sd, cpu);
5260}
5261
Li Zefan15f803c2013-03-05 16:07:11 +08005262static unsigned long scale_rt_power(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005263{
5264 struct rq *rq = cpu_rq(cpu);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005265 u64 total, available, age_stamp, avg;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005266
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005267 /*
5268 * Since we're reading these variables without serialization make sure
5269 * we read them once before doing sanity checks on them.
5270 */
5271 age_stamp = ACCESS_ONCE(rq->age_stamp);
5272 avg = ACCESS_ONCE(rq->rt_avg);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07005273
Frederic Weisbecker78becc22013-04-12 01:51:02 +02005274 total = sched_avg_period() + (rq_clock(rq) - age_stamp);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005275
5276 if (unlikely(total < avg)) {
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07005277 /* Ensures that power won't end up being negative */
5278 available = 0;
5279 } else {
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005280 available = total - avg;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07005281 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005282
Nikhil Rao1399fa72011-05-18 10:09:39 -07005283 if (unlikely((s64)total < SCHED_POWER_SCALE))
5284 total = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005285
Nikhil Rao1399fa72011-05-18 10:09:39 -07005286 total >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005287
5288 return div_u64(available, total);
5289}
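
/*
 * Rough worked example: if rt/irq time (rq->rt_avg) accounts for about a
 * quarter of the averaging window, then available ~= 3/4 * total and the
 * function returns roughly 3/4 * SCHED_POWER_SCALE = 768, a factor that
 * update_cpu_power() below uses to shave ~25% off this cpu's power.
 */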
5290
5291static void update_cpu_power(struct sched_domain *sd, int cpu)
5292{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02005293 unsigned long weight = sd->span_weight;
Nikhil Rao1399fa72011-05-18 10:09:39 -07005294 unsigned long power = SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005295 struct sched_group *sdg = sd->groups;
5296
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005297 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
5298 if (sched_feat(ARCH_POWER))
5299 power *= arch_scale_smt_power(sd, cpu);
5300 else
5301 power *= default_scale_smt_power(sd, cpu);
5302
Nikhil Rao1399fa72011-05-18 10:09:39 -07005303 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005304 }
5305
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005306 sdg->sgp->power_orig = power;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005307
5308 if (sched_feat(ARCH_POWER))
5309 power *= arch_scale_freq_power(sd, cpu);
5310 else
5311 power *= default_scale_freq_power(sd, cpu);
5312
Nikhil Rao1399fa72011-05-18 10:09:39 -07005313 power >>= SCHED_POWER_SHIFT;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005314
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005315 power *= scale_rt_power(cpu);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005316 power >>= SCHED_POWER_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005317
5318 if (!power)
5319 power = 1;
5320
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02005321 cpu_rq(cpu)->cpu_power = power;
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005322 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005323}
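
/*
 * Rough worked example of how the factors stack, assuming the default
 * smt_gain of 1178 and no arch overrides: on a 2-thread SMT sibling
 * domain, power = 1024 * (1178 / 2) >> SCHED_POWER_SHIFT = 589, which is
 * recorded as power_orig.  The default frequency factor of
 * SCHED_POWER_SCALE leaves that unchanged, and an rt/irq factor of 768
 * (25% stolen, as in the example above) gives a final cpu_power of
 * 589 * 768 >> SCHED_POWER_SHIFT = 441.
 */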
5324
Peter Zijlstra029632f2011-10-25 10:00:11 +02005325void update_group_power(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005326{
5327 struct sched_domain *child = sd->child;
5328 struct sched_group *group, *sdg = sd->groups;
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005329 unsigned long power, power_orig;
Vincent Guittot4ec44122011-12-12 20:21:08 +01005330 unsigned long interval;
5331
5332 interval = msecs_to_jiffies(sd->balance_interval);
5333 interval = clamp(interval, 1UL, max_load_balance_interval);
5334 sdg->sgp->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005335
5336 if (!child) {
5337 update_cpu_power(sd, cpu);
5338 return;
5339 }
5340
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005341 power_orig = power = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005342
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005343 if (child->flags & SD_OVERLAP) {
5344 /*
5345 * SD_OVERLAP domains cannot assume that child groups
5346 * span the current group.
5347 */
5348
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005349 for_each_cpu(cpu, sched_group_cpus(sdg)) {
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05305350 struct sched_group_power *sgp;
5351 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005352
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05305353 /*
5354 * build_sched_domains() -> init_sched_groups_power()
5355 * gets here before we've attached the domains to the
5356 * runqueues.
5357 *
5358 * Use power_of(), which is set irrespective of domains
5359 * in update_cpu_power().
5360 *
5361 * This avoids power/power_orig from being 0 and
5362 * causing divide-by-zero issues on boot.
5363 *
5364 * Runtime updates will correct power_orig.
5365 */
5366 if (unlikely(!rq->sd)) {
5367 power_orig += power_of(cpu);
5368 power += power_of(cpu);
5369 continue;
5370 }
5371
5372 sgp = rq->sd->groups->sgp;
5373 power_orig += sgp->power_orig;
5374 power += sgp->power;
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005375 }
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005376 } else {
5377 /*
5378 * !SD_OVERLAP domains can assume that child groups
5379 * span the current group.
5380 */
5381
5382 group = child->groups;
5383 do {
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005384 power_orig += group->sgp->power_orig;
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005385 power += group->sgp->power;
5386 group = group->next;
5387 } while (group != child->groups);
5388 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005389
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005390 sdg->sgp->power_orig = power_orig;
5391 sdg->sgp->power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005392}
5393
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005394/*
 5395 * Try and fix up capacity for tiny siblings; this is needed when
5396 * things like SD_ASYM_PACKING need f_b_g to select another sibling
5397 * which on its own isn't powerful enough.
5398 *
5399 * See update_sd_pick_busiest() and check_asym_packing().
5400 */
5401static inline int
5402fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
5403{
5404 /*
Nikhil Rao1399fa72011-05-18 10:09:39 -07005405 * Only siblings can have significantly less than SCHED_POWER_SCALE
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005406 */
Peter Zijlstraa6c75f22011-04-07 14:09:52 +02005407 if (!(sd->flags & SD_SHARE_CPUPOWER))
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005408 return 0;
5409
5410 /*
5411 * If ~90% of the cpu_power is still there, we're good.
5412 */
Peter Zijlstra9c3f75c2011-07-14 13:00:06 +02005413 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005414 return 1;
5415
5416 return 0;
5417}
5418
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005419/*
5420 * Group imbalance indicates (and tries to solve) the problem where balancing
5421 * groups is inadequate due to tsk_cpus_allowed() constraints.
5422 *
5423 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
5424 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
5425 * Something like:
5426 *
5427 * { 0 1 2 3 } { 4 5 6 7 }
5428 * * * * *
5429 *
5430 * If we were to balance group-wise we'd place two tasks in the first group and
5431 * two tasks in the second group. Clearly this is undesired as it will overload
5432 * cpu 3 and leave one of the cpus in the second group unused.
5433 *
5434 * The current solution to this issue is detecting the skew in the first group
Peter Zijlstra62633222013-08-19 12:41:09 +02005435 * by noticing the lower domain failed to reach balance and had difficulty
5436 * moving tasks due to affinity constraints.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005437 *
5438 * When this is so detected; this group becomes a candidate for busiest; see
Kamalesh Babulaled1b7732013-10-13 23:06:15 +05305439 * update_sd_pick_busiest(). And calculate_imbalance() and
Peter Zijlstra62633222013-08-19 12:41:09 +02005440 * find_busiest_group() avoid some of the usual balance conditions to allow it
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005441 * to create an effective group imbalance.
5442 *
5443 * This is a somewhat tricky proposition since the next run might not find the
5444 * group imbalance and decide the groups need to be balanced again. A most
5445 * subtle and fragile situation.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005446 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005447
Peter Zijlstra62633222013-08-19 12:41:09 +02005448static inline int sg_imbalanced(struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005449{
Peter Zijlstra62633222013-08-19 12:41:09 +02005450 return group->sgp->imbalance;
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005451}
5452
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005453/*
5454 * Compute the group capacity.
5455 *
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005456 * Avoid the issue where N*frac(smt_power) >= 1 creates 'phantom' cores by
5457 * first dividing out the smt factor and computing the actual number of cores
 5458 * and limiting power unit capacity with that.
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005459 */
5460static inline int sg_capacity(struct lb_env *env, struct sched_group *group)
5461{
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005462 unsigned int capacity, smt, cpus;
5463 unsigned int power, power_orig;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005464
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005465 power = group->sgp->power;
5466 power_orig = group->sgp->power_orig;
5467 cpus = group->group_weight;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005468
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005469 /* smt := ceil(cpus / power), assumes: 1 < smt_power < 2 */
5470 smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, power_orig);
5471 capacity = cpus / smt; /* cores */
5472
5473 capacity = min_t(unsigned, capacity, DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE));
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005474 if (!capacity)
5475 capacity = fix_small_capacity(env->sd, group);
5476
5477 return capacity;
5478}
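
/*
 * Rough worked example, assuming ~589 of power_orig per SMT thread (the
 * default smt_gain of 1178 split over two siblings): an MC-level group of
 * 4 cores / 8 threads has power_orig ~= 4712.  A naive
 * DIV_ROUND_CLOSEST(4712, 1024) would report 5 'phantom' cores, whereas
 * smt = DIV_ROUND_UP(1024 * 8, 4712) = 2 and capacity = 8 / 2 = 4, which
 * matches the real core count.
 */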
5479
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005480/**
5481 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
5482 * @env: The load balancing environment.
5483 * @group: sched_group whose statistics are to be updated.
5484 * @load_idx: Load index of sched_domain of this_cpu for load calc.
5485 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005486 * @sgs: variable to hold the statistics for this group.
5487 */
5488static inline void update_sg_lb_stats(struct lb_env *env,
5489 struct sched_group *group, int load_idx,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005490 int local_group, struct sg_lb_stats *sgs)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005491{
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005492 unsigned long load;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005493 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005494
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005495 memset(sgs, 0, sizeof(*sgs));
5496
Michael Wangb94031302012-07-12 16:10:13 +08005497 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005498 struct rq *rq = cpu_rq(i);
5499
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005500 /* Bias balancing toward cpus of our domain */
Peter Zijlstra62633222013-08-19 12:41:09 +02005501 if (local_group)
Peter Zijlstra04f733b2012-05-11 00:12:02 +02005502 load = target_load(i, load_idx);
Peter Zijlstra62633222013-08-19 12:41:09 +02005503 else
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005504 load = source_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005505
5506 sgs->group_load += load;
Kamalesh Babulal380c9072013-11-15 15:06:52 +05305507 sgs->sum_nr_running += rq->nr_running;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005508#ifdef CONFIG_NUMA_BALANCING
5509 sgs->nr_numa_running += rq->nr_numa_running;
5510 sgs->nr_preferred_running += rq->nr_preferred_running;
5511#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005512 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005513 if (idle_cpu(i))
5514 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005515 }
5516
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005517 /* Adjust by relative CPU power of the group */
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005518 sgs->group_power = group->sgp->power;
5519 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005520
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005521 if (sgs->sum_nr_running)
Peter Zijlstra38d0f772013-08-15 19:47:56 +02005522 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005523
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005524 sgs->group_weight = group->group_weight;
Nikhil Raofab47622010-10-15 13:12:29 -07005525
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005526 sgs->group_imb = sg_imbalanced(group);
5527 sgs->group_capacity = sg_capacity(env, group);
5528
Nikhil Raofab47622010-10-15 13:12:29 -07005529 if (sgs->group_capacity > sgs->sum_nr_running)
5530 sgs->group_has_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005531}
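
/*
 * Worked example of the power adjustment above: with group_load = 3072
 * and group_power = 2048 (e.g. two unscaled CPUs), avg_load becomes
 * 3072 * 1024 / 2048 = 1536, i.e. load per unit of compute power, so a
 * group of weak or rt-loaded CPUs compares against a full-power group on
 * an equal footing.
 */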
5532
5533/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10005534 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07005535 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005536 * @sds: sched_domain statistics
5537 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10005538 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10005539 *
5540 * Determine if @sg is a busier group than the previously selected
5541 * busiest group.
Yacine Belkadie69f6182013-07-12 20:45:47 +02005542 *
5543 * Return: %true if @sg is a busier group than the previously selected
5544 * busiest group. %false otherwise.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005545 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005546static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10005547 struct sd_lb_stats *sds,
5548 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005549 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005550{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005551 if (sgs->avg_load <= sds->busiest_stat.avg_load)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005552 return false;
5553
5554 if (sgs->sum_nr_running > sgs->group_capacity)
5555 return true;
5556
5557 if (sgs->group_imb)
5558 return true;
5559
5560 /*
5561 * ASYM_PACKING needs to move all the work to the lowest
5562 * numbered CPUs in the group, therefore mark all groups
5563 * higher than ourself as busy.
5564 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005565 if ((env->sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
5566 env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005567 if (!sds->busiest)
5568 return true;
5569
5570 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
5571 return true;
5572 }
5573
5574 return false;
5575}
5576
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005577#ifdef CONFIG_NUMA_BALANCING
5578static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5579{
5580 if (sgs->sum_nr_running > sgs->nr_numa_running)
5581 return regular;
5582 if (sgs->sum_nr_running > sgs->nr_preferred_running)
5583 return remote;
5584 return all;
5585}
5586
5587static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5588{
5589 if (rq->nr_running > rq->nr_numa_running)
5590 return regular;
5591 if (rq->nr_running > rq->nr_preferred_running)
5592 return remote;
5593 return all;
5594}
5595#else
5596static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
5597{
5598 return all;
5599}
5600
5601static inline enum fbq_type fbq_classify_rq(struct rq *rq)
5602{
5603 return regular;
5604}
5605#endif /* CONFIG_NUMA_BALANCING */
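
/*
 * Classification example (hypothetical numbers): a runqueue with
 * nr_running = 4, nr_numa_running = 4 and nr_preferred_running = 2 is
 * 'remote' (every task is a numa task, but some run on the wrong node);
 * with nr_numa_running = 3 it would be 'regular' (there is a !numa task
 * to pick first), and with all four tasks on their preferred node it is
 * 'all'.
 */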
5606
Michael Neuling532cb4c2010-06-08 14:57:02 +10005607/**
Hui Kang461819a2011-10-11 23:00:59 -04005608 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07005609 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005610 * @sds: variable to hold the statistics for this sched_domain.
5611 */
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005612static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005613{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005614 struct sched_domain *child = env->sd->child;
5615 struct sched_group *sg = env->sd->groups;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005616 struct sg_lb_stats tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005617 int load_idx, prefer_sibling = 0;
5618
5619 if (child && child->flags & SD_PREFER_SIBLING)
5620 prefer_sibling = 1;
5621
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005622 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005623
5624 do {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005625 struct sg_lb_stats *sgs = &tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005626 int local_group;
5627
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005628 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005629 if (local_group) {
5630 sds->local = sg;
5631 sgs = &sds->local_stat;
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005632
5633 if (env->idle != CPU_NEWLY_IDLE ||
5634 time_after_eq(jiffies, sg->sgp->next_update))
5635 update_group_power(env->sd, env->dst_cpu);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005636 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005637
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005638 update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005639
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005640 if (local_group)
5641 goto next_group;
5642
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005643 /*
5644 * In case the child domain prefers tasks go to siblings
Michael Neuling532cb4c2010-06-08 14:57:02 +10005645 * first, lower the sg capacity to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07005646 * and move all the excess tasks away. We lower the capacity
5647 * of a group only if the local group has the capacity to fit
5648 * these excess tasks, i.e. nr_running < group_capacity. The
5649 * extra check prevents the case where you always pull from the
 5650 * heaviest group when it is already under-utilized (possible when
 5651 * a single large-weight task outweighs the tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005652 */
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005653 if (prefer_sibling && sds->local &&
5654 sds->local_stat.group_has_capacity)
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005655 sgs->group_capacity = min(sgs->group_capacity, 1U);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005656
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005657 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10005658 sds->busiest = sg;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005659 sds->busiest_stat = *sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005660 }
5661
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005662next_group:
5663 /* Now, start updating sd_lb_stats */
5664 sds->total_load += sgs->group_load;
5665 sds->total_pwr += sgs->group_power;
5666
Michael Neuling532cb4c2010-06-08 14:57:02 +10005667 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005668 } while (sg != env->sd->groups);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005669
5670 if (env->sd->flags & SD_NUMA)
5671 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
Michael Neuling532cb4c2010-06-08 14:57:02 +10005672}
5673
Michael Neuling532cb4c2010-06-08 14:57:02 +10005674/**
5675 * check_asym_packing - Check to see if the group is packed into the
 5676 * sched domain.
5677 *
 5678 * This is primarily intended to be used at the sibling level. Some
5679 * cores like POWER7 prefer to use lower numbered SMT threads. In the
5680 * case of POWER7, it can move to lower SMT modes only when higher
5681 * threads are idle. When in lower SMT modes, the threads will
5682 * perform better since they share less core resources. Hence when we
5683 * have idle threads, we want them to be the higher ones.
5684 *
5685 * This packing function is run on idle threads. It checks to see if
5686 * the busiest CPU in this domain (core in the P7 case) has a higher
5687 * CPU number than the packing function is being run on. Here we are
 5688 * assuming a lower CPU number is equivalent to a lower SMT thread
5689 * number.
5690 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02005691 * Return: 1 when packing is required and a task should be moved to
Michael Neulingb6b12292010-06-10 12:06:21 +10005692 * this CPU. The amount of the imbalance is returned in *imbalance.
5693 *
Randy Dunlapcd968912012-06-08 13:18:33 -07005694 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005695 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10005696 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005697static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005698{
5699 int busiest_cpu;
5700
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005701 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10005702 return 0;
5703
5704 if (!sds->busiest)
5705 return 0;
5706
5707 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005708 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005709 return 0;
5710
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005711 env->imbalance = DIV_ROUND_CLOSEST(
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005712 sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
5713 SCHED_POWER_SCALE);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005714
Michael Neuling532cb4c2010-06-08 14:57:02 +10005715 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005716}
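
/*
 * Rough example of the imbalance computed here: with
 * busiest_stat.avg_load = 1536 and group_power = 1024, env->imbalance =
 * 1536 * 1024 / 1024 = 1536, i.e. the group's entire raw weighted load,
 * since ASYM_PACKING wants the higher-numbered group drained towards the
 * lower-numbered CPUs.
 */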
5717
5718/**
5719 * fix_small_imbalance - Calculate the minor imbalance that exists
5720 * amongst the groups of a sched_domain, during
5721 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07005722 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005723 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005724 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005725static inline
5726void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005727{
5728 unsigned long tmp, pwr_now = 0, pwr_move = 0;
5729 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005730 unsigned long scaled_busy_load_per_task;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005731 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005732
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005733 local = &sds->local_stat;
5734 busiest = &sds->busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005735
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005736 if (!local->sum_nr_running)
5737 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
5738 else if (busiest->load_per_task > local->load_per_task)
5739 imbn = 1;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005740
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005741 scaled_busy_load_per_task =
5742 (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005743 busiest->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005744
Vladimir Davydov3029ede2013-09-15 17:49:14 +04005745 if (busiest->avg_load + scaled_busy_load_per_task >=
5746 local->avg_load + (scaled_busy_load_per_task * imbn)) {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005747 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005748 return;
5749 }
5750
5751 /*
5752 * OK, we don't have enough imbalance to justify moving tasks,
5753 * however we may be able to increase total CPU power used by
5754 * moving them.
5755 */
5756
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005757 pwr_now += busiest->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005758 min(busiest->load_per_task, busiest->avg_load);
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005759 pwr_now += local->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005760 min(local->load_per_task, local->avg_load);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005761 pwr_now /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005762
5763 /* Amount of load we'd subtract */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005764 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005765 busiest->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005766 if (busiest->avg_load > tmp) {
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005767 pwr_move += busiest->group_power *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005768 min(busiest->load_per_task,
5769 busiest->avg_load - tmp);
5770 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005771
5772 /* Amount of load we'd add */
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005773 if (busiest->avg_load * busiest->group_power <
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005774 busiest->load_per_task * SCHED_POWER_SCALE) {
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005775 tmp = (busiest->avg_load * busiest->group_power) /
5776 local->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005777 } else {
5778 tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005779 local->group_power;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005780 }
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005781 pwr_move += local->group_power *
5782 min(local->load_per_task, local->avg_load + tmp);
Nikhil Rao1399fa72011-05-18 10:09:39 -07005783 pwr_move /= SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005784
5785 /* Move if we gain throughput */
5786 if (pwr_move > pwr_now)
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005787 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005788}
5789
5790/**
5791 * calculate_imbalance - Calculate the amount of imbalance present within the
5792 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005793 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005794 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005795 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005796static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005797{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005798 unsigned long max_pull, load_above_capacity = ~0UL;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005799 struct sg_lb_stats *local, *busiest;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005800
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005801 local = &sds->local_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005802 busiest = &sds->busiest_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005803
5804 if (busiest->group_imb) {
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005805 /*
5806 * In the group_imb case we cannot rely on group-wide averages
5807 * to ensure cpu-load equilibrium, look at wider averages. XXX
5808 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005809 busiest->load_per_task =
5810 min(busiest->load_per_task, sds->avg_load);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005811 }
5812
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005813 /*
5814 * In the presence of smp nice balancing, certain scenarios can have
 5815 * max load less than avg load (as we skip the groups at or below
 5816 * its cpu_power while calculating max_load).
5817 */
Vladimir Davydovb1885552013-09-15 17:49:13 +04005818 if (busiest->avg_load <= sds->avg_load ||
5819 local->avg_load >= sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005820 env->imbalance = 0;
5821 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005822 }
5823
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005824 if (!busiest->group_imb) {
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005825 /*
5826 * Don't want to pull so many tasks that a group would go idle.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005827 * Except of course for the group_imb case, since then we might
5828 * have to drop below capacity to reach cpu-load equilibrium.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005829 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005830 load_above_capacity =
5831 (busiest->sum_nr_running - busiest->group_capacity);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005832
Nikhil Rao1399fa72011-05-18 10:09:39 -07005833 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005834 load_above_capacity /= busiest->group_power;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005835 }
5836
5837 /*
5838 * We're trying to get all the cpus to the average_load, so we don't
5839 * want to push ourselves above the average load, nor do we wish to
5840 * reduce the max loaded cpu below the average load. At the same time,
5841 * we also don't want to reduce the group load below the group capacity
5842 * (so that we can implement power-savings policies etc). Thus we look
5843 * for the minimum possible imbalance.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005844 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005845 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005846
5847 /* How much load to actually move to equalise the imbalance */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005848 env->imbalance = min(
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02005849 max_pull * busiest->group_power,
5850 (sds->avg_load - local->avg_load) * local->group_power
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005851 ) / SCHED_POWER_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005852
5853 /*
5854 * if *imbalance is less than the average load per runnable task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005855 * there is no guarantee that any tasks will be moved, so consider
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005856 * bumping its value to force at least one task to be
 5857 * moved
5858 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005859 if (env->imbalance < busiest->load_per_task)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005860 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005861}
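
/*
 * Rough worked example (loads in SCHED_POWER_SCALE units of 1024):
 * sds->avg_load = 1024; busiest has avg_load = 1536, group_power = 1024,
 * 3 running tasks and capacity for 2; local has avg_load = 512 and
 * group_power = 1024.  Then load_above_capacity = 1 * 1024, max_pull =
 * min(1536 - 1024, 1024) = 512 and env->imbalance =
 * min(512 * 1024, (1024 - 512) * 1024) / 1024 = 512, i.e. pull half a
 * nice-0 task's worth of weighted load towards the local group.
 */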
Nikhil Raofab47622010-10-15 13:12:29 -07005862
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005863/******* find_busiest_group() helpers end here *********************/
5864
5865/**
5866 * find_busiest_group - Returns the busiest group within the sched_domain
5867 * if there is an imbalance. If there isn't an imbalance, and
5868 * the user has opted for power-savings, it returns a group whose
5869 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
5870 * such a group exists.
5871 *
5872 * Also calculates the amount of weighted load which should be moved
5873 * to restore balance.
5874 *
Randy Dunlapcd968912012-06-08 13:18:33 -07005875 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005876 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02005877 * Return: - The busiest group if imbalance exists.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005878 * - If no imbalance and user has opted for power-savings balance,
5879 * return the least loaded group whose CPUs can be
5880 * put to idle by rebalancing its tasks onto our group.
5881 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005882static struct sched_group *find_busiest_group(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005883{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005884 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005885 struct sd_lb_stats sds;
5886
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005887 init_sd_lb_stats(&sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005888
5889 /*
 5890 * Compute the various statistics relevant for load balancing at
5891 * this level.
5892 */
Joonsoo Kim23f0d202013-08-06 17:36:42 +09005893 update_sd_lb_stats(env, &sds);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005894 local = &sds.local_stat;
5895 busiest = &sds.busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005896
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005897 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
5898 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10005899 return sds.busiest;
5900
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005901 /* There is no busy sibling group to pull tasks from */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005902 if (!sds.busiest || busiest->sum_nr_running == 0)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005903 goto out_balanced;
5904
Nikhil Rao1399fa72011-05-18 10:09:39 -07005905 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
Ken Chenb0432d82011-04-07 17:23:22 -07005906
Peter Zijlstra866ab432011-02-21 18:56:47 +01005907 /*
5908 * If the busiest group is imbalanced the below checks don't
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005909 * work because they assume all things are equal, which typically
Peter Zijlstra866ab432011-02-21 18:56:47 +01005910 * isn't true due to cpus_allowed constraints and the like.
5911 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005912 if (busiest->group_imb)
Peter Zijlstra866ab432011-02-21 18:56:47 +01005913 goto force_balance;
5914
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005915 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005916 if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
5917 !busiest->group_has_capacity)
Nikhil Raofab47622010-10-15 13:12:29 -07005918 goto force_balance;
5919
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005920 /*
5921 * If the local group is more busy than the selected busiest group
5922 * don't try and pull any tasks.
5923 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005924 if (local->avg_load >= busiest->avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005925 goto out_balanced;
5926
Peter Zijlstracc57aa82011-02-21 18:55:32 +01005927 /*
5928 * Don't pull any tasks if this group is already above the domain
5929 * average load.
5930 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005931 if (local->avg_load >= sds.avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005932 goto out_balanced;
5933
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005934 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005935 /*
 5936 * This cpu is idle. If the busiest group doesn't
 5937 * have more tasks than the number of available cpus and
 5938 * there is no imbalance between this and the busiest group
 5939 * wrt idle cpus, it is balanced.
5940 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005941 if ((local->idle_cpus < busiest->idle_cpus) &&
5942 busiest->sum_nr_running <= busiest->group_weight)
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005943 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01005944 } else {
5945 /*
5946 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
5947 * imbalance_pct to be conservative.
5948 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005949 if (100 * busiest->avg_load <=
5950 env->sd->imbalance_pct * local->avg_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01005951 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005952 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005953
Nikhil Raofab47622010-10-15 13:12:29 -07005954force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005955 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005956 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005957 return sds.busiest;
5958
5959out_balanced:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005960 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005961 return NULL;
5962}
5963
5964/*
5965 * find_busiest_queue - find the busiest runqueue among the cpus in group.
5966 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005967static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08005968 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005969{
5970 struct rq *busiest = NULL, *rq;
Joonsoo Kim95a79b82013-08-06 17:36:41 +09005971 unsigned long busiest_load = 0, busiest_power = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005972 int i;
5973
Peter Zijlstra6906a402013-08-19 15:20:21 +02005974 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005975 unsigned long power, capacity, wl;
5976 enum fbq_type rt;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005977
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005978 rq = cpu_rq(i);
5979 rt = fbq_classify_rq(rq);
5980
5981 /*
5982 * We classify groups/runqueues into three groups:
5983 * - regular: there are !numa tasks
5984 * - remote: there are numa tasks that run on the 'wrong' node
5985 * - all: there is no distinction
5986 *
5987 * In order to avoid migrating ideally placed numa tasks,
5988 * ignore those when there's better options.
5989 *
5990 * If we ignore the actual busiest queue to migrate another
5991 * task, the next balance pass can still reduce the busiest
5992 * queue by moving tasks around inside the node.
5993 *
5994 * If we cannot move enough load due to this classification
5995 * the next pass will adjust the group classification and
5996 * allow migration of more tasks.
5997 *
5998 * Both cases only affect the total convergence complexity.
5999 */
6000 if (rt > env->fbq_type)
6001 continue;
6002
6003 power = power_of(i);
6004 capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006005 if (!capacity)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006006 capacity = fix_small_capacity(env->sd, group);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006007
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006008 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006009
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006010 /*
6011 * When comparing with imbalance, use weighted_cpuload()
6012 * which is not scaled with the cpu power.
6013 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006014 if (capacity && rq->nr_running == 1 && wl > env->imbalance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006015 continue;
6016
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006017 /*
6018 * For the load comparisons with the other cpu's, consider
6019 * the weighted_cpuload() scaled with the cpu power, so that
6020 * the load can be moved away from the cpu that is potentially
6021 * running at a lower capacity.
Joonsoo Kim95a79b82013-08-06 17:36:41 +09006022 *
6023 * Thus we're looking for max(wl_i / power_i), crosswise
6024 * multiplication to rid ourselves of the division works out
6025 * to: wl_i * power_j > wl_j * power_i; where j is our
6026 * previous maximum.
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006027 */
Joonsoo Kim95a79b82013-08-06 17:36:41 +09006028 if (wl * busiest_power > busiest_load * power) {
6029 busiest_load = wl;
6030 busiest_power = power;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006031 busiest = rq;
6032 }
6033 }
6034
6035 return busiest;
6036}
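
/*
 * Example of the crosswise comparison: candidate cpu A with wl = 2048 and
 * power = 1024 (relative load 2.0) against the current pick B with
 * wl = 3072 and power = 2048 (relative load 1.5) gives
 * 2048 * 2048 > 3072 * 1024, so A becomes the busiest queue despite
 * carrying less absolute load.
 */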
6037
6038/*
6039 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
6040 * so long as it is large enough.
6041 */
6042#define MAX_PINNED_INTERVAL 512
6043
6044/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09006045DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006046
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006047static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01006048{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006049 struct sched_domain *sd = env->sd;
6050
6051 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10006052
6053 /*
6054 * ASYM_PACKING needs to force migrate tasks from busy but
6055 * higher numbered CPUs in order to pack all tasks in the
6056 * lowest numbered CPUs.
6057 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006058 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10006059 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01006060 }
6061
6062 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
6063}
6064
Tejun Heo969c7922010-05-06 18:49:21 +02006065static int active_load_balance_cpu_stop(void *data);
6066
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006067static int should_we_balance(struct lb_env *env)
6068{
6069 struct sched_group *sg = env->sd->groups;
6070 struct cpumask *sg_cpus, *sg_mask;
6071 int cpu, balance_cpu = -1;
6072
6073 /*
 6074 * In the newly idle case, we will allow all the cpus
6075 * to do the newly idle load balance.
6076 */
6077 if (env->idle == CPU_NEWLY_IDLE)
6078 return 1;
6079
6080 sg_cpus = sched_group_cpus(sg);
6081 sg_mask = sched_group_mask(sg);
6082 /* Try to find first idle cpu */
6083 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
6084 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
6085 continue;
6086
6087 balance_cpu = cpu;
6088 break;
6089 }
6090
6091 if (balance_cpu == -1)
6092 balance_cpu = group_balance_cpu(sg);
6093
6094 /*
6095 * First idle cpu or the first cpu(busiest) in this sched group
6096 * is eligible for doing load balancing at this and above domains.
6097 */
Joonsoo Kimb0cff9d2013-09-10 15:54:49 +09006098 return balance_cpu == env->dst_cpu;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006099}
6100
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006101/*
6102 * Check this_cpu to ensure it is balanced within domain. Attempt to move
6103 * tasks if there is an imbalance.
6104 */
6105static int load_balance(int this_cpu, struct rq *this_rq,
6106 struct sched_domain *sd, enum cpu_idle_type idle,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006107 int *continue_balancing)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006108{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306109 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra62633222013-08-19 12:41:09 +02006110 struct sched_domain *sd_parent = sd->parent;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006111 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006112 struct rq *busiest;
6113 unsigned long flags;
Joonsoo Kime6252c32013-04-23 17:27:41 +09006114 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006115
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006116 struct lb_env env = {
6117 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01006118 .dst_cpu = this_cpu,
6119 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306120 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006121 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02006122 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08006123 .cpus = cpus,
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006124 .fbq_type = all,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006125 };
6126
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006127 /*
6128 * For NEWLY_IDLE load_balancing, we don't need to consider
6129 * other cpus in our group
6130 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006131 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006132 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006133
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006134 cpumask_copy(cpus, cpu_active_mask);
6135
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006136 schedstat_inc(sd, lb_count[idle]);
6137
6138redo:
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006139 if (!should_we_balance(&env)) {
6140 *continue_balancing = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006141 goto out_balanced;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006142 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006143
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006144 group = find_busiest_group(&env);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006145 if (!group) {
6146 schedstat_inc(sd, lb_nobusyg[idle]);
6147 goto out_balanced;
6148 }
6149
Michael Wangb94031302012-07-12 16:10:13 +08006150 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006151 if (!busiest) {
6152 schedstat_inc(sd, lb_nobusyq[idle]);
6153 goto out_balanced;
6154 }
6155
Michael Wang78feefc2012-08-06 16:41:59 +08006156 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006157
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006158 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006159
6160 ld_moved = 0;
6161 if (busiest->nr_running > 1) {
6162 /*
6163 * Attempt to move tasks. If find_busiest_group has found
6164 * an imbalance but busiest->nr_running <= 1, the group is
6165 * still unbalanced. ld_moved simply stays zero, so it is
6166 * correctly treated as an imbalance.
6167 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006168 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02006169 env.src_cpu = busiest->cpu;
6170 env.src_rq = busiest;
6171 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006172
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01006173more_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006174 local_irq_save(flags);
Michael Wang78feefc2012-08-06 16:41:59 +08006175 double_rq_lock(env.dst_rq, busiest);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306176
6177 /*
6178 * cur_ld_moved - load moved in current iteration
6179 * ld_moved - cumulative load moved across iterations
6180 */
6181 cur_ld_moved = move_tasks(&env);
6182 ld_moved += cur_ld_moved;
Michael Wang78feefc2012-08-06 16:41:59 +08006183 double_rq_unlock(env.dst_rq, busiest);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006184 local_irq_restore(flags);
6185
6186 /*
6187 * some other cpu did the load balance for us.
6188 */
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306189 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
6190 resched_cpu(env.dst_cpu);
6191
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09006192 if (env.flags & LBF_NEED_BREAK) {
6193 env.flags &= ~LBF_NEED_BREAK;
6194 goto more_balance;
6195 }
6196
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306197 /*
6198 * Revisit (affine) tasks on src_cpu that couldn't be moved to
6199 * us and move them to an alternate dst_cpu in our sched_group
6200 * where they can run. The upper limit on how many times we
6201 * iterate on same src_cpu is dependent on number of cpus in our
6202 * sched_group.
6203 *
6204 * This changes load balance semantics a bit on who can move
6205 * load to a given_cpu. In addition to the given_cpu itself
 6206 * (or an ilb_cpu acting on its behalf where given_cpu is
6207 * nohz-idle), we now have balance_cpu in a position to move
6208 * load to given_cpu. In rare situations, this may cause
6209 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
6210 * _independently_ and at _same_ time to move some load to
6211 * given_cpu) causing exceess load to be moved to given_cpu.
6212 * This however should not happen so much in practice and
6213 * moreover subsequent load balance cycles should correct the
6214 * excess load moved.
6215 */
Peter Zijlstra62633222013-08-19 12:41:09 +02006216 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306217
Vladimir Davydov7aff2e32013-09-15 21:30:13 +04006218			/* Prevent re-selecting dst_cpu via env's cpus */
6219 cpumask_clear_cpu(env.dst_cpu, env.cpus);
6220
Michael Wang78feefc2012-08-06 16:41:59 +08006221 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306222 env.dst_cpu = env.new_dst_cpu;
Peter Zijlstra62633222013-08-19 12:41:09 +02006223 env.flags &= ~LBF_DST_PINNED;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306224 env.loop = 0;
6225 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006226
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306227 /*
6228 * Go back to "more_balance" rather than "redo" since we
6229 * need to continue with same src_cpu.
6230 */
6231 goto more_balance;
6232 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006233
Peter Zijlstra62633222013-08-19 12:41:09 +02006234 /*
6235 * We failed to reach balance because of affinity.
6236 */
6237 if (sd_parent) {
6238 int *group_imbalance = &sd_parent->groups->sgp->imbalance;
6239
6240 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
6241 *group_imbalance = 1;
6242 } else if (*group_imbalance)
6243 *group_imbalance = 0;
6244 }
6245
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006246 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006247 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006248 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05306249 if (!cpumask_empty(cpus)) {
6250 env.loop = 0;
6251 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006252 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05306253 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006254 goto out_balanced;
6255 }
6256 }
6257
6258 if (!ld_moved) {
6259 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07006260 /*
6261 * Increment the failure counter only on periodic balance.
6262 * We do not want newidle balance, which can be very
 6263 * frequent, to pollute the failure counter, causing
6264 * excessive cache_hot migrations and active balances.
6265 */
6266 if (idle != CPU_NEWLY_IDLE)
6267 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006268
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006269 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006270 raw_spin_lock_irqsave(&busiest->lock, flags);
6271
Tejun Heo969c7922010-05-06 18:49:21 +02006272 /* don't kick the active_load_balance_cpu_stop,
6273 * if the curr task on busiest cpu can't be
6274 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006275 */
6276 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02006277 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006278 raw_spin_unlock_irqrestore(&busiest->lock,
6279 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006280 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006281 goto out_one_pinned;
6282 }
6283
Tejun Heo969c7922010-05-06 18:49:21 +02006284 /*
6285 * ->active_balance synchronizes accesses to
6286 * ->active_balance_work. Once set, it's cleared
6287 * only after active load balance is finished.
6288 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006289 if (!busiest->active_balance) {
6290 busiest->active_balance = 1;
6291 busiest->push_cpu = this_cpu;
6292 active_balance = 1;
6293 }
6294 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02006295
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006296 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02006297 stop_one_cpu_nowait(cpu_of(busiest),
6298 active_load_balance_cpu_stop, busiest,
6299 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006300 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006301
6302 /*
6303 * We've kicked active balancing, reset the failure
6304 * counter.
6305 */
6306 sd->nr_balance_failed = sd->cache_nice_tries+1;
6307 }
6308 } else
6309 sd->nr_balance_failed = 0;
6310
6311 if (likely(!active_balance)) {
6312 /* We were unbalanced, so reset the balancing interval */
6313 sd->balance_interval = sd->min_interval;
6314 } else {
6315 /*
6316 * If we've begun active balancing, start to back off. This
6317 * case may not be covered by the all_pinned logic if there
6318 * is only 1 task on the busy runqueue (because we don't call
6319 * move_tasks).
6320 */
6321 if (sd->balance_interval < sd->max_interval)
6322 sd->balance_interval *= 2;
6323 }
6324
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006325 goto out;
6326
6327out_balanced:
6328 schedstat_inc(sd, lb_balanced[idle]);
6329
6330 sd->nr_balance_failed = 0;
6331
6332out_one_pinned:
6333 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006334 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02006335 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006336 (sd->balance_interval < sd->max_interval))
6337 sd->balance_interval *= 2;
6338
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08006339 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006340out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006341 return ld_moved;
6342}
6343
6344/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006345 * idle_balance is called by schedule() if this_cpu is about to become
6346 * idle. Attempts to pull tasks from other CPUs.
6347 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006348void idle_balance(int this_cpu, struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006349{
6350 struct sched_domain *sd;
6351 int pulled_task = 0;
6352 unsigned long next_balance = jiffies + HZ;
Jason Low9bd721c2013-09-13 11:26:52 -07006353 u64 curr_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006354
Frederic Weisbecker78becc22013-04-12 01:51:02 +02006355 this_rq->idle_stamp = rq_clock(this_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006356
6357 if (this_rq->avg_idle < sysctl_sched_migration_cost)
6358 return;
6359
Peter Zijlstraf492e122009-12-23 15:29:42 +01006360 /*
6361 * Drop the rq->lock, but keep IRQ/preempt disabled.
6362 */
6363 raw_spin_unlock(&this_rq->lock);
6364
Paul Turner48a16752012-10-04 13:18:31 +02006365 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02006366 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006367 for_each_domain(this_cpu, sd) {
6368 unsigned long interval;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006369 int continue_balancing = 1;
Jason Low9bd721c2013-09-13 11:26:52 -07006370 u64 t0, domain_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006371
6372 if (!(sd->flags & SD_LOAD_BALANCE))
6373 continue;
6374
Jason Low9bd721c2013-09-13 11:26:52 -07006375 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost)
6376 break;
6377
Peter Zijlstraf492e122009-12-23 15:29:42 +01006378 if (sd->flags & SD_BALANCE_NEWIDLE) {
Jason Low9bd721c2013-09-13 11:26:52 -07006379 t0 = sched_clock_cpu(this_cpu);
6380
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006381 /* If we've pulled tasks over stop searching: */
Peter Zijlstraf492e122009-12-23 15:29:42 +01006382 pulled_task = load_balance(this_cpu, this_rq,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006383 sd, CPU_NEWLY_IDLE,
6384 &continue_balancing);
Jason Low9bd721c2013-09-13 11:26:52 -07006385
6386 domain_cost = sched_clock_cpu(this_cpu) - t0;
6387 if (domain_cost > sd->max_newidle_lb_cost)
6388 sd->max_newidle_lb_cost = domain_cost;
6389
6390 curr_cost += domain_cost;
Peter Zijlstraf492e122009-12-23 15:29:42 +01006391 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006392
6393 interval = msecs_to_jiffies(sd->balance_interval);
6394 if (time_after(next_balance, sd->last_balance + interval))
6395 next_balance = sd->last_balance + interval;
Nikhil Raod5ad1402010-11-17 11:42:04 -08006396 if (pulled_task) {
6397 this_rq->idle_stamp = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006398 break;
Nikhil Raod5ad1402010-11-17 11:42:04 -08006399 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006400 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006401 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01006402
6403 raw_spin_lock(&this_rq->lock);
6404
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006405 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
6406 /*
6407 * We are going idle. next_balance may be set based on
6408 * a busy processor. So reset next_balance.
6409 */
6410 this_rq->next_balance = next_balance;
6411 }
Jason Low9bd721c2013-09-13 11:26:52 -07006412
6413 if (curr_cost > this_rq->max_idle_balance_cost)
6414 this_rq->max_idle_balance_cost = curr_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006415}
6416
6417/*
Tejun Heo969c7922010-05-06 18:49:21 +02006418 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
6419 * running tasks off the busiest CPU onto idle CPUs. It requires at
6420 * least 1 task to be running on each physical CPU where possible, and
6421 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006422 */
Tejun Heo969c7922010-05-06 18:49:21 +02006423static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006424{
Tejun Heo969c7922010-05-06 18:49:21 +02006425 struct rq *busiest_rq = data;
6426 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006427 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02006428 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006429 struct sched_domain *sd;
Tejun Heo969c7922010-05-06 18:49:21 +02006430
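	/*
	 * Queued by load_balance() via the cpu stopper after it set
	 * ->active_balance and ->push_cpu on the busiest rq, so this runs
	 * in stopper context on the busiest CPU itself.
	 */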
6431 raw_spin_lock_irq(&busiest_rq->lock);
6432
6433 /* make sure the requested cpu hasn't gone down in the meantime */
6434 if (unlikely(busiest_cpu != smp_processor_id() ||
6435 !busiest_rq->active_balance))
6436 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006437
6438 /* Is there any task to move? */
6439 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02006440 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006441
6442 /*
6443	 * This condition is "impossible"; if it occurs
6444 * we need to fix it. Originally reported by
6445 * Bjorn Helgaas on a 128-cpu setup.
6446 */
6447 BUG_ON(busiest_rq == target_rq);
6448
6449 /* move a task from busiest_rq to target_rq */
6450 double_lock_balance(busiest_rq, target_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006451
6452 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02006453 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006454 for_each_domain(target_cpu, sd) {
6455 if ((sd->flags & SD_LOAD_BALANCE) &&
6456 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
6457 break;
6458 }
6459
6460 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006461 struct lb_env env = {
6462 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01006463 .dst_cpu = target_cpu,
6464 .dst_rq = target_rq,
6465 .src_cpu = busiest_rq->cpu,
6466 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006467 .idle = CPU_IDLE,
6468 };
6469
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006470 schedstat_inc(sd, alb_count);
6471
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006472 if (move_one_task(&env))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006473 schedstat_inc(sd, alb_pushed);
6474 else
6475 schedstat_inc(sd, alb_failed);
6476 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006477 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006478 double_unlock_balance(busiest_rq, target_rq);
Tejun Heo969c7922010-05-06 18:49:21 +02006479out_unlock:
6480 busiest_rq->active_balance = 0;
6481 raw_spin_unlock_irq(&busiest_rq->lock);
6482 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006483}
6484
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006485#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006486/*
6487 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006488 * - When one of the busy CPUs notices that idle rebalancing may be
6489 *   needed, it kicks the idle load balancer, which then does idle
6490 *   load balancing for all the idle CPUs.
6491 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006492static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006493 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006494 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006495 unsigned long next_balance; /* in jiffy units */
6496} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006497
Daniel Lezcano3dd03372014-01-06 12:34:41 +01006498static inline int find_new_ilb(void)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006499{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006500 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006501
Suresh Siddha786d6dc72011-12-01 17:07:35 -08006502 if (ilb < nr_cpu_ids && idle_cpu(ilb))
6503 return ilb;
6504
6505 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006506}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006507
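/*
 * Note that only the first CPU in nohz.idle_cpus_mask is considered: if
 * that one has just become busy again, find_new_ilb() returns nr_cpu_ids
 * and the kick is simply skipped for this round.
 */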
6508/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006509 * Kick a CPU to do the nohz balancing, if it is time for it. We pick an
6510 * idle CPU from nohz.idle_cpus_mask via find_new_ilb() (if there is one)
6511 * and send it a reschedule IPI.
6512 */
Daniel Lezcano0aeeeeb2014-01-06 12:34:42 +01006513static void nohz_balancer_kick(void)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006514{
6515 int ilb_cpu;
6516
6517 nohz.next_balance++;
6518
Daniel Lezcano3dd03372014-01-06 12:34:41 +01006519 ilb_cpu = find_new_ilb();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006520
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006521 if (ilb_cpu >= nr_cpu_ids)
6522 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006523
Suresh Siddhacd490c52011-12-06 11:26:34 -08006524 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08006525 return;
6526 /*
6527 * Use smp_send_reschedule() instead of resched_cpu().
6528 * This way we generate a sched IPI on the target cpu which
6529 * is idle. And the softirq performing nohz idle load balance
6530 * will be run before returning from the IPI.
6531 */
6532 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006533 return;
6534}
6535
Alex Shic1cc0172012-09-10 15:10:58 +08006536static inline void nohz_balance_exit_idle(int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08006537{
6538 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
6539 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
6540 atomic_dec(&nohz.nr_cpus);
6541 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
6542 }
6543}
6544
Suresh Siddha69e1e812011-12-01 17:07:33 -08006545static inline void set_cpu_sd_state_busy(void)
6546{
6547 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306548 int cpu = smp_processor_id();
Suresh Siddha69e1e812011-12-01 17:07:33 -08006549
Suresh Siddha69e1e812011-12-01 17:07:33 -08006550 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306551 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Vincent Guittot25f55d92013-04-23 16:59:02 +02006552
6553 if (!sd || !sd->nohz_idle)
6554 goto unlock;
6555 sd->nohz_idle = 0;
6556
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306557 atomic_inc(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02006558unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08006559 rcu_read_unlock();
6560}
6561
6562void set_cpu_sd_state_idle(void)
6563{
6564 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306565 int cpu = smp_processor_id();
Suresh Siddha69e1e812011-12-01 17:07:33 -08006566
Suresh Siddha69e1e812011-12-01 17:07:33 -08006567 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306568 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Vincent Guittot25f55d92013-04-23 16:59:02 +02006569
6570 if (!sd || sd->nohz_idle)
6571 goto unlock;
6572 sd->nohz_idle = 1;
6573
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306574 atomic_dec(&sd->groups->sgp->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02006575unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08006576 rcu_read_unlock();
6577}
6578
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006579/*
Alex Shic1cc0172012-09-10 15:10:58 +08006580 * This routine will record that the cpu is going idle with tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006581 * This info will be used in performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006582 */
Alex Shic1cc0172012-09-10 15:10:58 +08006583void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006584{
Suresh Siddha71325962012-01-19 18:28:57 -08006585 /*
6586 * If this cpu is going down, then nothing needs to be done.
6587 */
6588 if (!cpu_active(cpu))
6589 return;
6590
Alex Shic1cc0172012-09-10 15:10:58 +08006591 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
6592 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006593
Alex Shic1cc0172012-09-10 15:10:58 +08006594 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
6595 atomic_inc(&nohz.nr_cpus);
6596 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006597}
Suresh Siddha71325962012-01-19 18:28:57 -08006598
Paul Gortmaker0db06282013-06-19 14:53:51 -04006599static int sched_ilb_notifier(struct notifier_block *nfb,
Suresh Siddha71325962012-01-19 18:28:57 -08006600 unsigned long action, void *hcpu)
6601{
6602 switch (action & ~CPU_TASKS_FROZEN) {
6603 case CPU_DYING:
Alex Shic1cc0172012-09-10 15:10:58 +08006604 nohz_balance_exit_idle(smp_processor_id());
Suresh Siddha71325962012-01-19 18:28:57 -08006605 return NOTIFY_OK;
6606 default:
6607 return NOTIFY_DONE;
6608 }
6609}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006610#endif
6611
6612static DEFINE_SPINLOCK(balancing);
6613
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006614/*
6615 * Scale the max load_balance interval with the number of CPUs in the system.
6616 * This trades load-balance latency on larger machines for less cross talk.
6617 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02006618void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006619{
6620 max_load_balance_interval = HZ*num_online_cpus()/10;
6621}
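/*
 * For example, assuming HZ=1000: with 16 CPUs online the cap works out
 * to 1000 * 16 / 10 = 1600 jiffies, i.e. at most ~1.6s between balance
 * attempts on any one domain, growing linearly with the CPU count.
 */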
6622
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006623/*
6624 * It checks each scheduling domain to see if it is due to be balanced,
6625 * and initiates a balancing operation if so.
6626 *
Libinb9b08532013-04-01 19:14:01 +08006627 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006628 */
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01006629static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006630{
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006631 int continue_balancing = 1;
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01006632 int cpu = rq->cpu;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006633 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02006634 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006635 /* Earliest time when we have to do rebalance again */
6636 unsigned long next_balance = jiffies + 60*HZ;
6637 int update_next_balance = 0;
Jason Lowf48627e2013-09-13 11:26:53 -07006638 int need_serialize, need_decay = 0;
6639 u64 max_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006640
Paul Turner48a16752012-10-04 13:18:31 +02006641 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08006642
Peter Zijlstradce840a2011-04-07 14:09:50 +02006643 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006644 for_each_domain(cpu, sd) {
Jason Lowf48627e2013-09-13 11:26:53 -07006645 /*
6646 * Decay the newidle max times here because this is a regular
6647 * visit to all the domains. Decay ~1% per second.
6648 */
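		/*
		 * 253/256 is a ~1.2% reduction and, with next_decay set to
		 * jiffies + HZ, it is applied at most once a second, so a
		 * stale max_newidle_lb_cost roughly halves within a minute
		 * (0.98828^60 ~= 0.49).
		 */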
6649 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
6650 sd->max_newidle_lb_cost =
6651 (sd->max_newidle_lb_cost * 253) / 256;
6652 sd->next_decay_max_lb_cost = jiffies + HZ;
6653 need_decay = 1;
6654 }
6655 max_cost += sd->max_newidle_lb_cost;
6656
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006657 if (!(sd->flags & SD_LOAD_BALANCE))
6658 continue;
6659
Jason Lowf48627e2013-09-13 11:26:53 -07006660 /*
6661 * Stop the load balance at this level. There is another
6662 * CPU in our sched group which is doing load balancing more
6663 * actively.
6664 */
6665 if (!continue_balancing) {
6666 if (need_decay)
6667 continue;
6668 break;
6669 }
6670
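		/*
		 * A busy cpu balances less eagerly: stretch the interval by
		 * sd->busy_factor, convert it from ms to jiffies and clamp
		 * it to [1, max_load_balance_interval].
		 */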
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006671 interval = sd->balance_interval;
6672 if (idle != CPU_IDLE)
6673 interval *= sd->busy_factor;
6674
6675 /* scale ms to jiffies */
6676 interval = msecs_to_jiffies(interval);
Peter Zijlstra49c022e2011-04-05 10:14:25 +02006677 interval = clamp(interval, 1UL, max_load_balance_interval);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006678
6679 need_serialize = sd->flags & SD_SERIALIZE;
6680
6681 if (need_serialize) {
6682 if (!spin_trylock(&balancing))
6683 goto out;
6684 }
6685
6686 if (time_after_eq(jiffies, sd->last_balance + interval)) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006687 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006688 /*
Peter Zijlstra62633222013-08-19 12:41:09 +02006689 * The LBF_DST_PINNED logic could have changed
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09006690 * env->dst_cpu, so we can't know our idle
6691 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006692 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09006693 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006694 }
6695 sd->last_balance = jiffies;
6696 }
6697 if (need_serialize)
6698 spin_unlock(&balancing);
6699out:
6700 if (time_after(next_balance, sd->last_balance + interval)) {
6701 next_balance = sd->last_balance + interval;
6702 update_next_balance = 1;
6703 }
Jason Lowf48627e2013-09-13 11:26:53 -07006704 }
6705 if (need_decay) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006706 /*
Jason Lowf48627e2013-09-13 11:26:53 -07006707 * Ensure the rq-wide value also decays but keep it at a
6708 * reasonable floor to avoid funnies with rq->avg_idle.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006709 */
Jason Lowf48627e2013-09-13 11:26:53 -07006710 rq->max_idle_balance_cost =
6711 max((u64)sysctl_sched_migration_cost, max_cost);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006712 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006713 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006714
6715 /*
6716 * next_balance will be updated only when there is a need.
6717 * When the cpu is attached to null domain for ex, it will not be
6718 * updated.
6719 */
6720 if (likely(update_next_balance))
6721 rq->next_balance = next_balance;
6722}
6723
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006724#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006725/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006726 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006727 * rebalancing for all the cpus for whom scheduler ticks are stopped.
6728 */
Daniel Lezcano208cb162014-01-06 12:34:44 +01006729static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006730{
Daniel Lezcano208cb162014-01-06 12:34:44 +01006731 int this_cpu = this_rq->cpu;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006732 struct rq *rq;
6733 int balance_cpu;
6734
Suresh Siddha1c792db2011-12-01 17:07:32 -08006735 if (idle != CPU_IDLE ||
6736 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
6737 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006738
6739 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08006740 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006741 continue;
6742
6743 /*
6744 * If this cpu gets work to do, stop the load balancing
6745 * work being done for other cpus. Next load
6746 * balancing owner will pick it up.
6747 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08006748 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006749 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006750
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02006751 rq = cpu_rq(balance_cpu);
6752
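		/*
		 * The remote cpu's tick is stopped, so bring its clock and
		 * per-cpu load accounting up to date under its rq lock before
		 * balancing on its behalf.
		 */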
6753 raw_spin_lock_irq(&rq->lock);
6754 update_rq_clock(rq);
6755 update_idle_cpu_load(rq);
6756 raw_spin_unlock_irq(&rq->lock);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006757
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01006758 rebalance_domains(rq, CPU_IDLE);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006759
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006760 if (time_after(this_rq->next_balance, rq->next_balance))
6761 this_rq->next_balance = rq->next_balance;
6762 }
6763 nohz.next_balance = this_rq->next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08006764end:
6765 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006766}
6767
6768/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006769 * Current heuristic for kicking the idle load balancer in the presence
6770 * of an idle cpu in the system: kick if any of the following holds.
6771 * - This rq has more than one task.
6772 * - At the sd_busy scheduler domain level, this cpu's scheduler group
6773 *   has more than one busy cpu.
6774 * - For SD_ASYM_PACKING, a lower numbered cpu in the scheduler
6775 *   domain span is idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006776 */
Daniel Lezcano4a725622014-01-06 12:34:39 +01006777static inline int nohz_kick_needed(struct rq *rq)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006778{
6779 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006780 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306781 struct sched_group_power *sgp;
Daniel Lezcano4a725622014-01-06 12:34:39 +01006782 int nr_busy, cpu = rq->cpu;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006783
Daniel Lezcano4a725622014-01-06 12:34:39 +01006784 if (unlikely(rq->idle_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006785 return 0;
6786
Suresh Siddha1c792db2011-12-01 17:07:32 -08006787 /*
6788	 * We may have recently been in ticked or tickless idle mode. At the first
6789 * busy tick after returning from idle, we will update the busy stats.
6790 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08006791 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08006792 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006793
6794 /*
6795 * None are in tickless mode and hence no need for NOHZ idle load
6796 * balancing.
6797 */
6798 if (likely(!atomic_read(&nohz.nr_cpus)))
6799 return 0;
Suresh Siddha1c792db2011-12-01 17:07:32 -08006800
6801 if (time_before(now, nohz.next_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006802 return 0;
6803
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006804 if (rq->nr_running >= 2)
6805 goto need_kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006806
Peter Zijlstra067491b2011-12-07 14:32:08 +01006807 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306808 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006809
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306810 if (sd) {
6811 sgp = sd->groups->sgp;
6812 nr_busy = atomic_read(&sgp->nr_busy_cpus);
6813
6814 if (nr_busy > 1)
Peter Zijlstra067491b2011-12-07 14:32:08 +01006815 goto need_kick_unlock;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006816 }
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05306817
6818 sd = rcu_dereference(per_cpu(sd_asym, cpu));
6819
6820 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
6821 sched_domain_span(sd)) < cpu))
6822 goto need_kick_unlock;
6823
Peter Zijlstra067491b2011-12-07 14:32:08 +01006824 rcu_read_unlock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006825 return 0;
Peter Zijlstra067491b2011-12-07 14:32:08 +01006826
6827need_kick_unlock:
6828 rcu_read_unlock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08006829need_kick:
6830 return 1;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006831}
6832#else
Daniel Lezcano208cb162014-01-06 12:34:44 +01006833static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006834#endif
6835
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006836/*
6837 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006838 * Also triggered for nohz idle balancing (with NOHZ_BALANCE_KICK set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006839 */
6840static void run_rebalance_domains(struct softirq_action *h)
6841{
Daniel Lezcano208cb162014-01-06 12:34:44 +01006842 struct rq *this_rq = this_rq();
Suresh Siddha6eb57e02011-10-03 15:09:01 -07006843 enum cpu_idle_type idle = this_rq->idle_balance ?
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006844 CPU_IDLE : CPU_NOT_IDLE;
6845
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01006846 rebalance_domains(this_rq, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006847
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006848 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006849 * If this cpu has a pending NOHZ_BALANCE_KICK, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006850 * balancing on behalf of the other idle cpus whose ticks are
6851 * stopped.
6852 */
Daniel Lezcano208cb162014-01-06 12:34:44 +01006853 nohz_idle_balance(this_rq, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006854}
6855
Daniel Lezcano63f609b2014-01-06 12:34:40 +01006856static inline int on_null_domain(struct rq *rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006857{
Daniel Lezcano63f609b2014-01-06 12:34:40 +01006858 return !rcu_dereference_sched(rq->sd);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006859}
6860
6861/*
6862 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006863 */
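/*
 * This runs from the scheduler tick path, so it stays cheap: it only
 * raises SCHED_SOFTIRQ locally and, under NO_HZ_COMMON, may send one
 * kick IPI to an idle CPU via nohz_balancer_kick().
 */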
Daniel Lezcano7caff662014-01-06 12:34:38 +01006864void trigger_load_balance(struct rq *rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006865{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006866 /* Don't need to rebalance while attached to NULL domain */
Daniel Lezcanoc7260992014-01-06 12:34:45 +01006867 if (unlikely(on_null_domain(rq)))
6868 return;
6869
6870 if (time_after_eq(jiffies, rq->next_balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006871 raise_softirq(SCHED_SOFTIRQ);
Frederic Weisbecker3451d022011-08-10 23:21:01 +02006872#ifdef CONFIG_NO_HZ_COMMON
Daniel Lezcanoc7260992014-01-06 12:34:45 +01006873 if (nohz_kick_needed(rq))
Daniel Lezcano0aeeeeb2014-01-06 12:34:42 +01006874 nohz_balancer_kick();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07006875#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006876}
6877
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006878static void rq_online_fair(struct rq *rq)
6879{
6880 update_sysctl();
6881}
6882
6883static void rq_offline_fair(struct rq *rq)
6884{
6885 update_sysctl();
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07006886
6887 /* Ensure any throttled groups are reachable by pick_next_task */
6888 unthrottle_offline_cfs_rqs(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01006889}
6890
Dhaval Giani55e12e52008-06-24 23:39:43 +05306891#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02006892
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006893/*
6894 * scheduler tick hitting a task of our scheduling class:
6895 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01006896static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006897{
6898 struct cfs_rq *cfs_rq;
6899 struct sched_entity *se = &curr->se;
6900
6901 for_each_sched_entity(se) {
6902 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01006903 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006904 }
Ben Segall18bf2802012-10-04 12:51:20 +02006905
Dave Kleikamp10e84b92013-07-31 13:53:35 -07006906 if (numabalancing_enabled)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02006907 task_tick_numa(rq, curr);
Linus Torvalds3d59eeb2012-12-16 14:33:25 -08006908
Ben Segall18bf2802012-10-04 12:51:20 +02006909 update_rq_runnable_avg(rq, 1);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006910}
6911
6912/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006913 * called on fork with the child task as argument from the parent's context
6914 * - child not yet on the tasklist
6915 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006916 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006917static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006918{
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09006919 struct cfs_rq *cfs_rq;
6920 struct sched_entity *se = &p->se, *curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02006921 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006922 struct rq *rq = this_rq();
6923 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006924
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006925 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006926
Peter Zijlstra861d0342010-08-19 13:31:43 +02006927 update_rq_clock(rq);
6928
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09006929 cfs_rq = task_cfs_rq(current);
6930 curr = cfs_rq->curr;
6931
Daisuke Nishimura6c9a27f2013-09-10 18:16:36 +09006932 /*
6933	 * Not only the cpu but also the task_group of the parent might have
6934	 * changed after parent->se.parent and parent->se.cfs_rq were copied
6935	 * to the child. Call __set_task_cpu() so that the child's pointers
6936	 * refer to valid ones again.
6937 */
6938 rcu_read_lock();
6939 __set_task_cpu(p, this_cpu);
6940 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006941
Ting Yang7109c442007-08-28 12:53:24 +02006942 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006943
Mike Galbraithb5d9d732009-09-08 11:12:28 +02006944 if (curr)
6945 se->vruntime = curr->vruntime;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02006946 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006947
Peter Zijlstracd29fe62009-11-27 17:32:46 +01006948 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02006949 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02006950 * Upon rescheduling, sched_class::put_prev_task() will place
6951 * 'current' within the tree based on its new key value.
6952 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006953 swap(curr->vruntime, se->vruntime);
Bharata B Raoaec0a512008-08-28 14:42:49 +05306954 resched_task(rq->curr);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02006955 }
6956
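	/*
	 * Make the child's vruntime relative to this cfs_rq's min_vruntime;
	 * enqueue_entity() adds back the min_vruntime of whichever runqueue
	 * the child is eventually woken up on, so the child is neither
	 * penalised nor boosted by the difference between the two runqueues.
	 */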
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01006957 se->vruntime -= cfs_rq->min_vruntime;
6958
Thomas Gleixner05fa7852009-11-17 14:28:38 +01006959 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02006960}
6961
Steven Rostedtcb469842008-01-25 21:08:22 +01006962/*
6963 * Priority of the task has changed. Check to see if we preempt
6964 * the current task.
6965 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006966static void
6967prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01006968{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006969 if (!p->se.on_rq)
6970 return;
6971
Steven Rostedtcb469842008-01-25 21:08:22 +01006972 /*
6973 * Reschedule if we are currently running on this runqueue and
6974 * our priority decreased, or if we are not currently running on
6975 * this runqueue and our priority is higher than the current's
6976 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006977 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01006978 if (p->prio > oldprio)
6979 resched_task(rq->curr);
6980 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02006981 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01006982}
6983
Peter Zijlstrada7a7352011-01-17 17:03:27 +01006984static void switched_from_fair(struct rq *rq, struct task_struct *p)
6985{
6986 struct sched_entity *se = &p->se;
6987 struct cfs_rq *cfs_rq = cfs_rq_of(se);
6988
6989 /*
6990 * Ensure the task's vruntime is normalized, so that when its
6991 * switched back to the fair class the enqueue_entity(.flags=0) will
6992 * do the right thing.
6993 *
6994 * If it was on_rq, then the dequeue_entity(.flags=0) will already
6995 * have normalized the vruntime, if it was !on_rq, then only when
6996 * the task is sleeping will it still have non-normalized vruntime.
6997 */
6998 if (!se->on_rq && p->state != TASK_RUNNING) {
6999 /*
7000 * Fix up our vruntime so that the current sleep doesn't
7001 * cause 'unlimited' sleep bonus.
7002 */
7003 place_entity(cfs_rq, se, 0);
7004 se->vruntime -= cfs_rq->min_vruntime;
7005 }
Paul Turner9ee474f2012-10-04 13:18:30 +02007006
Alex Shi141965c2013-06-26 13:05:39 +08007007#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02007008 /*
7009 * Remove our load from contribution when we leave sched_fair
7010 * and ensure we don't carry in an old decay_count if we
7011 * switch back.
7012 */
Kirill Tkhai87e3c8a2013-07-21 04:32:07 +04007013 if (se->avg.decay_count) {
7014 __synchronize_entity_decay(se);
7015 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turner9ee474f2012-10-04 13:18:30 +02007016 }
7017#endif
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007018}
7019
Steven Rostedtcb469842008-01-25 21:08:22 +01007020/*
7021 * We switched to the sched_fair class.
7022 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007023static void switched_to_fair(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01007024{
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007025 if (!p->se.on_rq)
7026 return;
7027
Steven Rostedtcb469842008-01-25 21:08:22 +01007028 /*
7029 * We were most likely switched from sched_rt, so
7030 * kick off the schedule if running, otherwise just see
7031 * if we can still preempt the current task.
7032 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007033 if (rq->curr == p)
Steven Rostedtcb469842008-01-25 21:08:22 +01007034 resched_task(rq->curr);
7035 else
Peter Zijlstra15afe092008-09-20 23:38:02 +02007036 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01007037}
7038
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007039/* Account for a task changing its policy or group.
7040 *
7041 * This routine is mostly called to set cfs_rq->curr field when a task
7042 * migrates between groups/classes.
7043 */
7044static void set_curr_task_fair(struct rq *rq)
7045{
7046 struct sched_entity *se = &rq->curr->se;
7047
Paul Turnerec12cb72011-07-21 09:43:30 -07007048 for_each_sched_entity(se) {
7049 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7050
7051 set_next_entity(cfs_rq, se);
7052 /* ensure bandwidth has been allocated on our new cfs_rq */
7053 account_cfs_rq_runtime(cfs_rq, 0);
7054 }
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007055}
7056
Peter Zijlstra029632f2011-10-25 10:00:11 +02007057void init_cfs_rq(struct cfs_rq *cfs_rq)
7058{
7059 cfs_rq->tasks_timeline = RB_ROOT;
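	/*
	 * Start min_vruntime about 1ms (1 << 20 ns) short of the u64 wrap
	 * point so that vruntime wrap-around bugs show up soon after boot
	 * instead of months later.
	 */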
Peter Zijlstra029632f2011-10-25 10:00:11 +02007060 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
7061#ifndef CONFIG_64BIT
7062 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
7063#endif
Alex Shi141965c2013-06-26 13:05:39 +08007064#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02007065 atomic64_set(&cfs_rq->decay_counter, 1);
Alex Shi25099402013-06-20 10:18:55 +08007066 atomic_long_set(&cfs_rq->removed_load, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02007067#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02007068}
7069
Peter Zijlstra810b3812008-02-29 15:21:01 -05007070#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007071static void task_move_group_fair(struct task_struct *p, int on_rq)
Peter Zijlstra810b3812008-02-29 15:21:01 -05007072{
Paul Turneraff3e492012-10-04 13:18:30 +02007073 struct cfs_rq *cfs_rq;
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007074 /*
7075 * If the task was not on the rq at the time of this cgroup movement
7076 * it must have been asleep, sleeping tasks keep their ->vruntime
7077 * absolute on their old rq until wakeup (needed for the fair sleeper
7078 * bonus in place_entity()).
7079 *
7080 * If it was on the rq, we've just 'preempted' it, which does convert
7081 * ->vruntime to a relative base.
7082 *
7083 * Make sure both cases convert their relative position when migrating
7084 * to another cgroup's rq. This does somewhat interfere with the
7085 * fair sleeper stuff for the first placement, but who cares.
7086 */
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09007087 /*
7088 * When !on_rq, vruntime of the task has usually NOT been normalized.
7089 * But there are some cases where it has already been normalized:
7090 *
7091 * - Moving a forked child which is waiting for being woken up by
7092 * wake_up_new_task().
Daisuke Nishimura62af3782011-12-15 14:37:41 +09007093 * - Moving a task which has been woken up by try_to_wake_up() and
7094 * waiting for actually being woken up by sched_ttwu_pending().
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09007095 *
7096 * To prevent boost or penalty in the new cfs_rq caused by delta
7097 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
7098 */
Daisuke Nishimura62af3782011-12-15 14:37:41 +09007099 if (!on_rq && (!p->se.sum_exec_runtime || p->state == TASK_WAKING))
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09007100 on_rq = 1;
7101
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01007102 if (!on_rq)
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007103 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
7104 set_task_rq(p, task_cpu(p));
Paul Turneraff3e492012-10-04 13:18:30 +02007105 if (!on_rq) {
7106 cfs_rq = cfs_rq_of(&p->se);
7107 p->se.vruntime += cfs_rq->min_vruntime;
7108#ifdef CONFIG_SMP
7109 /*
7110 * migrate_task_rq_fair() will have removed our previous
7111 * contribution, but we must synchronize for ongoing future
7112 * decay.
7113 */
7114 p->se.avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
7115 cfs_rq->blocked_load_avg += p->se.avg.load_avg_contrib;
7116#endif
7117 }
Peter Zijlstra810b3812008-02-29 15:21:01 -05007118}
Peter Zijlstra029632f2011-10-25 10:00:11 +02007119
7120void free_fair_sched_group(struct task_group *tg)
7121{
7122 int i;
7123
7124 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
7125
7126 for_each_possible_cpu(i) {
7127 if (tg->cfs_rq)
7128 kfree(tg->cfs_rq[i]);
7129 if (tg->se)
7130 kfree(tg->se[i]);
7131 }
7132
7133 kfree(tg->cfs_rq);
7134 kfree(tg->se);
7135}
7136
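/*
 * Returns 1 on success and 0 on allocation failure (note: not the usual
 * 0/-errno convention used elsewhere).
 */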
7137int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7138{
7139 struct cfs_rq *cfs_rq;
7140 struct sched_entity *se;
7141 int i;
7142
7143 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
7144 if (!tg->cfs_rq)
7145 goto err;
7146 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
7147 if (!tg->se)
7148 goto err;
7149
7150 tg->shares = NICE_0_LOAD;
7151
7152 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
7153
7154 for_each_possible_cpu(i) {
7155 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
7156 GFP_KERNEL, cpu_to_node(i));
7157 if (!cfs_rq)
7158 goto err;
7159
7160 se = kzalloc_node(sizeof(struct sched_entity),
7161 GFP_KERNEL, cpu_to_node(i));
7162 if (!se)
7163 goto err_free_rq;
7164
7165 init_cfs_rq(cfs_rq);
7166 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
7167 }
7168
7169 return 1;
7170
7171err_free_rq:
7172 kfree(cfs_rq);
7173err:
7174 return 0;
7175}
7176
7177void unregister_fair_sched_group(struct task_group *tg, int cpu)
7178{
7179 struct rq *rq = cpu_rq(cpu);
7180 unsigned long flags;
7181
7182 /*
7183	 * Only empty task groups can be destroyed, so we can speculatively
7184 * check on_list without danger of it being re-added.
7185 */
7186 if (!tg->cfs_rq[cpu]->on_list)
7187 return;
7188
7189 raw_spin_lock_irqsave(&rq->lock, flags);
7190 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
7191 raw_spin_unlock_irqrestore(&rq->lock, flags);
7192}
7193
7194void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7195 struct sched_entity *se, int cpu,
7196 struct sched_entity *parent)
7197{
7198 struct rq *rq = cpu_rq(cpu);
7199
7200 cfs_rq->tg = tg;
7201 cfs_rq->rq = rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007202 init_cfs_rq_runtime(cfs_rq);
7203
7204 tg->cfs_rq[cpu] = cfs_rq;
7205 tg->se[cpu] = se;
7206
7207 /* se could be NULL for root_task_group */
7208 if (!se)
7209 return;
7210
7211 if (!parent)
7212 se->cfs_rq = &rq->cfs;
7213 else
7214 se->cfs_rq = parent->my_q;
7215
7216 se->my_q = cfs_rq;
Paul Turner0ac9b1c2013-10-16 11:16:27 -07007217 /* guarantee group entities always have weight */
7218 update_load_set(&se->load, NICE_0_LOAD);
Peter Zijlstra029632f2011-10-25 10:00:11 +02007219 se->parent = parent;
7220}
7221
7222static DEFINE_MUTEX(shares_mutex);
7223
7224int sched_group_set_shares(struct task_group *tg, unsigned long shares)
7225{
7226 int i;
7227 unsigned long flags;
7228
7229 /*
7230 * We can't change the weight of the root cgroup.
7231 */
7232 if (!tg->se[0])
7233 return -EINVAL;
7234
7235 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
7236
7237 mutex_lock(&shares_mutex);
7238 if (tg->shares == shares)
7239 goto done;
7240
7241 tg->shares = shares;
7242 for_each_possible_cpu(i) {
7243 struct rq *rq = cpu_rq(i);
7244 struct sched_entity *se;
7245
7246 se = tg->se[i];
7247 /* Propagate contribution to hierarchy */
7248 raw_spin_lock_irqsave(&rq->lock, flags);
Frederic Weisbecker71b1da42013-04-12 01:50:59 +02007249
7250 /* Possible calls to update_curr() need rq clock */
7251 update_rq_clock(rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08007252 for_each_sched_entity(se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02007253 update_cfs_shares(group_cfs_rq(se));
7254 raw_spin_unlock_irqrestore(&rq->lock, flags);
7255 }
7256
7257done:
7258 mutex_unlock(&shares_mutex);
7259 return 0;
7260}
7261#else /* CONFIG_FAIR_GROUP_SCHED */
7262
7263void free_fair_sched_group(struct task_group *tg) { }
7264
7265int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7266{
7267 return 1;
7268}
7269
7270void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
7271
7272#endif /* CONFIG_FAIR_GROUP_SCHED */
7273
Peter Zijlstra810b3812008-02-29 15:21:01 -05007274
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07007275static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00007276{
7277 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00007278 unsigned int rr_interval = 0;
7279
7280 /*
7281 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
7282 * idle runqueue:
7283 */
Peter Williams0d721ce2009-09-21 01:31:53 +00007284 if (rq->cfs.load.weight)
Zhu Yanhaia59f4e02013-01-08 12:56:52 +08007285 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
Peter Williams0d721ce2009-09-21 01:31:53 +00007286
7287 return rr_interval;
7288}
7289
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007290/*
7291 * All the scheduling class methods:
7292 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02007293const struct sched_class fair_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02007294 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007295 .enqueue_task = enqueue_task_fair,
7296 .dequeue_task = dequeue_task_fair,
7297 .yield_task = yield_task_fair,
Mike Galbraithd95f4122011-02-01 09:50:51 -05007298 .yield_to_task = yield_to_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007299
Ingo Molnar2e09bf52007-10-15 17:00:05 +02007300 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007301
7302 .pick_next_task = pick_next_task_fair,
7303 .put_prev_task = put_prev_task_fair,
7304
Peter Williams681f3e62007-10-24 18:23:51 +02007305#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08007306 .select_task_rq = select_task_rq_fair,
Paul Turner0a74bef2012-10-04 13:18:30 +02007307 .migrate_task_rq = migrate_task_rq_fair,
Alex Shi141965c2013-06-26 13:05:39 +08007308
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01007309 .rq_online = rq_online_fair,
7310 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01007311
7312 .task_waking = task_waking_fair,
Peter Williams681f3e62007-10-24 18:23:51 +02007313#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007314
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007315 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007316 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007317 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01007318
7319 .prio_changed = prio_changed_fair,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007320 .switched_from = switched_from_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01007321 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05007322
Peter Williams0d721ce2009-09-21 01:31:53 +00007323 .get_rr_interval = get_rr_interval_fair,
7324
Peter Zijlstra810b3812008-02-29 15:21:01 -05007325#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007326 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05007327#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007328};
7329
7330#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +02007331void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007332{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007333 struct cfs_rq *cfs_rq;
7334
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01007335 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02007336 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02007337 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01007338 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007339}
7340#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02007341
7342__init void init_sched_fair_class(void)
7343{
7344#ifdef CONFIG_SMP
7345 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
7346
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007347#ifdef CONFIG_NO_HZ_COMMON
Diwakar Tundlam554ceca2012-03-07 14:44:26 -08007348 nohz.next_balance = jiffies;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007349 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
Suresh Siddha71325962012-01-19 18:28:57 -08007350 cpu_notifier(sched_ilb_notifier, 0);
Peter Zijlstra029632f2011-10-25 10:00:11 +02007351#endif
7352#endif /* SMP */
7353
7354}