Ingo Molnarbf0f6f22007-07-09 18:51:58 +02001/*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
Peter Zijlstra21805082007-08-25 18:41:53 +020018 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020021 */
22
Arjan van de Ven97455122008-01-25 21:08:34 +010023#include <linux/latencytop.h>
Christian Ehrhardt1983a922009-11-30 12:16:47 +010024#include <linux/sched.h>
Sisir Koppaka3436ae12011-03-26 18:22:55 +053025#include <linux/cpumask.h>
Peter Zijlstra029632f2011-10-25 10:00:11 +020026#include <linux/slab.h>
27#include <linux/profile.h>
28#include <linux/interrupt.h>
Peter Zijlstracbee9f82012-10-25 14:16:43 +020029#include <linux/mempolicy.h>
Mel Gormane14808b2012-11-19 10:59:15 +000030#include <linux/migrate.h>
Peter Zijlstracbee9f82012-10-25 14:16:43 +020031#include <linux/task_work.h>
Peter Zijlstra029632f2011-10-25 10:00:11 +020032
33#include <trace/events/sched.h>
34
35#include "sched.h"
Arjan van de Ven97455122008-01-25 21:08:34 +010036
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020037/*
Peter Zijlstra21805082007-08-25 18:41:53 +020038 * Targeted preemption latency for CPU-bound tasks:
Takuya Yoshikawa864616e2010-10-14 16:09:13 +090039 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020040 *
Peter Zijlstra21805082007-08-25 18:41:53 +020041 * NOTE: this latency value is not the same as the concept of
Ingo Molnard274a4c2007-10-15 17:00:14 +020042 * 'timeslice length' - timeslices in CFS are of variable length
43 * and have no persistent notion like in traditional, time-slice
44 * based scheduling concepts.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020045 *
Ingo Molnard274a4c2007-10-15 17:00:14 +020046 * (to see the precise effective timeslice length of your workload,
47 * run vmstat and monitor the context-switches (cs) field)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020048 */
Mike Galbraith21406922010-03-11 17:17:15 +010049unsigned int sysctl_sched_latency = 6000000ULL;
50unsigned int normalized_sysctl_sched_latency = 6000000ULL;
Ingo Molnar2bd8e6d2007-10-15 17:00:02 +020051
52/*
Christian Ehrhardt1983a922009-11-30 12:16:47 +010053 * The initial- and re-scaling of tunables is configurable
 54 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
55 *
56 * Options are:
57 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 58 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
59 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
60 */
61enum sched_tunable_scaling sysctl_sched_tunable_scaling
62 = SCHED_TUNABLESCALING_LOG;
63
64/*
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010065 * Minimal preemption granularity for CPU-bound tasks:
Takuya Yoshikawa864616e2010-10-14 16:09:13 +090066 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010067 */
Ingo Molnar0bf377b2010-09-12 08:14:52 +020068unsigned int sysctl_sched_min_granularity = 750000ULL;
69unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010070
71/*
 72 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
73 */
Ingo Molnar0bf377b2010-09-12 08:14:52 +020074static unsigned int sched_nr_latency = 8;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +010075
76/*
Mike Galbraith2bba22c2009-09-09 15:41:37 +020077 * After fork, child runs first. If set to 0 (default) then
Ingo Molnar2bd8e6d2007-10-15 17:00:02 +020078 * parent will (try to) run first.
79 */
Mike Galbraith2bba22c2009-09-09 15:41:37 +020080unsigned int sysctl_sched_child_runs_first __read_mostly;
Peter Zijlstra21805082007-08-25 18:41:53 +020081
82/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020083 * SCHED_OTHER wake-up granularity.
Mike Galbraith172e0822009-09-09 15:41:37 +020084 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020085 *
86 * This option delays the preemption effects of decoupled workloads
87 * and reduces their over-scheduling. Synchronous workloads will still
88 * have immediate wakeup/sleep latencies.
89 */
Mike Galbraith172e0822009-09-09 15:41:37 +020090unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +010091unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +020092
Ingo Molnarda84d962007-10-15 17:00:18 +020093const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
94
Paul Turnera7a4f8a2010-11-15 15:47:06 -080095/*
96 * The exponential sliding window over which load is averaged for shares
97 * distribution.
98 * (default: 10msec)
99 */
100unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
101
Paul Turnerec12cb72011-07-21 09:43:30 -0700102#ifdef CONFIG_CFS_BANDWIDTH
103/*
104 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
105 * each time a cfs_rq requests quota.
106 *
107 * Note: in the case that the slice exceeds the runtime remaining (either due
108 * to consumption or the quota being specified to be smaller than the slice)
109 * we will always only issue the remaining available time.
110 *
111 * default: 5 msec, units: microseconds
112 */
113unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
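/*
 * Worked example (hypothetical numbers, using the default 5 msec slice above):
 * a task group configured with a 20 msec quota per 100 msec period hands out
 * runtime to its per-cpu cfs_rq's in 5 msec slices, so at most four full
 * slices can be drawn per period; once only 3 msec of quota remains, the next
 * request is issued just those remaining 3 msec.
 */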
114#endif
115
Paul Gortmaker85276322013-04-19 15:10:50 -0400116static inline void update_load_add(struct load_weight *lw, unsigned long inc)
117{
118 lw->weight += inc;
119 lw->inv_weight = 0;
120}
121
122static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
123{
124 lw->weight -= dec;
125 lw->inv_weight = 0;
126}
127
128static inline void update_load_set(struct load_weight *lw, unsigned long w)
129{
130 lw->weight = w;
131 lw->inv_weight = 0;
132}
133
Peter Zijlstra029632f2011-10-25 10:00:11 +0200134/*
135 * Increase the granularity value when there are more CPUs,
136 * because with more CPUs the 'effective latency' as visible
137 * to users decreases. But the relationship is not linear,
138 * so pick a second-best guess by going with the log2 of the
139 * number of CPUs.
140 *
141 * This idea comes from the SD scheduler of Con Kolivas:
142 */
143static int get_update_sysctl_factor(void)
144{
145 unsigned int cpus = min_t(int, num_online_cpus(), 8);
146 unsigned int factor;
147
148 switch (sysctl_sched_tunable_scaling) {
149 case SCHED_TUNABLESCALING_NONE:
150 factor = 1;
151 break;
152 case SCHED_TUNABLESCALING_LINEAR:
153 factor = cpus;
154 break;
155 case SCHED_TUNABLESCALING_LOG:
156 default:
157 factor = 1 + ilog2(cpus);
158 break;
159 }
160
161 return factor;
162}
163
164static void update_sysctl(void)
165{
166 unsigned int factor = get_update_sysctl_factor();
167
168#define SET_SYSCTL(name) \
169 (sysctl_##name = (factor) * normalized_sysctl_##name)
170 SET_SYSCTL(sched_min_granularity);
171 SET_SYSCTL(sched_latency);
172 SET_SYSCTL(sched_wakeup_granularity);
173#undef SET_SYSCTL
174}
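/*
 * Worked example, assuming the defaults above: with SCHED_TUNABLESCALING_LOG
 * and 8 (or more, since cpus is clamped to 8) online CPUs, the factor is
 * 1 + ilog2(8) = 4, so the effective tunables become
 * sched_min_granularity = 3 msec, sched_latency = 24 msec and
 * sched_wakeup_granularity = 4 msec.
 */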
175
176void sched_init_granularity(void)
177{
178 update_sysctl();
179}
180
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100181#define WMULT_CONST (~0U)
Peter Zijlstra029632f2011-10-25 10:00:11 +0200182#define WMULT_SHIFT 32
183
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100184static void __update_inv_weight(struct load_weight *lw)
Peter Zijlstra029632f2011-10-25 10:00:11 +0200185{
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100186 unsigned long w;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200187
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100188 if (likely(lw->inv_weight))
189 return;
190
191 w = scale_load_down(lw->weight);
192
193 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
194 lw->inv_weight = 1;
195 else if (unlikely(!w))
196 lw->inv_weight = WMULT_CONST;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200197 else
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100198 lw->inv_weight = WMULT_CONST / w;
199}
Peter Zijlstra029632f2011-10-25 10:00:11 +0200200
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100201/*
202 * delta_exec * weight / lw.weight
203 * OR
204 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
205 *
206 * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
207 * we're guaranteed shift stays positive because inv_weight is guaranteed to
208 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
209 *
 210 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
211 * weight/lw.weight <= 1, and therefore our shift will also be positive.
212 */
213static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
214{
215 u64 fact = scale_load_down(weight);
216 int shift = WMULT_SHIFT;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200217
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100218 __update_inv_weight(lw);
219
220 if (unlikely(fact >> 32)) {
221 while (fact >> 32) {
222 fact >>= 1;
223 shift--;
224 }
Peter Zijlstra029632f2011-10-25 10:00:11 +0200225 }
226
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100227 /* hint to use a 32x32->64 mul */
228 fact = (u64)(u32)fact * lw->inv_weight;
Peter Zijlstra029632f2011-10-25 10:00:11 +0200229
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100230 while (fact >> 32) {
231 fact >>= 1;
232 shift--;
233 }
234
235 return mul_u64_u32_shr(delta_exec, fact, shift);
Peter Zijlstra029632f2011-10-25 10:00:11 +0200236}
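/*
 * Worked example (a sketch with round numbers): __calc_delta(6000000, 1024, lw)
 * with lw->weight == 3072 computes inv_weight = 2^32 / 3072, then
 * (6000000 * 1024 * inv_weight) >> 32 ~= 2000000, i.e. the 6 msec delta is
 * scaled by 1024/3072 down to roughly 2 msec using only multiplies and shifts.
 */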
237
238
239const struct sched_class fair_sched_class;
Peter Zijlstraa4c2f002008-10-17 19:27:03 +0200240
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200241/**************************************************************
242 * CFS operations on generic schedulable entities:
243 */
244
245#ifdef CONFIG_FAIR_GROUP_SCHED
246
247/* cpu runqueue to which this cfs_rq is attached */
248static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
249{
250 return cfs_rq->rq;
251}
252
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200253/* An entity is a task if it doesn't "own" a runqueue */
254#define entity_is_task(se) (!se->my_q)
255
Peter Zijlstra8f488942009-07-24 12:25:30 +0200256static inline struct task_struct *task_of(struct sched_entity *se)
257{
258#ifdef CONFIG_SCHED_DEBUG
259 WARN_ON_ONCE(!entity_is_task(se));
260#endif
261 return container_of(se, struct task_struct, se);
262}
263
Peter Zijlstrab7581492008-04-19 19:45:00 +0200264/* Walk up scheduling entities hierarchy */
265#define for_each_sched_entity(se) \
266 for (; se; se = se->parent)
267
268static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
269{
270 return p->se.cfs_rq;
271}
272
273/* runqueue on which this entity is (to be) queued */
274static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
275{
276 return se->cfs_rq;
277}
278
279/* runqueue "owned" by this group */
280static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
281{
282 return grp->my_q;
283}
284
Paul Turneraff3e492012-10-04 13:18:30 +0200285static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
286 int force_update);
Paul Turner9ee474f2012-10-04 13:18:30 +0200287
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800288static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
289{
290 if (!cfs_rq->on_list) {
Paul Turner67e86252010-11-15 15:47:05 -0800291 /*
292 * Ensure we either appear before our parent (if already
293 * enqueued) or force our parent to appear after us when it is
294 * enqueued. The fact that we always enqueue bottom-up
295 * reduces this to two cases.
296 */
297 if (cfs_rq->tg->parent &&
298 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
299 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800300 &rq_of(cfs_rq)->leaf_cfs_rq_list);
Paul Turner67e86252010-11-15 15:47:05 -0800301 } else {
302 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
303 &rq_of(cfs_rq)->leaf_cfs_rq_list);
304 }
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800305
306 cfs_rq->on_list = 1;
Paul Turner9ee474f2012-10-04 13:18:30 +0200307 /* We should have no load, but we need to update last_decay. */
Paul Turneraff3e492012-10-04 13:18:30 +0200308 update_cfs_rq_blocked_load(cfs_rq, 0);
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800309 }
310}
311
312static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
313{
314 if (cfs_rq->on_list) {
315 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
316 cfs_rq->on_list = 0;
317 }
318}
319
Peter Zijlstrab7581492008-04-19 19:45:00 +0200320/* Iterate through all leaf cfs_rq's on a runqueue */
321#define for_each_leaf_cfs_rq(rq, cfs_rq) \
322 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
323
324/* Do the two (enqueued) entities belong to the same group ? */
Peter Zijlstrafed14d42012-02-11 06:05:00 +0100325static inline struct cfs_rq *
Peter Zijlstrab7581492008-04-19 19:45:00 +0200326is_same_group(struct sched_entity *se, struct sched_entity *pse)
327{
328 if (se->cfs_rq == pse->cfs_rq)
Peter Zijlstrafed14d42012-02-11 06:05:00 +0100329 return se->cfs_rq;
Peter Zijlstrab7581492008-04-19 19:45:00 +0200330
Peter Zijlstrafed14d42012-02-11 06:05:00 +0100331 return NULL;
Peter Zijlstrab7581492008-04-19 19:45:00 +0200332}
333
334static inline struct sched_entity *parent_entity(struct sched_entity *se)
335{
336 return se->parent;
337}
338
Peter Zijlstra464b7522008-10-24 11:06:15 +0200339static void
340find_matching_se(struct sched_entity **se, struct sched_entity **pse)
341{
342 int se_depth, pse_depth;
343
344 /*
 345 * A preemption test can only be made between sibling entities that are in
 346 * the same cfs_rq, i.e. that have a common parent. Walk up the hierarchy
 347 * of both tasks until we find ancestors that are siblings of a common
 348 * parent.
349 */
350
351 /* First walk up until both entities are at same depth */
Peter Zijlstrafed14d42012-02-11 06:05:00 +0100352 se_depth = (*se)->depth;
353 pse_depth = (*pse)->depth;
Peter Zijlstra464b7522008-10-24 11:06:15 +0200354
355 while (se_depth > pse_depth) {
356 se_depth--;
357 *se = parent_entity(*se);
358 }
359
360 while (pse_depth > se_depth) {
361 pse_depth--;
362 *pse = parent_entity(*pse);
363 }
364
365 while (!is_same_group(*se, *pse)) {
366 *se = parent_entity(*se);
367 *pse = parent_entity(*pse);
368 }
369}
370
Peter Zijlstra8f488942009-07-24 12:25:30 +0200371#else /* !CONFIG_FAIR_GROUP_SCHED */
372
373static inline struct task_struct *task_of(struct sched_entity *se)
374{
375 return container_of(se, struct task_struct, se);
376}
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200377
378static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
379{
380 return container_of(cfs_rq, struct rq, cfs);
381}
382
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200383#define entity_is_task(se) 1
384
Peter Zijlstrab7581492008-04-19 19:45:00 +0200385#define for_each_sched_entity(se) \
386 for (; se; se = NULL)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200387
Peter Zijlstrab7581492008-04-19 19:45:00 +0200388static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200389{
Peter Zijlstrab7581492008-04-19 19:45:00 +0200390 return &task_rq(p)->cfs;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200391}
392
Peter Zijlstrab7581492008-04-19 19:45:00 +0200393static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
394{
395 struct task_struct *p = task_of(se);
396 struct rq *rq = task_rq(p);
397
398 return &rq->cfs;
399}
400
401/* runqueue "owned" by this group */
402static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
403{
404 return NULL;
405}
406
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -0800407static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
408{
409}
410
411static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
412{
413}
414
Peter Zijlstrab7581492008-04-19 19:45:00 +0200415#define for_each_leaf_cfs_rq(rq, cfs_rq) \
416 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
417
Peter Zijlstrab7581492008-04-19 19:45:00 +0200418static inline struct sched_entity *parent_entity(struct sched_entity *se)
419{
420 return NULL;
421}
422
Peter Zijlstra464b7522008-10-24 11:06:15 +0200423static inline void
424find_matching_se(struct sched_entity **se, struct sched_entity **pse)
425{
426}
427
Peter Zijlstrab7581492008-04-19 19:45:00 +0200428#endif /* CONFIG_FAIR_GROUP_SCHED */
429
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -0700430static __always_inline
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100431void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200432
433/**************************************************************
434 * Scheduling class tree data structure manipulation methods:
435 */
436
Andrei Epure1bf08232013-03-12 21:12:24 +0200437static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
Peter Zijlstra02e04312007-10-15 17:00:07 +0200438{
Andrei Epure1bf08232013-03-12 21:12:24 +0200439 s64 delta = (s64)(vruntime - max_vruntime);
Peter Zijlstra368059a2007-10-15 17:00:11 +0200440 if (delta > 0)
Andrei Epure1bf08232013-03-12 21:12:24 +0200441 max_vruntime = vruntime;
Peter Zijlstra02e04312007-10-15 17:00:07 +0200442
Andrei Epure1bf08232013-03-12 21:12:24 +0200443 return max_vruntime;
Peter Zijlstra02e04312007-10-15 17:00:07 +0200444}
445
Ingo Molnar0702e3e2007-10-15 17:00:14 +0200446static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
Peter Zijlstrab0ffd242007-10-15 17:00:12 +0200447{
448 s64 delta = (s64)(vruntime - min_vruntime);
449 if (delta < 0)
450 min_vruntime = vruntime;
451
452 return min_vruntime;
453}
454
Fabio Checconi54fdc582009-07-16 12:32:27 +0200455static inline int entity_before(struct sched_entity *a,
456 struct sched_entity *b)
457{
458 return (s64)(a->vruntime - b->vruntime) < 0;
459}
460
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200461static void update_min_vruntime(struct cfs_rq *cfs_rq)
462{
463 u64 vruntime = cfs_rq->min_vruntime;
464
465 if (cfs_rq->curr)
466 vruntime = cfs_rq->curr->vruntime;
467
468 if (cfs_rq->rb_leftmost) {
469 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
470 struct sched_entity,
471 run_node);
472
Peter Zijlstrae17036d2009-01-15 14:53:39 +0100473 if (!cfs_rq->curr)
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200474 vruntime = se->vruntime;
475 else
476 vruntime = min_vruntime(vruntime, se->vruntime);
477 }
478
Andrei Epure1bf08232013-03-12 21:12:24 +0200479 /* ensure we never gain time by being placed backwards. */
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200480 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
Peter Zijlstra3fe16982011-04-05 17:23:48 +0200481#ifndef CONFIG_64BIT
482 smp_wmb();
483 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
484#endif
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200485}
486
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200487/*
488 * Enqueue an entity into the rb-tree:
489 */
Ingo Molnar0702e3e2007-10-15 17:00:14 +0200490static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200491{
492 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
493 struct rb_node *parent = NULL;
494 struct sched_entity *entry;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200495 int leftmost = 1;
496
497 /*
498 * Find the right place in the rbtree:
499 */
500 while (*link) {
501 parent = *link;
502 entry = rb_entry(parent, struct sched_entity, run_node);
503 /*
 504 * We don't care about collisions. Nodes with
505 * the same key stay together.
506 */
Stephan Baerwolf2bd2d6f2011-07-20 14:46:59 +0200507 if (entity_before(se, entry)) {
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200508 link = &parent->rb_left;
509 } else {
510 link = &parent->rb_right;
511 leftmost = 0;
512 }
513 }
514
515 /*
516 * Maintain a cache of leftmost tree entries (it is frequently
517 * used):
518 */
Peter Zijlstra1af5f732008-10-24 11:06:13 +0200519 if (leftmost)
Ingo Molnar57cb4992007-10-15 17:00:11 +0200520 cfs_rq->rb_leftmost = &se->run_node;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200521
522 rb_link_node(&se->run_node, parent, link);
523 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200524}
525
Ingo Molnar0702e3e2007-10-15 17:00:14 +0200526static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200527{
Peter Zijlstra3fe69742008-03-14 20:55:51 +0100528 if (cfs_rq->rb_leftmost == &se->run_node) {
529 struct rb_node *next_node;
Peter Zijlstra3fe69742008-03-14 20:55:51 +0100530
531 next_node = rb_next(&se->run_node);
532 cfs_rq->rb_leftmost = next_node;
Peter Zijlstra3fe69742008-03-14 20:55:51 +0100533 }
Ingo Molnare9acbff2007-10-15 17:00:04 +0200534
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200535 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200536}
537
Peter Zijlstra029632f2011-10-25 10:00:11 +0200538struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200539{
Peter Zijlstraf4b67552008-11-04 21:25:07 +0100540 struct rb_node *left = cfs_rq->rb_leftmost;
541
542 if (!left)
543 return NULL;
544
545 return rb_entry(left, struct sched_entity, run_node);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200546}
547
Rik van Rielac53db52011-02-01 09:51:03 -0500548static struct sched_entity *__pick_next_entity(struct sched_entity *se)
549{
550 struct rb_node *next = rb_next(&se->run_node);
551
552 if (!next)
553 return NULL;
554
555 return rb_entry(next, struct sched_entity, run_node);
556}
557
558#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +0200559struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
Peter Zijlstraaeb73b02007-10-15 17:00:05 +0200560{
Ingo Molnar7eee3e62008-02-22 10:32:21 +0100561 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +0200562
Balbir Singh70eee742008-02-22 13:25:53 +0530563 if (!last)
564 return NULL;
Ingo Molnar7eee3e62008-02-22 10:32:21 +0100565
566 return rb_entry(last, struct sched_entity, run_node);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +0200567}
568
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200569/**************************************************************
570 * Scheduling class statistics methods:
571 */
572
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100573int sched_proc_update_handler(struct ctl_table *table, int write,
Alexey Dobriyan8d65af72009-09-23 15:57:19 -0700574 void __user *buffer, size_t *lenp,
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100575 loff_t *ppos)
576{
Alexey Dobriyan8d65af72009-09-23 15:57:19 -0700577 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100578 int factor = get_update_sysctl_factor();
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100579
580 if (ret || !write)
581 return ret;
582
583 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
584 sysctl_sched_min_granularity);
585
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100586#define WRT_SYSCTL(name) \
587 (normalized_sysctl_##name = sysctl_##name / (factor))
588 WRT_SYSCTL(sched_min_granularity);
589 WRT_SYSCTL(sched_latency);
590 WRT_SYSCTL(sched_wakeup_granularity);
Christian Ehrhardtacb4a842009-11-30 12:16:48 +0100591#undef WRT_SYSCTL
592
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100593 return 0;
594}
595#endif
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200596
597/*
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200598 * delta /= w
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200599 */
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100600static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200601{
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200602 if (unlikely(se->load.weight != NICE_0_LOAD))
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100603 delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200604
605 return delta;
606}
607
608/*
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200609 * The idea is to set a period in which each task runs once.
610 *
Borislav Petkov532b1852012-08-08 16:16:04 +0200611 * When there are too many tasks (sched_nr_latency) we have to stretch
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200612 * this period because otherwise the slices get too small.
613 *
614 * p = (nr <= nl) ? l : l*nr/nl
615 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200616static u64 __sched_period(unsigned long nr_running)
617{
618 u64 period = sysctl_sched_latency;
Peter Zijlstrab2be5e92007-11-09 22:39:37 +0100619 unsigned long nr_latency = sched_nr_latency;
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200620
621 if (unlikely(nr_running > nr_latency)) {
Peter Zijlstra4bf0b772008-01-25 21:08:21 +0100622 period = sysctl_sched_min_granularity;
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200623 period *= nr_running;
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +0200624 }
625
626 return period;
627}
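/*
 * Worked example, using the unscaled defaults (latency = 6 msec,
 * min_granularity = 0.75 msec, sched_nr_latency = 8): with 5 runnable tasks
 * the period stays at 6 msec, while with 12 runnable tasks it is stretched to
 * 0.75 msec * 12 = 9 msec so each slice keeps the minimum granularity.
 */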
628
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200629/*
630 * We calculate the wall-time slice from the period by taking a part
631 * proportional to the weight.
632 *
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200633 * s = p*P[w/rw]
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200634 */
Peter Zijlstra6d0f0eb2007-10-15 17:00:05 +0200635static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
Peter Zijlstra21805082007-08-25 18:41:53 +0200636{
Mike Galbraith0a582442009-01-02 12:16:42 +0100637 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200638
Mike Galbraith0a582442009-01-02 12:16:42 +0100639 for_each_sched_entity(se) {
Lin Ming6272d682009-01-15 17:17:15 +0100640 struct load_weight *load;
Christian Engelmayer3104bf02009-06-16 10:35:12 +0200641 struct load_weight lw;
Lin Ming6272d682009-01-15 17:17:15 +0100642
643 cfs_rq = cfs_rq_of(se);
644 load = &cfs_rq->load;
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200645
Mike Galbraith0a582442009-01-02 12:16:42 +0100646 if (unlikely(!se->on_rq)) {
Christian Engelmayer3104bf02009-06-16 10:35:12 +0200647 lw = cfs_rq->load;
Mike Galbraith0a582442009-01-02 12:16:42 +0100648
649 update_load_add(&lw, se->load.weight);
650 load = &lw;
651 }
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100652 slice = __calc_delta(slice, se->load.weight, load);
Mike Galbraith0a582442009-01-02 12:16:42 +0100653 }
654 return slice;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200655}
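/*
 * Worked example, with the unscaled 6 msec period: a nice-0 task (load 1024)
 * sharing a cfs_rq with a nice-5 task (load 335) gets a wall-time slice of
 * about 6 msec * 1024/1359 ~= 4.5 msec, while the nice-5 task gets about
 * 1.5 msec.
 */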
656
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200657/*
Andrei Epure660cc002013-03-11 12:03:20 +0200658 * We calculate the vruntime slice of a to-be-inserted task.
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200659 *
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200660 * vs = s/w
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200661 */
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200662static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnar647e7ca2007-10-15 17:00:13 +0200663{
Peter Zijlstraf9c0b092008-10-17 19:27:04 +0200664 return calc_delta_fair(sched_slice(cfs_rq, se), se);
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200665}
666
Alex Shia75cdaa2013-06-20 10:18:47 +0800667#ifdef CONFIG_SMP
Mel Gormanfb13c7e2013-10-07 11:29:17 +0100668static unsigned long task_h_load(struct task_struct *p);
669
Alex Shia75cdaa2013-06-20 10:18:47 +0800670static inline void __update_task_entity_contrib(struct sched_entity *se);
671
672/* Give new task start runnable values to heavy its load in infant time */
673void init_task_runnable_average(struct task_struct *p)
674{
675 u32 slice;
676
677 p->se.avg.decay_count = 0;
678 slice = sched_slice(task_cfs_rq(p), &p->se) >> 10;
679 p->se.avg.runnable_avg_sum = slice;
680 p->se.avg.runnable_avg_period = slice;
681 __update_task_entity_contrib(&p->se);
682}
683#else
684void init_task_runnable_average(struct task_struct *p)
685{
686}
687#endif
688
Peter Zijlstraa7be37a2008-06-27 13:41:11 +0200689/*
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100690 * Update the current task's runtime statistics.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200691 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +0200692static void update_curr(struct cfs_rq *cfs_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200693{
Ingo Molnar429d43b2007-10-15 17:00:03 +0200694 struct sched_entity *curr = cfs_rq->curr;
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200695 u64 now = rq_clock_task(rq_of(cfs_rq));
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100696 u64 delta_exec;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200697
698 if (unlikely(!curr))
699 return;
700
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100701 delta_exec = now - curr->exec_start;
702 if (unlikely((s64)delta_exec <= 0))
Peter Zijlstra34f28ec2008-12-16 08:45:31 +0100703 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200704
Ingo Molnar8ebc91d2007-10-15 17:00:03 +0200705 curr->exec_start = now;
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100706
Peter Zijlstra9dbdb152013-11-18 18:27:06 +0100707 schedstat_set(curr->statistics.exec_max,
708 max(delta_exec, curr->statistics.exec_max));
709
710 curr->sum_exec_runtime += delta_exec;
711 schedstat_add(cfs_rq, exec_clock, delta_exec);
712
713 curr->vruntime += calc_delta_fair(delta_exec, curr);
714 update_min_vruntime(cfs_rq);
715
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100716 if (entity_is_task(curr)) {
717 struct task_struct *curtask = task_of(curr);
718
Ingo Molnarf977bb42009-09-13 18:15:54 +0200719 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100720 cpuacct_charge(curtask, delta_exec);
Frank Mayharf06febc2008-09-12 09:54:39 -0700721 account_group_exec_runtime(curtask, delta_exec);
Srivatsa Vaddagirid842de82007-12-02 20:04:49 +0100722 }
Paul Turnerec12cb72011-07-21 09:43:30 -0700723
724 account_cfs_rq_runtime(cfs_rq, delta_exec);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200725}
726
727static inline void
Ingo Molnar5870db52007-08-09 11:16:47 +0200728update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200729{
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200730 schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200731}
732
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200733/*
734 * Task is being enqueued - update stats:
735 */
Ingo Molnard2417e52007-08-09 11:16:47 +0200736static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200737{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200738 /*
739 * Are we enqueueing a waiting task? (for current tasks
740 * a dequeue/enqueue event is a NOP)
741 */
Ingo Molnar429d43b2007-10-15 17:00:03 +0200742 if (se != cfs_rq->curr)
Ingo Molnar5870db52007-08-09 11:16:47 +0200743 update_stats_wait_start(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200744}
745
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200746static void
Ingo Molnar9ef0a962007-08-09 11:16:47 +0200747update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200748{
Lucas De Marchi41acab82010-03-10 23:37:45 -0300749 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200750 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
Lucas De Marchi41acab82010-03-10 23:37:45 -0300751 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
752 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200753 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
Peter Zijlstra768d0c22009-07-23 20:13:26 +0200754#ifdef CONFIG_SCHEDSTATS
755 if (entity_is_task(se)) {
756 trace_sched_stat_wait(task_of(se),
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200757 rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
Peter Zijlstra768d0c22009-07-23 20:13:26 +0200758 }
759#endif
Lucas De Marchi41acab82010-03-10 23:37:45 -0300760 schedstat_set(se->statistics.wait_start, 0);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200761}
762
763static inline void
Ingo Molnar19b6a2e2007-08-09 11:16:48 +0200764update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200765{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200766 /*
767 * Mark the end of the wait period if dequeueing a
768 * waiting task:
769 */
Ingo Molnar429d43b2007-10-15 17:00:03 +0200770 if (se != cfs_rq->curr)
Ingo Molnar9ef0a962007-08-09 11:16:47 +0200771 update_stats_wait_end(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200772}
773
774/*
775 * We are picking a new current task - update its stats:
776 */
777static inline void
Ingo Molnar79303e92007-08-09 11:16:47 +0200778update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200779{
780 /*
781 * We are starting a new run period:
782 */
Frederic Weisbecker78becc22013-04-12 01:51:02 +0200783 se->exec_start = rq_clock_task(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200784}
785
Ingo Molnarbf0f6f22007-07-09 18:51:58 +0200786/**************************************************
787 * Scheduling class queueing methods:
788 */
789
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200790#ifdef CONFIG_NUMA_BALANCING
791/*
Mel Gorman598f0ec2013-10-07 11:28:55 +0100792 * Approximate time to scan a task's full address space, in ms. The task scan
 793 * period is calculated based on the task's virtual memory size and
794 * numa_balancing_scan_size.
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200795 */
Mel Gorman598f0ec2013-10-07 11:28:55 +0100796unsigned int sysctl_numa_balancing_scan_period_min = 1000;
797unsigned int sysctl_numa_balancing_scan_period_max = 60000;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +0200798
799/* Portion of address space to scan in MB */
800unsigned int sysctl_numa_balancing_scan_size = 256;
Peter Zijlstracbee9f82012-10-25 14:16:43 +0200801
Peter Zijlstra4b96a292012-10-25 14:16:47 +0200802/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
803unsigned int sysctl_numa_balancing_scan_delay = 1000;
804
Mel Gorman598f0ec2013-10-07 11:28:55 +0100805static unsigned int task_nr_scan_windows(struct task_struct *p)
806{
807 unsigned long rss = 0;
808 unsigned long nr_scan_pages;
809
810 /*
811 * Calculations based on RSS as non-present and empty pages are skipped
812 * by the PTE scanner and NUMA hinting faults should be trapped based
813 * on resident pages
814 */
815 nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
816 rss = get_mm_rss(p->mm);
817 if (!rss)
818 rss = nr_scan_pages;
819
820 rss = round_up(rss, nr_scan_pages);
821 return rss / nr_scan_pages;
822}
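/*
 * Worked example, assuming 4 KB pages and the default 256 MB scan size:
 * nr_scan_pages = 256 << 8 = 65536 pages, so a task with a 1 GB RSS
 * (262144 pages) is covered in 262144 / 65536 = 4 scan windows, while a task
 * with no resident pages falls back to a single window.
 */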
823
824/* For sanitys sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
825#define MAX_SCAN_WINDOW 2560
826
827static unsigned int task_scan_min(struct task_struct *p)
828{
829 unsigned int scan, floor;
830 unsigned int windows = 1;
831
832 if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW)
833 windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size;
834 floor = 1000 / windows;
835
836 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
837 return max_t(unsigned int, floor, scan);
838}
839
840static unsigned int task_scan_max(struct task_struct *p)
841{
842 unsigned int smin = task_scan_min(p);
843 unsigned int smax;
844
845 /* Watch for min being lower than max due to floor calculations */
846 smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
847 return max(smin, smax);
848}
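/*
 * Worked example, continuing the 4-window task above with the defaults:
 * windows = 2560 / 256 = 10 gives a floor of 100 msec, and
 * 1000 msec / 4 windows = 250 msec, so task_scan_min() returns 250 msec,
 * while task_scan_max() returns 60000 msec / 4 = 15 seconds.
 */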
849
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +0100850static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
851{
852 rq->nr_numa_running += (p->numa_preferred_nid != -1);
853 rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
854}
855
856static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
857{
858 rq->nr_numa_running -= (p->numa_preferred_nid != -1);
859 rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
860}
861
Peter Zijlstra8c8a7432013-10-07 11:29:21 +0100862struct numa_group {
863 atomic_t refcount;
864
865 spinlock_t lock; /* nr_tasks, tasks */
866 int nr_tasks;
Mel Gormane29cf082013-10-07 11:29:22 +0100867 pid_t gid;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +0100868 struct list_head task_list;
869
870 struct rcu_head rcu;
Rik van Riel20e07de2014-01-27 17:03:43 -0500871 nodemask_t active_nodes;
Mel Gorman989348b2013-10-07 11:29:40 +0100872 unsigned long total_faults;
Rik van Riel7e2703e2014-01-27 17:03:45 -0500873 /*
874 * Faults_cpu is used to decide whether memory should move
875 * towards the CPU. As a consequence, these stats are weighted
876 * more by CPU use than by memory faults.
877 */
Rik van Riel50ec8a42014-01-27 17:03:42 -0500878 unsigned long *faults_cpu;
Mel Gorman989348b2013-10-07 11:29:40 +0100879 unsigned long faults[0];
Peter Zijlstra8c8a7432013-10-07 11:29:21 +0100880};
881
Rik van Rielbe1e4e72014-01-27 17:03:48 -0500882/* Shared or private faults. */
883#define NR_NUMA_HINT_FAULT_TYPES 2
884
885/* Memory and CPU locality */
886#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)
887
888/* Averaged statistics, and temporary buffers. */
889#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)
890
Mel Gormane29cf082013-10-07 11:29:22 +0100891pid_t task_numa_group_id(struct task_struct *p)
892{
893 return p->numa_group ? p->numa_group->gid : 0;
894}
895
Mel Gormanac8e8952013-10-07 11:29:03 +0100896static inline int task_faults_idx(int nid, int priv)
897{
Rik van Rielbe1e4e72014-01-27 17:03:48 -0500898 return NR_NUMA_HINT_FAULT_TYPES * nid + priv;
Mel Gormanac8e8952013-10-07 11:29:03 +0100899}
900
901static inline unsigned long task_faults(struct task_struct *p, int nid)
902{
Rik van Rielff1df892014-01-27 17:03:41 -0500903 if (!p->numa_faults_memory)
Mel Gormanac8e8952013-10-07 11:29:03 +0100904 return 0;
905
Rik van Rielff1df892014-01-27 17:03:41 -0500906 return p->numa_faults_memory[task_faults_idx(nid, 0)] +
907 p->numa_faults_memory[task_faults_idx(nid, 1)];
Mel Gormanac8e8952013-10-07 11:29:03 +0100908}
909
Mel Gorman83e1d2c2013-10-07 11:29:27 +0100910static inline unsigned long group_faults(struct task_struct *p, int nid)
911{
912 if (!p->numa_group)
913 return 0;
914
Wanpeng Li82897b42013-12-12 15:23:25 +0800915 return p->numa_group->faults[task_faults_idx(nid, 0)] +
916 p->numa_group->faults[task_faults_idx(nid, 1)];
Mel Gorman83e1d2c2013-10-07 11:29:27 +0100917}
918
Rik van Riel20e07de2014-01-27 17:03:43 -0500919static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
920{
921 return group->faults_cpu[task_faults_idx(nid, 0)] +
922 group->faults_cpu[task_faults_idx(nid, 1)];
923}
924
Mel Gorman83e1d2c2013-10-07 11:29:27 +0100925/*
926 * These return the fraction of accesses done by a particular task, or
927 * task group, on a particular numa node. The group weight is given a
928 * larger multiplier, in order to group tasks together that are almost
929 * evenly spread out between numa nodes.
930 */
931static inline unsigned long task_weight(struct task_struct *p, int nid)
932{
933 unsigned long total_faults;
934
Rik van Rielff1df892014-01-27 17:03:41 -0500935 if (!p->numa_faults_memory)
Mel Gorman83e1d2c2013-10-07 11:29:27 +0100936 return 0;
937
938 total_faults = p->total_numa_faults;
939
940 if (!total_faults)
941 return 0;
942
943 return 1000 * task_faults(p, nid) / total_faults;
944}
945
946static inline unsigned long group_weight(struct task_struct *p, int nid)
947{
Mel Gorman989348b2013-10-07 11:29:40 +0100948 if (!p->numa_group || !p->numa_group->total_faults)
Mel Gorman83e1d2c2013-10-07 11:29:27 +0100949 return 0;
950
Mel Gorman989348b2013-10-07 11:29:40 +0100951 return 1000 * group_faults(p, nid) / p->numa_group->total_faults;
Mel Gorman83e1d2c2013-10-07 11:29:27 +0100952}
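/*
 * Worked example (hypothetical fault counts): a task with 300 of its 1000
 * recorded faults on node 1 has task_weight(p, 1) == 300, and if its
 * numa_group has 4000 of its 5000 faults there, group_weight(p, 1) == 800,
 * i.e. both are per-mille fractions of the respective fault totals.
 */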
953
Rik van Riel10f39042014-01-27 17:03:44 -0500954bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
955 int src_nid, int dst_cpu)
956{
957 struct numa_group *ng = p->numa_group;
958 int dst_nid = cpu_to_node(dst_cpu);
959 int last_cpupid, this_cpupid;
960
961 this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);
962
963 /*
964 * Multi-stage node selection is used in conjunction with a periodic
965 * migration fault to build a temporal task<->page relation. By using
966 * a two-stage filter we remove short/unlikely relations.
967 *
968 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
969 * a task's usage of a particular page (n_p) per total usage of this
970 * page (n_t) (in a given time-span) to a probability.
971 *
972 * Our periodic faults will sample this probability and getting the
973 * same result twice in a row, given these samples are fully
974 * independent, is then given by P(n)^2, provided our sample period
975 * is sufficiently short compared to the usage pattern.
976 *
 977 * This quadratic squishes small probabilities, making it less likely we
978 * act on an unlikely task<->page relation.
979 */
980 last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
981 if (!cpupid_pid_unset(last_cpupid) &&
982 cpupid_to_nid(last_cpupid) != dst_nid)
983 return false;
984
985 /* Always allow migrate on private faults */
986 if (cpupid_match_pid(p, last_cpupid))
987 return true;
988
989 /* A shared fault, but p->numa_group has not been set up yet. */
990 if (!ng)
991 return true;
992
993 /*
994 * Do not migrate if the destination is not a node that
995 * is actively used by this numa group.
996 */
997 if (!node_isset(dst_nid, ng->active_nodes))
998 return false;
999
1000 /*
1001 * Source is a node that is not actively used by this
1002 * numa group, while the destination is. Migrate.
1003 */
1004 if (!node_isset(src_nid, ng->active_nodes))
1005 return true;
1006
1007 /*
1008 * Both source and destination are nodes in active
1009 * use by this numa group. Maximize memory bandwidth
1010 * by migrating from more heavily used groups, to less
1011 * heavily used ones, spreading the load around.
1012 * Use a 1/4 hysteresis to avoid spurious page movement.
1013 */
1014 return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
1015}
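/*
 * Worked example of the two-stage filter above: a page this task touches in
 * only ~10% of its accesses has roughly a 0.1 * 0.1 = 1% chance of producing
 * two consecutive hinting faults attributed to it, so such a weak task<->page
 * relation rarely results in a migration.
 */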
1016
Mel Gormane6628d52013-10-07 11:29:02 +01001017static unsigned long weighted_cpuload(const int cpu);
Mel Gorman58d081b2013-10-07 11:29:10 +01001018static unsigned long source_load(int cpu, int type);
1019static unsigned long target_load(int cpu, int type);
Nicolas Pitreced549f2014-05-26 18:19:38 -04001020static unsigned long capacity_of(int cpu);
Mel Gorman58d081b2013-10-07 11:29:10 +01001021static long effective_load(struct task_group *tg, int cpu, long wl, long wg);
Mel Gormane6628d52013-10-07 11:29:02 +01001022
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001023/* Cached statistics for all CPUs within a node */
Mel Gorman58d081b2013-10-07 11:29:10 +01001024struct numa_stats {
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001025 unsigned long nr_running;
Mel Gorman58d081b2013-10-07 11:29:10 +01001026 unsigned long load;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001027
1028 /* Total compute capacity of CPUs on a node */
Nicolas Pitre5ef20ca2014-05-26 18:19:34 -04001029 unsigned long compute_capacity;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001030
1031 /* Approximate capacity in terms of runnable tasks on a node */
Nicolas Pitre5ef20ca2014-05-26 18:19:34 -04001032 unsigned long task_capacity;
Nicolas Pitre1b6a7492014-05-26 18:19:35 -04001033 int has_free_capacity;
Mel Gorman58d081b2013-10-07 11:29:10 +01001034};
Mel Gormane6628d52013-10-07 11:29:02 +01001035
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001036/*
1037 * XXX borrowed from update_sg_lb_stats
1038 */
1039static void update_numa_stats(struct numa_stats *ns, int nid)
1040{
Rik van Riel83d7f242014-08-04 13:23:28 -04001041 int smt, cpu, cpus = 0;
1042 unsigned long capacity;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001043
1044 memset(ns, 0, sizeof(*ns));
1045 for_each_cpu(cpu, cpumask_of_node(nid)) {
1046 struct rq *rq = cpu_rq(cpu);
1047
1048 ns->nr_running += rq->nr_running;
1049 ns->load += weighted_cpuload(cpu);
Nicolas Pitreced549f2014-05-26 18:19:38 -04001050 ns->compute_capacity += capacity_of(cpu);
Peter Zijlstra5eca82a2013-11-06 18:47:57 +01001051
1052 cpus++;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001053 }
1054
Peter Zijlstra5eca82a2013-11-06 18:47:57 +01001055 /*
1056 * If we raced with hotplug and there are no CPUs left in our mask
1057 * the @ns structure is NULL'ed and task_numa_compare() will
1058 * not find this node attractive.
1059 *
Nicolas Pitre1b6a7492014-05-26 18:19:35 -04001060 * We'll either bail at !has_free_capacity, or we'll detect a huge
1061 * imbalance and bail there.
Peter Zijlstra5eca82a2013-11-06 18:47:57 +01001062 */
1063 if (!cpus)
1064 return;
1065
Rik van Riel83d7f242014-08-04 13:23:28 -04001066 /* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
1067 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
1068 capacity = cpus / smt; /* cores */
1069
1070 ns->task_capacity = min_t(unsigned, capacity,
1071 DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
Nicolas Pitre1b6a7492014-05-26 18:19:35 -04001072 ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001073}
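/*
 * Worked example, assuming a node with 4 non-SMT CPUs each reporting the full
 * SCHED_CAPACITY_SCALE of 1024: compute_capacity = 4096, so
 * smt = DIV_ROUND_UP(1024 * 4, 4096) = 1, capacity = 4 cores and
 * task_capacity = 4; has_free_capacity stays set while fewer than four tasks
 * are running on the node.
 */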
1074
Mel Gorman58d081b2013-10-07 11:29:10 +01001075struct task_numa_env {
1076 struct task_struct *p;
1077
1078 int src_cpu, src_nid;
1079 int dst_cpu, dst_nid;
1080
1081 struct numa_stats src_stats, dst_stats;
1082
Wanpeng Li40ea2b42013-12-05 19:10:17 +08001083 int imbalance_pct;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001084
1085 struct task_struct *best_task;
1086 long best_imp;
Mel Gorman58d081b2013-10-07 11:29:10 +01001087 int best_cpu;
1088};
1089
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001090static void task_numa_assign(struct task_numa_env *env,
1091 struct task_struct *p, long imp)
1092{
1093 if (env->best_task)
1094 put_task_struct(env->best_task);
1095 if (p)
1096 get_task_struct(p);
1097
1098 env->best_task = p;
1099 env->best_imp = imp;
1100 env->best_cpu = env->dst_cpu;
1101}
1102
Rik van Riel28a21742014-06-23 11:46:13 -04001103static bool load_too_imbalanced(long src_load, long dst_load,
Rik van Riele63da032014-05-14 13:22:21 -04001104 struct task_numa_env *env)
1105{
1106 long imb, old_imb;
Rik van Riel28a21742014-06-23 11:46:13 -04001107 long orig_src_load, orig_dst_load;
1108 long src_capacity, dst_capacity;
1109
1110 /*
1111 * The load is corrected for the CPU capacity available on each node.
1112 *
1113 * src_load dst_load
1114 * ------------ vs ---------
1115 * src_capacity dst_capacity
1116 */
1117 src_capacity = env->src_stats.compute_capacity;
1118 dst_capacity = env->dst_stats.compute_capacity;
Rik van Riele63da032014-05-14 13:22:21 -04001119
1120 /* We care about the slope of the imbalance, not the direction. */
1121 if (dst_load < src_load)
1122 swap(dst_load, src_load);
1123
1124 /* Is the difference below the threshold? */
Rik van Riel28a21742014-06-23 11:46:13 -04001125 imb = dst_load * src_capacity * 100 -
1126 src_load * dst_capacity * env->imbalance_pct;
Rik van Riele63da032014-05-14 13:22:21 -04001127 if (imb <= 0)
1128 return false;
1129
1130 /*
1131 * The imbalance is above the allowed threshold.
1132 * Compare it with the old imbalance.
1133 */
Rik van Riel28a21742014-06-23 11:46:13 -04001134 orig_src_load = env->src_stats.load;
1135 orig_dst_load = env->dst_stats.load;
1136
Rik van Riele63da032014-05-14 13:22:21 -04001137 if (orig_dst_load < orig_src_load)
1138 swap(orig_dst_load, orig_src_load);
1139
Rik van Riel28a21742014-06-23 11:46:13 -04001140 old_imb = orig_dst_load * src_capacity * 100 -
1141 orig_src_load * dst_capacity * env->imbalance_pct;
Rik van Riele63da032014-05-14 13:22:21 -04001142
1143 /* Would this change make things worse? */
Rik van Riel16628672014-06-08 16:55:57 -04001144 return (imb > old_imb);
Rik van Riele63da032014-05-14 13:22:21 -04001145}
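/*
 * Worked example, assuming equal node capacities (factored out) and the NUMA
 * default imbalance_pct of 112: with src_load = 1000 and dst_load = 1100 the
 * test computes 1100 * 100 - 1000 * 112 = -2000, within the threshold, so the
 * move is allowed. With dst_load = 1200 the result is +8000, above the
 * threshold, and the move is only allowed if it does not worsen the imbalance
 * that already existed (imb <= old_imb).
 */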
1146
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001147/*
1148 * This checks if the overall compute and NUMA accesses of the system would
1149 * be improved if the source tasks was migrated to the target dst_cpu taking
1150 * into account that it might be best if task running on the dst_cpu should
1151 * be exchanged with the source task
1152 */
Rik van Riel887c2902013-10-07 11:29:31 +01001153static void task_numa_compare(struct task_numa_env *env,
1154 long taskimp, long groupimp)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001155{
1156 struct rq *src_rq = cpu_rq(env->src_cpu);
1157 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1158 struct task_struct *cur;
Rik van Riel28a21742014-06-23 11:46:13 -04001159 long src_load, dst_load;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001160 long load;
Rik van Riel1c5d3eb2014-06-23 11:46:15 -04001161 long imp = env->p->numa_group ? groupimp : taskimp;
Rik van Riel0132c3e2014-06-23 11:46:16 -04001162 long moveimp = imp;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001163
1164 rcu_read_lock();
1165 cur = ACCESS_ONCE(dst_rq->curr);
1166 if (cur->pid == 0) /* idle */
1167 cur = NULL;
1168
1169 /*
1170 * "imp" is the fault differential for the source task between the
1171 * source and destination node. Calculate the total differential for
1172 * the source task and potential destination task. The more negative
 1173 * the value is, the more remote accesses that would be expected to
1174 * be incurred if the tasks were swapped.
1175 */
1176 if (cur) {
1177 /* Skip this swap candidate if cannot move to the source cpu */
1178 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1179 goto unlock;
1180
Rik van Riel887c2902013-10-07 11:29:31 +01001181 /*
1182 * If dst and source tasks are in the same NUMA group, or not
Rik van Rielca28aa532013-10-07 11:29:32 +01001183 * in any group then look only at task weights.
Rik van Riel887c2902013-10-07 11:29:31 +01001184 */
Rik van Rielca28aa532013-10-07 11:29:32 +01001185 if (cur->numa_group == env->p->numa_group) {
Rik van Riel887c2902013-10-07 11:29:31 +01001186 imp = taskimp + task_weight(cur, env->src_nid) -
1187 task_weight(cur, env->dst_nid);
Rik van Rielca28aa532013-10-07 11:29:32 +01001188 /*
1189 * Add some hysteresis to prevent swapping the
1190 * tasks within a group over tiny differences.
1191 */
1192 if (cur->numa_group)
1193 imp -= imp/16;
Rik van Riel887c2902013-10-07 11:29:31 +01001194 } else {
Rik van Rielca28aa532013-10-07 11:29:32 +01001195 /*
1196 * Compare the group weights. If a task is all by
1197 * itself (not part of a group), use the task weight
1198 * instead.
1199 */
Rik van Rielca28aa532013-10-07 11:29:32 +01001200 if (cur->numa_group)
1201 imp += group_weight(cur, env->src_nid) -
1202 group_weight(cur, env->dst_nid);
1203 else
1204 imp += task_weight(cur, env->src_nid) -
1205 task_weight(cur, env->dst_nid);
Rik van Riel887c2902013-10-07 11:29:31 +01001206 }
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001207 }
1208
Rik van Riel0132c3e2014-06-23 11:46:16 -04001209 if (imp <= env->best_imp && moveimp <= env->best_imp)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001210 goto unlock;
1211
1212 if (!cur) {
1213 /* Is there capacity at our destination? */
Rik van Rielb932c032014-08-04 13:23:27 -04001214 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
Nicolas Pitre1b6a7492014-05-26 18:19:35 -04001215 !env->dst_stats.has_free_capacity)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001216 goto unlock;
1217
1218 goto balance;
1219 }
1220
1221 /* Balance doesn't matter much if we're running a task per cpu */
Rik van Riel0132c3e2014-06-23 11:46:16 -04001222 if (imp > env->best_imp && src_rq->nr_running == 1 &&
1223 dst_rq->nr_running == 1)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001224 goto assign;
1225
1226 /*
1227 * In the overloaded case, try and keep the load balanced.
1228 */
1229balance:
Peter Zijlstrae720fff2014-07-11 16:01:53 +02001230 load = task_h_load(env->p);
1231 dst_load = env->dst_stats.load + load;
1232 src_load = env->src_stats.load - load;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001233
Rik van Riel0132c3e2014-06-23 11:46:16 -04001234 if (moveimp > imp && moveimp > env->best_imp) {
1235 /*
1236 * If the improvement from just moving env->p direction is
1237 * better than swapping tasks around, check if a move is
1238 * possible. Store a slightly smaller score than moveimp,
1239 * so an actually idle CPU will win.
1240 */
1241 if (!load_too_imbalanced(src_load, dst_load, env)) {
1242 imp = moveimp - 1;
1243 cur = NULL;
1244 goto assign;
1245 }
1246 }
1247
1248 if (imp <= env->best_imp)
1249 goto unlock;
1250
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001251 if (cur) {
Peter Zijlstrae720fff2014-07-11 16:01:53 +02001252 load = task_h_load(cur);
1253 dst_load -= load;
1254 src_load += load;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001255 }
1256
Rik van Riel28a21742014-06-23 11:46:13 -04001257 if (load_too_imbalanced(src_load, dst_load, env))
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001258 goto unlock;
1259
1260assign:
1261 task_numa_assign(env, cur, imp);
1262unlock:
1263 rcu_read_unlock();
1264}
1265
Rik van Riel887c2902013-10-07 11:29:31 +01001266static void task_numa_find_cpu(struct task_numa_env *env,
1267 long taskimp, long groupimp)
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001268{
1269 int cpu;
1270
1271 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1272 /* Skip this CPU if the source task cannot migrate */
1273 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1274 continue;
1275
1276 env->dst_cpu = cpu;
Rik van Riel887c2902013-10-07 11:29:31 +01001277 task_numa_compare(env, taskimp, groupimp);
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001278 }
1279}
1280
Mel Gorman58d081b2013-10-07 11:29:10 +01001281static int task_numa_migrate(struct task_struct *p)
Mel Gormane6628d52013-10-07 11:29:02 +01001282{
Mel Gorman58d081b2013-10-07 11:29:10 +01001283 struct task_numa_env env = {
1284 .p = p,
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001285
Mel Gorman58d081b2013-10-07 11:29:10 +01001286 .src_cpu = task_cpu(p),
Ingo Molnarb32e86b2013-10-07 11:29:30 +01001287 .src_nid = task_node(p),
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001288
1289 .imbalance_pct = 112,
1290
1291 .best_task = NULL,
1292 .best_imp = 0,
1293 .best_cpu = -1
Mel Gorman58d081b2013-10-07 11:29:10 +01001294 };
1295 struct sched_domain *sd;
Rik van Riel887c2902013-10-07 11:29:31 +01001296 unsigned long taskweight, groupweight;
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001297 int nid, ret;
Rik van Riel887c2902013-10-07 11:29:31 +01001298 long taskimp, groupimp;
Mel Gormane6628d52013-10-07 11:29:02 +01001299
Mel Gorman58d081b2013-10-07 11:29:10 +01001300 /*
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001301 * Pick the lowest SD_NUMA domain, as that would have the smallest
1302 * imbalance and would be the first to start moving tasks about.
1303 *
1304 * And we want to avoid any moving of tasks about, as that would create
1305 * random movement of tasks -- counter the numa conditions we're trying
1306 * to satisfy here.
Mel Gorman58d081b2013-10-07 11:29:10 +01001307 */
Mel Gormane6628d52013-10-07 11:29:02 +01001308 rcu_read_lock();
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001309 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
Rik van Riel46a73e82013-11-11 19:29:25 -05001310 if (sd)
1311 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
Mel Gormane6628d52013-10-07 11:29:02 +01001312 rcu_read_unlock();
1313
Rik van Riel46a73e82013-11-11 19:29:25 -05001314 /*
1315 * Cpusets can break the scheduler domain tree into smaller
1316 * balance domains, some of which do not cross NUMA boundaries.
1317 * Tasks that are "trapped" in such domains cannot be migrated
1318 * elsewhere, so there is no point in (re)trying.
1319 */
1320 if (unlikely(!sd)) {
Wanpeng Lide1b3012013-12-12 15:23:24 +08001321 p->numa_preferred_nid = task_node(p);
Rik van Riel46a73e82013-11-11 19:29:25 -05001322 return -EINVAL;
1323 }
1324
Rik van Riel887c2902013-10-07 11:29:31 +01001325 taskweight = task_weight(p, env.src_nid);
1326 groupweight = group_weight(p, env.src_nid);
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001327 update_numa_stats(&env.src_stats, env.src_nid);
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001328 env.dst_nid = p->numa_preferred_nid;
Rik van Riel887c2902013-10-07 11:29:31 +01001329 taskimp = task_weight(p, env.dst_nid) - taskweight;
1330 groupimp = group_weight(p, env.dst_nid) - groupweight;
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001331 update_numa_stats(&env.dst_stats, env.dst_nid);
Mel Gorman58d081b2013-10-07 11:29:10 +01001332
Rik van Riela43455a2014-06-04 16:09:42 -04001333 /* Try to find a spot on the preferred nid. */
1334 task_numa_find_cpu(&env, taskimp, groupimp);
Rik van Riele1dda8a2013-10-07 11:29:19 +01001335
1336 /* No space available on the preferred nid. Look elsewhere. */
1337 if (env.best_cpu == -1) {
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001338 for_each_online_node(nid) {
1339 if (nid == env.src_nid || nid == p->numa_preferred_nid)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001340 continue;
1341
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001342			/* Only consider nodes where both the task and its group benefit */
Rik van Riel887c2902013-10-07 11:29:31 +01001343 taskimp = task_weight(p, nid) - taskweight;
1344 groupimp = group_weight(p, nid) - groupweight;
1345 if (taskimp < 0 && groupimp < 0)
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001346 continue;
1347
1348 env.dst_nid = nid;
1349 update_numa_stats(&env.dst_stats, env.dst_nid);
Rik van Riel887c2902013-10-07 11:29:31 +01001350 task_numa_find_cpu(&env, taskimp, groupimp);
Mel Gorman58d081b2013-10-07 11:29:10 +01001351 }
1352 }
1353
Rik van Riel68d1b022014-04-11 13:00:29 -04001354 /*
1355 * If the task is part of a workload that spans multiple NUMA nodes,
1356 * and is migrating into one of the workload's active nodes, remember
1357 * this node as the task's preferred numa node, so the workload can
1358 * settle down.
1359 * A task that migrated to a second choice node will be better off
1360 * trying for a better one later. Do not set the preferred node here.
1361 */
Rik van Rieldb015da2014-06-23 11:41:34 -04001362 if (p->numa_group) {
1363 if (env.best_cpu == -1)
1364 nid = env.src_nid;
1365 else
1366 nid = env.dst_nid;
1367
1368 if (node_isset(nid, p->numa_group->active_nodes))
1369 sched_setnuma(p, env.dst_nid);
1370 }
1371
1372 /* No better CPU than the current one was found. */
1373 if (env.best_cpu == -1)
1374 return -EAGAIN;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001375
Rik van Riel04bb2f92013-10-07 11:29:36 +01001376 /*
1377 * Reset the scan period if the task is being rescheduled on an
 1378	 * alternative node to recheck if the task is now properly placed.
1379 */
1380 p->numa_scan_period = task_scan_min(p);
1381
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001382 if (env.best_task == NULL) {
Mel Gorman286549d2014-01-21 15:51:03 -08001383 ret = migrate_task_to(p, env.best_cpu);
1384 if (ret != 0)
1385 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001386 return ret;
1387 }
1388
1389 ret = migrate_swap(p, env.best_task);
Mel Gorman286549d2014-01-21 15:51:03 -08001390 if (ret != 0)
1391 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001392 put_task_struct(env.best_task);
1393 return ret;
Mel Gormane6628d52013-10-07 11:29:02 +01001394}
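/*
 * To summarize the return values of task_numa_migrate() above: -EINVAL when
 * no SD_NUMA domain covers the source CPU (cpuset split), -EAGAIN when no
 * better CPU than the current one was found, otherwise the result of
 * migrate_task_to() (idle destination CPU) or migrate_swap() (destination
 * busy, env.best_task set).
 */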
1395
Mel Gorman6b9a7462013-10-07 11:29:11 +01001396/* Attempt to migrate a task to a CPU on the preferred node. */
1397static void numa_migrate_preferred(struct task_struct *p)
1398{
Rik van Riel5085e2a2014-04-11 13:00:28 -04001399 unsigned long interval = HZ;
1400
Rik van Riel2739d3e2013-10-07 11:29:41 +01001401 /* This task has no NUMA fault statistics yet */
Rik van Rielff1df892014-01-27 17:03:41 -05001402 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults_memory))
Rik van Riel2739d3e2013-10-07 11:29:41 +01001403 return;
1404
1405 /* Periodically retry migrating the task to the preferred node */
Rik van Riel5085e2a2014-04-11 13:00:28 -04001406 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1407 p->numa_migrate_retry = jiffies + interval;
Rik van Riel2739d3e2013-10-07 11:29:41 +01001408
Mel Gorman6b9a7462013-10-07 11:29:11 +01001409 /* Success if task is already running on preferred CPU */
Wanpeng Lide1b3012013-12-12 15:23:24 +08001410 if (task_node(p) == p->numa_preferred_nid)
Mel Gorman6b9a7462013-10-07 11:29:11 +01001411 return;
1412
Mel Gorman6b9a7462013-10-07 11:29:11 +01001413 /* Otherwise, try migrate to a CPU on the preferred node */
Rik van Riel2739d3e2013-10-07 11:29:41 +01001414 task_numa_migrate(p);
Mel Gorman6b9a7462013-10-07 11:29:11 +01001415}
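/*
 * A rough worked example of the retry interval above, assuming HZ=1000:
 * with a scan period of 1000ms, msecs_to_jiffies(1000) / 16 is ~62 jiffies,
 * so a misplaced task may retry task_numa_migrate() roughly every 62ms, and
 * in any case no sooner than the HZ (one second) cap allows for very long
 * scan periods.
 */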
1416
Rik van Riel04bb2f92013-10-07 11:29:36 +01001417/*
Rik van Riel20e07de2014-01-27 17:03:43 -05001418 * Find the nodes on which the workload is actively running. We do this by
1419 * tracking the nodes from which NUMA hinting faults are triggered. This can
1420 * be different from the set of nodes where the workload's memory is currently
1421 * located.
1422 *
1423 * The bitmask is used to make smarter decisions on when to do NUMA page
 1424 * migrations. To prevent flip-flopping and excessive page migrations, nodes
1425 * are added when they cause over 6/16 of the maximum number of faults, but
1426 * only removed when they drop below 3/16.
1427 */
1428static void update_numa_active_node_mask(struct numa_group *numa_group)
1429{
1430 unsigned long faults, max_faults = 0;
1431 int nid;
1432
1433 for_each_online_node(nid) {
1434 faults = group_faults_cpu(numa_group, nid);
1435 if (faults > max_faults)
1436 max_faults = faults;
1437 }
1438
1439 for_each_online_node(nid) {
1440 faults = group_faults_cpu(numa_group, nid);
1441 if (!node_isset(nid, numa_group->active_nodes)) {
1442 if (faults > max_faults * 6 / 16)
1443 node_set(nid, numa_group->active_nodes);
1444 } else if (faults < max_faults * 3 / 16)
1445 node_clear(nid, numa_group->active_nodes);
1446 }
1447}
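/*
 * Illustrative numbers for the thresholds above: if the busiest node shows
 * max_faults = 1600 hinting faults, a node is added to the active set once
 * it exceeds 600 faults (6/16) but is only dropped again when it falls
 * below 300 (3/16), so a node hovering around a single threshold does not
 * flip in and out of the mask.
 */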
1448
1449/*
Rik van Riel04bb2f92013-10-07 11:29:36 +01001450 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1451 * increments. The more local the fault statistics are, the higher the scan
Rik van Riela22b4b02014-06-23 11:41:35 -04001452 * period will be for the next scan window. If local/(local+remote) ratio is
 1453 * below NUMA_PERIOD_THRESHOLD (the ratio ranges over 1..NUMA_PERIOD_SLOTS),
1454 * the scan period will decrease. Aim for 70% local accesses.
Rik van Riel04bb2f92013-10-07 11:29:36 +01001455 */
1456#define NUMA_PERIOD_SLOTS 10
Rik van Riela22b4b02014-06-23 11:41:35 -04001457#define NUMA_PERIOD_THRESHOLD 7
Rik van Riel04bb2f92013-10-07 11:29:36 +01001458
1459/*
1460 * Increase the scan period (slow down scanning) if the majority of
1461 * our memory is already on our local node, or if the majority of
1462 * the page accesses are shared with other processes.
1463 * Otherwise, decrease the scan period.
1464 */
1465static void update_task_scan_period(struct task_struct *p,
1466 unsigned long shared, unsigned long private)
1467{
1468 unsigned int period_slot;
1469 int ratio;
1470 int diff;
1471
1472 unsigned long remote = p->numa_faults_locality[0];
1473 unsigned long local = p->numa_faults_locality[1];
1474
1475 /*
 1476	 * If there were no recorded hinting faults then either the task is
 1477	 * completely idle or all activity is in areas that are not of interest
 1478	 * to automatic NUMA balancing. Scan slower.
1479 */
1480 if (local + shared == 0) {
1481 p->numa_scan_period = min(p->numa_scan_period_max,
1482 p->numa_scan_period << 1);
1483
1484 p->mm->numa_next_scan = jiffies +
1485 msecs_to_jiffies(p->numa_scan_period);
1486
1487 return;
1488 }
1489
1490 /*
1491 * Prepare to scale scan period relative to the current period.
1492 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1493 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1494 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1495 */
1496 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1497 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1498 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1499 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1500 if (!slot)
1501 slot = 1;
1502 diff = slot * period_slot;
1503 } else {
1504 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1505
1506 /*
1507 * Scale scan rate increases based on sharing. There is an
1508 * inverse relationship between the degree of sharing and
1509 * the adjustment made to the scanning period. Broadly
1510 * speaking the intent is that there is little point
1511 * scanning faster if shared accesses dominate as it may
1512 * simply bounce migrations uselessly
1513 */
Rik van Riel04bb2f92013-10-07 11:29:36 +01001514 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared));
1515 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1516 }
1517
1518 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1519 task_scan_min(p), task_scan_max(p));
1520 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1521}
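/*
 * A rough worked example of the adjustment above, with a current scan
 * period of 1000ms (so period_slot = 100ms per slot):
 *
 *  - local = 900, remote = 100: ratio = 9 >= NUMA_PERIOD_THRESHOLD,
 *    slot = 2, diff = +200ms -> scan slower; the period grows to 1200ms,
 *    subject to the task_scan_min()/task_scan_max() clamp.
 *  - local = 500, remote = 500: ratio = 5 < threshold,
 *    diff = -(7 - 5) * 100ms = -200ms, then scaled down by the
 *    private/(private + shared) ratio, so the scan only speeds up to the
 *    extent the faults are private.
 */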
1522
Rik van Riel7e2703e2014-01-27 17:03:45 -05001523/*
1524 * Get the fraction of time the task has been running since the last
1525 * NUMA placement cycle. The scheduler keeps similar statistics, but
1526 * decays those on a 32ms period, which is orders of magnitude off
1527 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1528 * stats only if the task is so new there are no NUMA statistics yet.
1529 */
1530static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1531{
1532 u64 runtime, delta, now;
1533 /* Use the start of this time slice to avoid calculations. */
1534 now = p->se.exec_start;
1535 runtime = p->se.sum_exec_runtime;
1536
1537 if (p->last_task_numa_placement) {
1538 delta = runtime - p->last_sum_exec_runtime;
1539 *period = now - p->last_task_numa_placement;
1540 } else {
1541 delta = p->se.avg.runnable_avg_sum;
1542 *period = p->se.avg.runnable_avg_period;
1543 }
1544
1545 p->last_sum_exec_runtime = runtime;
1546 p->last_task_numa_placement = now;
1547
1548 return delta;
1549}
1550
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001551static void task_numa_placement(struct task_struct *p)
1552{
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001553 int seq, nid, max_nid = -1, max_group_nid = -1;
1554 unsigned long max_faults = 0, max_group_faults = 0;
Rik van Riel04bb2f92013-10-07 11:29:36 +01001555 unsigned long fault_types[2] = { 0, 0 };
Rik van Riel7e2703e2014-01-27 17:03:45 -05001556 unsigned long total_faults;
1557 u64 runtime, period;
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001558 spinlock_t *group_lock = NULL;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001559
Hugh Dickins2832bc12012-12-19 17:42:16 -08001560 seq = ACCESS_ONCE(p->mm->numa_scan_seq);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001561 if (p->numa_scan_seq == seq)
1562 return;
1563 p->numa_scan_seq = seq;
Mel Gorman598f0ec2013-10-07 11:28:55 +01001564 p->numa_scan_period_max = task_scan_max(p);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001565
Rik van Riel7e2703e2014-01-27 17:03:45 -05001566 total_faults = p->numa_faults_locality[0] +
1567 p->numa_faults_locality[1];
1568 runtime = numa_get_avg_runtime(p, &period);
1569
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001570 /* If the task is part of a group prevent parallel updates to group stats */
1571 if (p->numa_group) {
1572 group_lock = &p->numa_group->lock;
Mike Galbraith60e69ee2014-04-07 10:55:15 +02001573 spin_lock_irq(group_lock);
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001574 }
1575
Mel Gorman688b7582013-10-07 11:28:58 +01001576 /* Find the node with the highest number of faults */
1577 for_each_online_node(nid) {
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001578 unsigned long faults = 0, group_faults = 0;
Mel Gormanac8e8952013-10-07 11:29:03 +01001579 int priv, i;
Mel Gorman745d6142013-10-07 11:28:59 +01001580
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001581 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
Rik van Riel7e2703e2014-01-27 17:03:45 -05001582 long diff, f_diff, f_weight;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001583
Mel Gormanac8e8952013-10-07 11:29:03 +01001584 i = task_faults_idx(nid, priv);
Mel Gorman745d6142013-10-07 11:28:59 +01001585
Mel Gormanac8e8952013-10-07 11:29:03 +01001586 /* Decay existing window, copy faults since last scan */
Rik van Riel35664fd2014-01-27 17:03:46 -05001587 diff = p->numa_faults_buffer_memory[i] - p->numa_faults_memory[i] / 2;
Rik van Rielff1df892014-01-27 17:03:41 -05001588 fault_types[priv] += p->numa_faults_buffer_memory[i];
1589 p->numa_faults_buffer_memory[i] = 0;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001590
Rik van Riel7e2703e2014-01-27 17:03:45 -05001591 /*
 1592			 * Normalize the cpu faults, so all tasks in a group
1593 * count according to CPU use, instead of by the raw
1594 * number of faults. Tasks with little runtime have
1595 * little over-all impact on throughput, and thus their
1596 * faults are less important.
1597 */
1598 f_weight = div64_u64(runtime << 16, period + 1);
1599 f_weight = (f_weight * p->numa_faults_buffer_cpu[i]) /
1600 (total_faults + 1);
Rik van Riel35664fd2014-01-27 17:03:46 -05001601 f_diff = f_weight - p->numa_faults_cpu[i] / 2;
Rik van Riel50ec8a42014-01-27 17:03:42 -05001602 p->numa_faults_buffer_cpu[i] = 0;
1603
Rik van Riel35664fd2014-01-27 17:03:46 -05001604 p->numa_faults_memory[i] += diff;
1605 p->numa_faults_cpu[i] += f_diff;
Rik van Rielff1df892014-01-27 17:03:41 -05001606 faults += p->numa_faults_memory[i];
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001607 p->total_numa_faults += diff;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001608 if (p->numa_group) {
1609 /* safe because we can only change our own group */
Mel Gorman989348b2013-10-07 11:29:40 +01001610 p->numa_group->faults[i] += diff;
Rik van Riel50ec8a42014-01-27 17:03:42 -05001611 p->numa_group->faults_cpu[i] += f_diff;
Mel Gorman989348b2013-10-07 11:29:40 +01001612 p->numa_group->total_faults += diff;
1613 group_faults += p->numa_group->faults[i];
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001614 }
Mel Gormanac8e8952013-10-07 11:29:03 +01001615 }
1616
Mel Gorman688b7582013-10-07 11:28:58 +01001617 if (faults > max_faults) {
1618 max_faults = faults;
1619 max_nid = nid;
1620 }
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001621
1622 if (group_faults > max_group_faults) {
1623 max_group_faults = group_faults;
1624 max_group_nid = nid;
1625 }
1626 }
1627
Rik van Riel04bb2f92013-10-07 11:29:36 +01001628 update_task_scan_period(p, fault_types[0], fault_types[1]);
1629
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001630 if (p->numa_group) {
Rik van Riel20e07de2014-01-27 17:03:43 -05001631 update_numa_active_node_mask(p->numa_group);
Mike Galbraith60e69ee2014-04-07 10:55:15 +02001632 spin_unlock_irq(group_lock);
Rik van Rielf0b8a4a2014-06-23 11:41:29 -04001633 max_nid = max_group_nid;
Mel Gorman688b7582013-10-07 11:28:58 +01001634 }
1635
Rik van Rielbb97fc32014-06-04 16:33:15 -04001636 if (max_faults) {
1637 /* Set the new preferred node */
1638 if (max_nid != p->numa_preferred_nid)
1639 sched_setnuma(p, max_nid);
1640
1641 if (task_node(p) != p->numa_preferred_nid)
1642 numa_migrate_preferred(p);
Mel Gorman3a7053b2013-10-07 11:29:00 +01001643 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001644}
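/*
 * The decay above halves the accumulated statistics every scan window and
 * folds in the new counters, e.g. for one bucket:
 *
 *	numa_faults_memory[i] = 100, numa_faults_buffer_memory[i] = 30
 *	diff = 30 - 100 / 2 = -20
 *	numa_faults_memory[i] becomes 100 + (-20) = 80
 *
 * so the per-node counters track a decaying average of recent faults rather
 * than an unbounded total.
 */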
1645
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001646static inline int get_numa_group(struct numa_group *grp)
1647{
1648 return atomic_inc_not_zero(&grp->refcount);
1649}
1650
1651static inline void put_numa_group(struct numa_group *grp)
1652{
1653 if (atomic_dec_and_test(&grp->refcount))
1654 kfree_rcu(grp, rcu);
1655}
1656
Mel Gorman3e6a9412013-10-07 11:29:35 +01001657static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1658 int *priv)
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001659{
1660 struct numa_group *grp, *my_grp;
1661 struct task_struct *tsk;
1662 bool join = false;
1663 int cpu = cpupid_to_cpu(cpupid);
1664 int i;
1665
1666 if (unlikely(!p->numa_group)) {
1667 unsigned int size = sizeof(struct numa_group) +
Rik van Riel50ec8a42014-01-27 17:03:42 -05001668 4*nr_node_ids*sizeof(unsigned long);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001669
1670 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1671 if (!grp)
1672 return;
1673
1674 atomic_set(&grp->refcount, 1);
1675 spin_lock_init(&grp->lock);
1676 INIT_LIST_HEAD(&grp->task_list);
Mel Gormane29cf082013-10-07 11:29:22 +01001677 grp->gid = p->pid;
Rik van Riel50ec8a42014-01-27 17:03:42 -05001678 /* Second half of the array tracks nids where faults happen */
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001679 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
1680 nr_node_ids;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001681
Rik van Riel20e07de2014-01-27 17:03:43 -05001682 node_set(task_node(current), grp->active_nodes);
1683
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001684 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
Rik van Rielff1df892014-01-27 17:03:41 -05001685 grp->faults[i] = p->numa_faults_memory[i];
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001686
Mel Gorman989348b2013-10-07 11:29:40 +01001687 grp->total_faults = p->total_numa_faults;
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001688
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001689 list_add(&p->numa_entry, &grp->task_list);
1690 grp->nr_tasks++;
1691 rcu_assign_pointer(p->numa_group, grp);
1692 }
1693
1694 rcu_read_lock();
1695 tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
1696
1697 if (!cpupid_match_pid(tsk, cpupid))
Peter Zijlstra33547812013-10-09 10:24:48 +02001698 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001699
1700 grp = rcu_dereference(tsk->numa_group);
1701 if (!grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001702 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001703
1704 my_grp = p->numa_group;
1705 if (grp == my_grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001706 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001707
1708 /*
 1709	 * Only join the other group if it's bigger; if we're the bigger group,
1710 * the other task will join us.
1711 */
1712 if (my_grp->nr_tasks > grp->nr_tasks)
Peter Zijlstra33547812013-10-09 10:24:48 +02001713 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001714
1715 /*
1716 * Tie-break on the grp address.
1717 */
1718 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001719 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001720
Rik van Rieldabe1d92013-10-07 11:29:34 +01001721 /* Always join threads in the same process. */
1722 if (tsk->mm == current->mm)
1723 join = true;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001724
Rik van Rieldabe1d92013-10-07 11:29:34 +01001725 /* Simple filter to avoid false positives due to PID collisions */
1726 if (flags & TNF_SHARED)
1727 join = true;
1728
Mel Gorman3e6a9412013-10-07 11:29:35 +01001729 /* Update priv based on whether false sharing was detected */
1730 *priv = !join;
1731
Rik van Rieldabe1d92013-10-07 11:29:34 +01001732 if (join && !get_numa_group(grp))
Peter Zijlstra33547812013-10-09 10:24:48 +02001733 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001734
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001735 rcu_read_unlock();
1736
1737 if (!join)
1738 return;
1739
Mike Galbraith60e69ee2014-04-07 10:55:15 +02001740 BUG_ON(irqs_disabled());
1741 double_lock_irq(&my_grp->lock, &grp->lock);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001742
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001743 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
Rik van Rielff1df892014-01-27 17:03:41 -05001744 my_grp->faults[i] -= p->numa_faults_memory[i];
1745 grp->faults[i] += p->numa_faults_memory[i];
Mel Gorman989348b2013-10-07 11:29:40 +01001746 }
1747 my_grp->total_faults -= p->total_numa_faults;
1748 grp->total_faults += p->total_numa_faults;
1749
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001750 list_move(&p->numa_entry, &grp->task_list);
1751 my_grp->nr_tasks--;
1752 grp->nr_tasks++;
1753
1754 spin_unlock(&my_grp->lock);
Mike Galbraith60e69ee2014-04-07 10:55:15 +02001755 spin_unlock_irq(&grp->lock);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001756
1757 rcu_assign_pointer(p->numa_group, grp);
1758
1759 put_numa_group(my_grp);
Peter Zijlstra33547812013-10-09 10:24:48 +02001760 return;
1761
1762no_join:
1763 rcu_read_unlock();
1764 return;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001765}
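/*
 * To recap the grouping logic above: a task lazily allocates its own
 * numa_group on first use, then compares against the group of the task that
 * last touched the faulting page (identified via the cpupid). The smaller of
 * the two groups joins the larger one (ties broken on the group address),
 * and only when the tasks share an mm or the fault was TNF_SHARED; the fault
 * statistics are then moved across with both group locks held so the totals
 * stay consistent.
 */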
1766
1767void task_numa_free(struct task_struct *p)
1768{
1769 struct numa_group *grp = p->numa_group;
Rik van Rielff1df892014-01-27 17:03:41 -05001770 void *numa_faults = p->numa_faults_memory;
Steven Rostedte9dd6852014-05-27 17:02:04 -04001771 unsigned long flags;
1772 int i;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001773
1774 if (grp) {
Steven Rostedte9dd6852014-05-27 17:02:04 -04001775 spin_lock_irqsave(&grp->lock, flags);
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001776 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
Rik van Rielff1df892014-01-27 17:03:41 -05001777 grp->faults[i] -= p->numa_faults_memory[i];
Mel Gorman989348b2013-10-07 11:29:40 +01001778 grp->total_faults -= p->total_numa_faults;
1779
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001780 list_del(&p->numa_entry);
1781 grp->nr_tasks--;
Steven Rostedte9dd6852014-05-27 17:02:04 -04001782 spin_unlock_irqrestore(&grp->lock, flags);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001783 rcu_assign_pointer(p->numa_group, NULL);
1784 put_numa_group(grp);
1785 }
1786
Rik van Rielff1df892014-01-27 17:03:41 -05001787 p->numa_faults_memory = NULL;
1788 p->numa_faults_buffer_memory = NULL;
Rik van Riel50ec8a42014-01-27 17:03:42 -05001789	p->numa_faults_cpu = NULL;
1790 p->numa_faults_buffer_cpu = NULL;
Rik van Riel82727012013-10-07 11:29:28 +01001791 kfree(numa_faults);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001792}
1793
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001794/*
1795 * Got a PROT_NONE fault for a page on @node.
1796 */
Rik van Riel58b46da2014-01-27 17:03:47 -05001797void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001798{
1799 struct task_struct *p = current;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001800 bool migrated = flags & TNF_MIGRATED;
Rik van Riel58b46da2014-01-27 17:03:47 -05001801 int cpu_node = task_node(current);
Rik van Riel792568e2014-04-11 13:00:27 -04001802 int local = !!(flags & TNF_FAULT_LOCAL);
Mel Gormanac8e8952013-10-07 11:29:03 +01001803 int priv;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001804
Dave Kleikamp10e84b92013-07-31 13:53:35 -07001805 if (!numabalancing_enabled)
Mel Gorman1a687c22012-11-22 11:16:36 +00001806 return;
1807
Mel Gorman9ff1d9f2013-10-07 11:29:04 +01001808 /* for example, ksmd faulting in a user's mm */
1809 if (!p->mm)
1810 return;
1811
Rik van Riel82727012013-10-07 11:29:28 +01001812 /* Do not worry about placement if exiting */
1813 if (p->state == TASK_DEAD)
1814 return;
1815
Mel Gormanf809ca92013-10-07 11:28:57 +01001816 /* Allocate buffer to track faults on a per-node basis */
Rik van Rielff1df892014-01-27 17:03:41 -05001817 if (unlikely(!p->numa_faults_memory)) {
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001818 int size = sizeof(*p->numa_faults_memory) *
1819 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
Mel Gormanf809ca92013-10-07 11:28:57 +01001820
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001821 p->numa_faults_memory = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
Rik van Rielff1df892014-01-27 17:03:41 -05001822 if (!p->numa_faults_memory)
Mel Gormanf809ca92013-10-07 11:28:57 +01001823 return;
Mel Gorman745d6142013-10-07 11:28:59 +01001824
Rik van Rielff1df892014-01-27 17:03:41 -05001825 BUG_ON(p->numa_faults_buffer_memory);
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001826 /*
1827 * The averaged statistics, shared & private, memory & cpu,
1828 * occupy the first half of the array. The second half of the
1829 * array is for current counters, which are averaged into the
1830 * first set by task_numa_placement.
1831 */
Rik van Riel50ec8a42014-01-27 17:03:42 -05001832 p->numa_faults_cpu = p->numa_faults_memory + (2 * nr_node_ids);
1833 p->numa_faults_buffer_memory = p->numa_faults_memory + (4 * nr_node_ids);
1834 p->numa_faults_buffer_cpu = p->numa_faults_memory + (6 * nr_node_ids);
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001835 p->total_numa_faults = 0;
Rik van Riel04bb2f92013-10-07 11:29:36 +01001836 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
Mel Gormanf809ca92013-10-07 11:28:57 +01001837 }
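	/*
	 * For illustration, on a machine with nr_node_ids = 4 the single
	 * allocation above holds 8 * 4 unsigned longs, split as:
	 *
	 *   numa_faults_memory        [ 0 ..  7]  decayed memory faults
	 *   numa_faults_cpu           [ 8 .. 15]  decayed cpu faults
	 *   numa_faults_buffer_memory [16 .. 23]  current-window memory faults
	 *   numa_faults_buffer_cpu    [24 .. 31]  current-window cpu faults
	 *
	 * with two entries (private/shared) per node in each region.
	 */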
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001838
Mel Gormanfb003b82012-11-15 09:01:14 +00001839 /*
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001840 * First accesses are treated as private, otherwise consider accesses
1841 * to be private if the accessing pid has not changed
1842 */
1843 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
1844 priv = 1;
1845 } else {
1846 priv = cpupid_match_pid(p, last_cpupid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01001847 if (!priv && !(flags & TNF_NO_GROUP))
Mel Gorman3e6a9412013-10-07 11:29:35 +01001848 task_numa_group(p, last_cpupid, flags, &priv);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001849 }
1850
Rik van Riel792568e2014-04-11 13:00:27 -04001851 /*
1852 * If a workload spans multiple NUMA nodes, a shared fault that
1853 * occurs wholly within the set of nodes that the workload is
1854 * actively using should be counted as local. This allows the
1855 * scan rate to slow down when a workload has settled down.
1856 */
1857 if (!priv && !local && p->numa_group &&
1858 node_isset(cpu_node, p->numa_group->active_nodes) &&
1859 node_isset(mem_node, p->numa_group->active_nodes))
1860 local = 1;
1861
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001862 task_numa_placement(p);
Mel Gormanf809ca92013-10-07 11:28:57 +01001863
Rik van Riel2739d3e2013-10-07 11:29:41 +01001864 /*
1865 * Retry task to preferred node migration periodically, in case it
1866 * case it previously failed, or the scheduler moved us.
1867 */
1868 if (time_after(jiffies, p->numa_migrate_retry))
Mel Gorman6b9a7462013-10-07 11:29:11 +01001869 numa_migrate_preferred(p);
1870
Ingo Molnarb32e86b2013-10-07 11:29:30 +01001871 if (migrated)
1872 p->numa_pages_migrated += pages;
1873
Rik van Riel58b46da2014-01-27 17:03:47 -05001874 p->numa_faults_buffer_memory[task_faults_idx(mem_node, priv)] += pages;
1875 p->numa_faults_buffer_cpu[task_faults_idx(cpu_node, priv)] += pages;
Rik van Riel792568e2014-04-11 13:00:27 -04001876 p->numa_faults_locality[local] += pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001877}
1878
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001879static void reset_ptenuma_scan(struct task_struct *p)
1880{
1881 ACCESS_ONCE(p->mm->numa_scan_seq)++;
1882 p->mm->numa_scan_offset = 0;
1883}
1884
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001885/*
1886 * The expensive part of numa migration is done from task_work context.
1887 * Triggered from task_tick_numa().
1888 */
1889void task_numa_work(struct callback_head *work)
1890{
1891 unsigned long migrate, next_scan, now = jiffies;
1892 struct task_struct *p = current;
1893 struct mm_struct *mm = p->mm;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001894 struct vm_area_struct *vma;
Mel Gorman9f406042012-11-14 18:34:32 +00001895 unsigned long start, end;
Mel Gorman598f0ec2013-10-07 11:28:55 +01001896 unsigned long nr_pte_updates = 0;
Mel Gorman9f406042012-11-14 18:34:32 +00001897 long pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001898
1899 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
1900
1901 work->next = work; /* protect against double add */
1902 /*
1903 * Who cares about NUMA placement when they're dying.
1904 *
1905 * NOTE: make sure not to dereference p->mm before this check,
1906 * exit_task_work() happens _after_ exit_mm() so we could be called
1907 * without p->mm even though we still had it when we enqueued this
1908 * work.
1909 */
1910 if (p->flags & PF_EXITING)
1911 return;
1912
Mel Gorman930aa172013-10-07 11:29:37 +01001913 if (!mm->numa_next_scan) {
Mel Gorman7e8d16b2013-10-07 11:28:54 +01001914 mm->numa_next_scan = now +
1915 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
Mel Gormanb8593bf2012-11-21 01:18:23 +00001916 }
1917
1918 /*
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001919 * Enforce maximal scan/migration frequency..
1920 */
1921 migrate = mm->numa_next_scan;
1922 if (time_before(now, migrate))
1923 return;
1924
Mel Gorman598f0ec2013-10-07 11:28:55 +01001925 if (p->numa_scan_period == 0) {
1926 p->numa_scan_period_max = task_scan_max(p);
1927 p->numa_scan_period = task_scan_min(p);
1928 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001929
Mel Gormanfb003b82012-11-15 09:01:14 +00001930 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001931 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
1932 return;
1933
Mel Gormane14808b2012-11-19 10:59:15 +00001934 /*
Peter Zijlstra19a78d12013-10-07 11:28:51 +01001935 * Delay this task enough that another task of this mm will likely win
1936 * the next time around.
1937 */
1938 p->node_stamp += 2 * TICK_NSEC;
1939
Mel Gorman9f406042012-11-14 18:34:32 +00001940 start = mm->numa_scan_offset;
1941 pages = sysctl_numa_balancing_scan_size;
1942 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
1943 if (!pages)
1944 return;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001945
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001946 down_read(&mm->mmap_sem);
Mel Gorman9f406042012-11-14 18:34:32 +00001947 vma = find_vma(mm, start);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001948 if (!vma) {
1949 reset_ptenuma_scan(p);
Mel Gorman9f406042012-11-14 18:34:32 +00001950 start = 0;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001951 vma = mm->mmap;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001952 }
Mel Gorman9f406042012-11-14 18:34:32 +00001953 for (; vma; vma = vma->vm_next) {
Mel Gormanfc3147242013-10-07 11:29:09 +01001954 if (!vma_migratable(vma) || !vma_policy_mof(p, vma))
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001955 continue;
1956
Mel Gorman4591ce4f2013-10-07 11:29:13 +01001957 /*
1958 * Shared library pages mapped by multiple processes are not
1959 * migrated as it is expected they are cache replicated. Avoid
1960 * hinting faults in read-only file-backed mappings or the vdso
1961 * as migrating the pages will be of marginal benefit.
1962 */
1963 if (!vma->vm_mm ||
1964 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
1965 continue;
1966
Mel Gorman3c67f472013-12-18 17:08:40 -08001967 /*
1968 * Skip inaccessible VMAs to avoid any confusion between
1969 * PROT_NONE and NUMA hinting ptes
1970 */
1971 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
1972 continue;
1973
Mel Gorman9f406042012-11-14 18:34:32 +00001974 do {
1975 start = max(start, vma->vm_start);
1976 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
1977 end = min(end, vma->vm_end);
Mel Gorman598f0ec2013-10-07 11:28:55 +01001978 nr_pte_updates += change_prot_numa(vma, start, end);
1979
1980 /*
1981 * Scan sysctl_numa_balancing_scan_size but ensure that
1982 * at least one PTE is updated so that unused virtual
1983 * address space is quickly skipped.
1984 */
1985 if (nr_pte_updates)
1986 pages -= (end - start) >> PAGE_SHIFT;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001987
Mel Gorman9f406042012-11-14 18:34:32 +00001988 start = end;
1989 if (pages <= 0)
1990 goto out;
Rik van Riel3cf19622014-02-18 17:12:44 -05001991
1992 cond_resched();
Mel Gorman9f406042012-11-14 18:34:32 +00001993 } while (end != vma->vm_end);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001994 }
1995
Mel Gorman9f406042012-11-14 18:34:32 +00001996out:
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02001997 /*
Peter Zijlstrac69307d2013-10-07 11:28:41 +01001998 * It is possible to reach the end of the VMA list but the last few
 1999	 * VMAs are not guaranteed to be migratable. If they are not, we
2000 * would find the !migratable VMA on the next scan but not reset the
2001 * scanner to the start so check it now.
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002002 */
2003 if (vma)
Mel Gorman9f406042012-11-14 18:34:32 +00002004 mm->numa_scan_offset = start;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002005 else
2006 reset_ptenuma_scan(p);
2007 up_read(&mm->mmap_sem);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002008}
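/*
 * A rough sizing example for the scan above: with the default
 * sysctl_numa_balancing_scan_size of 256 (MB) and 4K pages, each pass marks
 * up to 256 << (20 - 12) = 65536 pages for hinting faults, and the window
 * end is aligned up to HPAGE_SIZE so a huge page is never split across two
 * scan windows.
 */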
2009
2010/*
2011 * Drive the periodic memory faults..
2012 */
2013void task_tick_numa(struct rq *rq, struct task_struct *curr)
2014{
2015 struct callback_head *work = &curr->numa_work;
2016 u64 period, now;
2017
2018 /*
2019 * We don't care about NUMA placement if we don't have memory.
2020 */
2021 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2022 return;
2023
2024 /*
2025 * Using runtime rather than walltime has the dual advantage that
2026 * we (mostly) drive the selection from busy threads and that the
2027 * task needs to have done some actual work before we bother with
2028 * NUMA placement.
2029 */
2030 now = curr->se.sum_exec_runtime;
2031 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2032
2033 if (now - curr->node_stamp > period) {
Peter Zijlstra4b96a292012-10-25 14:16:47 +02002034 if (!curr->node_stamp)
Mel Gorman598f0ec2013-10-07 11:28:55 +01002035 curr->numa_scan_period = task_scan_min(curr);
Peter Zijlstra19a78d12013-10-07 11:28:51 +01002036 curr->node_stamp += period;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002037
2038 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2039 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2040 task_work_add(curr, work, true);
2041 }
2042 }
2043}
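/*
 * For example, with numa_scan_period = 1000 (ms) the task must accrue
 * another full second of CPU time beyond node_stamp before the
 * task_numa_work() callback above is queued, so idle or mostly-idle tasks
 * never pay for NUMA scanning.
 */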
2044#else
2045static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2046{
2047}
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002048
2049static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2050{
2051}
2052
2053static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2054{
2055}
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002056#endif /* CONFIG_NUMA_BALANCING */
2057
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002058static void
2059account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2060{
2061 update_load_add(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02002062 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02002063 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01002064#ifdef CONFIG_SMP
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002065 if (entity_is_task(se)) {
2066 struct rq *rq = rq_of(cfs_rq);
2067
2068 account_numa_enqueue(rq, task_of(se));
2069 list_add(&se->group_node, &rq->cfs_tasks);
2070 }
Peter Zijlstra367456c2012-02-20 21:49:09 +01002071#endif
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002072 cfs_rq->nr_running++;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002073}
2074
2075static void
2076account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2077{
2078 update_load_sub(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02002079 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02002080 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002081 if (entity_is_task(se)) {
2082 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
Bharata B Raob87f1722008-09-25 09:53:54 +05302083 list_del_init(&se->group_node);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002084 }
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002085 cfs_rq->nr_running--;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002086}
2087
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002088#ifdef CONFIG_FAIR_GROUP_SCHED
2089# ifdef CONFIG_SMP
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002090static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2091{
2092 long tg_weight;
2093
2094 /*
2095 * Use this CPU's actual weight instead of the last load_contribution
2096 * to gain a more accurate current total weight. See
2097 * update_cfs_rq_load_contribution().
2098 */
Alex Shibf5b9862013-06-20 10:18:54 +08002099 tg_weight = atomic_long_read(&tg->load_avg);
Paul Turner82958362012-10-04 13:18:31 +02002100 tg_weight -= cfs_rq->tg_load_contrib;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002101 tg_weight += cfs_rq->load.weight;
2102
2103 return tg_weight;
2104}
2105
Paul Turner6d5ab292011-01-21 20:45:01 -08002106static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002107{
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002108 long tg_weight, load, shares;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002109
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002110 tg_weight = calc_tg_weight(tg, cfs_rq);
Paul Turner6d5ab292011-01-21 20:45:01 -08002111 load = cfs_rq->load.weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002112
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002113 shares = (tg->shares * load);
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002114 if (tg_weight)
2115 shares /= tg_weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002116
2117 if (shares < MIN_SHARES)
2118 shares = MIN_SHARES;
2119 if (shares > tg->shares)
2120 shares = tg->shares;
2121
2122 return shares;
2123}
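/*
 * A rough worked example of the shares calculation above: with
 * tg->shares = 1024, a group-wide tg->load_avg of 2048 of which this cfs_rq
 * contributed 512, and a current local load.weight of 1024, we get
 * tg_weight = 2048 - 512 + 1024 = 2560 and
 * shares = 1024 * 1024 / 2560 = 409, i.e. this CPU's group entity carries
 * roughly 40% of the group's total weight, clamped to
 * [MIN_SHARES, tg->shares].
 */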
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002124# else /* CONFIG_SMP */
Paul Turner6d5ab292011-01-21 20:45:01 -08002125static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002126{
2127 return tg->shares;
2128}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002129# endif /* CONFIG_SMP */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002130static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2131 unsigned long weight)
2132{
Paul Turner19e5eeb2010-12-15 19:10:18 -08002133 if (se->on_rq) {
2134 /* commit outstanding execution time */
2135 if (cfs_rq->curr == se)
2136 update_curr(cfs_rq);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002137 account_entity_dequeue(cfs_rq, se);
Paul Turner19e5eeb2010-12-15 19:10:18 -08002138 }
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002139
2140 update_load_set(&se->load, weight);
2141
2142 if (se->on_rq)
2143 account_entity_enqueue(cfs_rq, se);
2144}
2145
Paul Turner82958362012-10-04 13:18:31 +02002146static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2147
Paul Turner6d5ab292011-01-21 20:45:01 -08002148static void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002149{
2150 struct task_group *tg;
2151 struct sched_entity *se;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002152 long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002153
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002154 tg = cfs_rq->tg;
2155 se = tg->se[cpu_of(rq_of(cfs_rq))];
Paul Turner64660c82011-07-21 09:43:36 -07002156 if (!se || throttled_hierarchy(cfs_rq))
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002157 return;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002158#ifndef CONFIG_SMP
2159 if (likely(se->load.weight == tg->shares))
2160 return;
2161#endif
Paul Turner6d5ab292011-01-21 20:45:01 -08002162 shares = calc_cfs_shares(cfs_rq, tg);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002163
2164 reweight_entity(cfs_rq_of(se), se, shares);
2165}
2166#else /* CONFIG_FAIR_GROUP_SCHED */
Paul Turner6d5ab292011-01-21 20:45:01 -08002167static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002168{
2169}
2170#endif /* CONFIG_FAIR_GROUP_SCHED */
2171
Alex Shi141965c2013-06-26 13:05:39 +08002172#ifdef CONFIG_SMP
Paul Turner9d85f212012-10-04 13:18:29 +02002173/*
Paul Turner5b51f2f2012-10-04 13:18:32 +02002174 * We choose a half-life close to 1 scheduling period.
2175 * Note: The tables below are dependent on this value.
2176 */
2177#define LOAD_AVG_PERIOD 32
2178#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
 2179#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
2180
2181/* Precomputed fixed inverse multiplies for multiplication by y^n */
2182static const u32 runnable_avg_yN_inv[] = {
2183 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2184 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2185 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2186 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2187 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2188 0x85aac367, 0x82cd8698,
2189};
2190
2191/*
2192 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
2193 * over-estimates when re-combining.
2194 */
2195static const u32 runnable_avg_yN_sum[] = {
2196 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2197 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2198 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2199};
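/*
 * In other words, with y defined by y^32 = 0.5 (y ~= 0.97857):
 * runnable_avg_yN_inv[n] is floor(y^n * 2^32), e.g. entry 1 is
 * 0xfa83b2da ~= 0.97857 * 2^32, and runnable_avg_yN_sum[n] is
 * floor(1024 * (y + y^2 + ... + y^n)), e.g. entry 1 is 1002 ~= 1024 * y.
 * These tables let decay_load() and __compute_runnable_contrib() below run
 * in constant time.
 */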
2200
2201/*
Paul Turner9d85f212012-10-04 13:18:29 +02002202 * Approximate:
2203 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2204 */
2205static __always_inline u64 decay_load(u64 val, u64 n)
2206{
Paul Turner5b51f2f2012-10-04 13:18:32 +02002207 unsigned int local_n;
2208
2209 if (!n)
2210 return val;
2211 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2212 return 0;
2213
2214 /* after bounds checking we can collapse to 32-bit */
2215 local_n = n;
2216
2217 /*
2218 * As y^PERIOD = 1/2, we can combine
 2219	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
 2220	 * With a look-up table which covers y^n (n < PERIOD)
2221 *
2222 * To achieve constant time decay_load.
2223 */
2224 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2225 val >>= local_n / LOAD_AVG_PERIOD;
2226 local_n %= LOAD_AVG_PERIOD;
Paul Turner9d85f212012-10-04 13:18:29 +02002227 }
2228
Paul Turner5b51f2f2012-10-04 13:18:32 +02002229 val *= runnable_avg_yN_inv[local_n];
2230 /* We don't use SRR here since we always want to round down. */
2231 return val >> 32;
2232}
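/*
 * Illustrative use of decay_load(): the value halves for every
 * LOAD_AVG_PERIOD (32) elapsed periods, e.g.
 *
 *	decay_load(1024, 32) ~= 512	(one half-life)
 *	decay_load(1024, 96) ~= 128	(three half-lives)
 *	decay_load(x, n)      = 0	for n > LOAD_AVG_PERIOD * 63
 *
 * the last case because after 63 half-lives any 64-bit value has decayed
 * to zero anyway.
 */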
2233
2234/*
2235 * For updates fully spanning n periods, the contribution to runnable
2236 * average will be: \Sum 1024*y^n
2237 *
2238 * We can compute this reasonably efficiently by combining:
2239 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
2240 */
2241static u32 __compute_runnable_contrib(u64 n)
2242{
2243 u32 contrib = 0;
2244
2245 if (likely(n <= LOAD_AVG_PERIOD))
2246 return runnable_avg_yN_sum[n];
2247 else if (unlikely(n >= LOAD_AVG_MAX_N))
2248 return LOAD_AVG_MAX;
2249
 2250	/* Compute \Sum 1024*y^k { 1<=k<=n } from the precomputed tables above */
2251 do {
2252 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2253 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2254
2255 n -= LOAD_AVG_PERIOD;
2256 } while (n > LOAD_AVG_PERIOD);
2257
2258 contrib = decay_load(contrib, n);
2259 return contrib + runnable_avg_yN_sum[n];
Paul Turner9d85f212012-10-04 13:18:29 +02002260}
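/*
 * A rough worked example for n = 40 periods: the loop above folds in one
 * full LOAD_AVG_PERIOD, leaving contrib = runnable_avg_yN_sum[32] = 23371
 * and n = 8; that is then decayed by y^8 (~0.841) to ~19650 and
 * runnable_avg_yN_sum[8] = 7437 is added, giving ~27090 -- close to the
 * exact geometric sum, with only floor rounding lost.
 */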
2261
2262/*
2263 * We can represent the historical contribution to runnable average as the
2264 * coefficients of a geometric series. To do this we sub-divide our runnable
2265 * history into segments of approximately 1ms (1024us); label the segment that
2266 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2267 *
2268 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2269 * p0 p1 p2
2270 * (now) (~1ms ago) (~2ms ago)
2271 *
2272 * Let u_i denote the fraction of p_i that the entity was runnable.
2273 *
2274 * We then designate the fractions u_i as our co-efficients, yielding the
2275 * following representation of historical load:
2276 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2277 *
 2278 * We choose y based on the width of a reasonable scheduling period, fixing:
2279 * y^32 = 0.5
2280 *
2281 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2282 * approximately half as much as the contribution to load within the last ms
2283 * (u_0).
2284 *
2285 * When a period "rolls over" and we have new u_0`, multiplying the previous
2286 * sum again by y is sufficient to update:
2287 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2288 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2289 */
2290static __always_inline int __update_entity_runnable_avg(u64 now,
2291 struct sched_avg *sa,
2292 int runnable)
2293{
Paul Turner5b51f2f2012-10-04 13:18:32 +02002294 u64 delta, periods;
2295 u32 runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02002296 int delta_w, decayed = 0;
2297
2298 delta = now - sa->last_runnable_update;
2299 /*
2300 * This should only happen when time goes backwards, which it
2301 * unfortunately does during sched clock init when we swap over to TSC.
2302 */
2303 if ((s64)delta < 0) {
2304 sa->last_runnable_update = now;
2305 return 0;
2306 }
2307
2308 /*
2309 * Use 1024ns as the unit of measurement since it's a reasonable
2310 * approximation of 1us and fast to compute.
2311 */
2312 delta >>= 10;
2313 if (!delta)
2314 return 0;
2315 sa->last_runnable_update = now;
2316
2317 /* delta_w is the amount already accumulated against our next period */
2318 delta_w = sa->runnable_avg_period % 1024;
2319 if (delta + delta_w >= 1024) {
2320 /* period roll-over */
2321 decayed = 1;
2322
2323 /*
2324 * Now that we know we're crossing a period boundary, figure
2325 * out how much from delta we need to complete the current
2326 * period and accrue it.
2327 */
2328 delta_w = 1024 - delta_w;
Paul Turner5b51f2f2012-10-04 13:18:32 +02002329 if (runnable)
2330 sa->runnable_avg_sum += delta_w;
2331 sa->runnable_avg_period += delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02002332
Paul Turner5b51f2f2012-10-04 13:18:32 +02002333 delta -= delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02002334
Paul Turner5b51f2f2012-10-04 13:18:32 +02002335 /* Figure out how many additional periods this update spans */
2336 periods = delta / 1024;
2337 delta %= 1024;
2338
2339 sa->runnable_avg_sum = decay_load(sa->runnable_avg_sum,
2340 periods + 1);
2341 sa->runnable_avg_period = decay_load(sa->runnable_avg_period,
2342 periods + 1);
2343
2344 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
2345 runnable_contrib = __compute_runnable_contrib(periods);
2346 if (runnable)
2347 sa->runnable_avg_sum += runnable_contrib;
2348 sa->runnable_avg_period += runnable_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02002349 }
2350
2351 /* Remainder of delta accrued against u_0` */
2352 if (runnable)
2353 sa->runnable_avg_sum += delta;
2354 sa->runnable_avg_period += delta;
2355
2356 return decayed;
2357}
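/*
 * A worked example of the rollover handling above: suppose 700us of the
 * current 1024us period were already accumulated and the entity was
 * runnable for a further delta of 2500us. The first 324us close out the
 * current period, the sums are decayed by y^(periods + 1) = y^3 for the 2
 * complete periods that follow (which contribute
 * __compute_runnable_contrib(2) = 1982), and the remaining 128us are
 * accrued against the newly started period u_0`.
 */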
2358
Paul Turner9ee474f2012-10-04 13:18:30 +02002359/* Synchronize an entity's decay with its parenting cfs_rq.*/
Paul Turneraff3e492012-10-04 13:18:30 +02002360static inline u64 __synchronize_entity_decay(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02002361{
2362 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2363 u64 decays = atomic64_read(&cfs_rq->decay_counter);
2364
2365 decays -= se->avg.decay_count;
2366 if (!decays)
Paul Turneraff3e492012-10-04 13:18:30 +02002367 return 0;
Paul Turner9ee474f2012-10-04 13:18:30 +02002368
2369 se->avg.load_avg_contrib = decay_load(se->avg.load_avg_contrib, decays);
2370 se->avg.decay_count = 0;
Paul Turneraff3e492012-10-04 13:18:30 +02002371
2372 return decays;
Paul Turner9ee474f2012-10-04 13:18:30 +02002373}
2374
Paul Turnerc566e8e2012-10-04 13:18:30 +02002375#ifdef CONFIG_FAIR_GROUP_SCHED
2376static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2377 int force_update)
2378{
2379 struct task_group *tg = cfs_rq->tg;
Alex Shibf5b9862013-06-20 10:18:54 +08002380 long tg_contrib;
Paul Turnerc566e8e2012-10-04 13:18:30 +02002381
2382 tg_contrib = cfs_rq->runnable_load_avg + cfs_rq->blocked_load_avg;
2383 tg_contrib -= cfs_rq->tg_load_contrib;
2384
Alex Shibf5b9862013-06-20 10:18:54 +08002385 if (force_update || abs(tg_contrib) > cfs_rq->tg_load_contrib / 8) {
2386 atomic_long_add(tg_contrib, &tg->load_avg);
Paul Turnerc566e8e2012-10-04 13:18:30 +02002387 cfs_rq->tg_load_contrib += tg_contrib;
2388 }
2389}
Paul Turner8165e142012-10-04 13:18:31 +02002390
Paul Turnerbb17f652012-10-04 13:18:31 +02002391/*
2392 * Aggregate cfs_rq runnable averages into an equivalent task_group
2393 * representation for computing load contributions.
2394 */
2395static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2396 struct cfs_rq *cfs_rq)
2397{
2398 struct task_group *tg = cfs_rq->tg;
2399 long contrib;
2400
2401 /* The fraction of a cpu used by this cfs_rq */
Michal Nazarewicz85b088e2013-11-10 20:42:01 +01002402 contrib = div_u64((u64)sa->runnable_avg_sum << NICE_0_SHIFT,
Paul Turnerbb17f652012-10-04 13:18:31 +02002403 sa->runnable_avg_period + 1);
2404 contrib -= cfs_rq->tg_runnable_contrib;
2405
2406 if (abs(contrib) > cfs_rq->tg_runnable_contrib / 64) {
2407 atomic_add(contrib, &tg->runnable_avg);
2408 cfs_rq->tg_runnable_contrib += contrib;
2409 }
2410}
2411
Paul Turner8165e142012-10-04 13:18:31 +02002412static inline void __update_group_entity_contrib(struct sched_entity *se)
2413{
2414 struct cfs_rq *cfs_rq = group_cfs_rq(se);
2415 struct task_group *tg = cfs_rq->tg;
Paul Turnerbb17f652012-10-04 13:18:31 +02002416 int runnable_avg;
2417
Paul Turner8165e142012-10-04 13:18:31 +02002418 u64 contrib;
2419
2420 contrib = cfs_rq->tg_load_contrib * tg->shares;
Alex Shibf5b9862013-06-20 10:18:54 +08002421 se->avg.load_avg_contrib = div_u64(contrib,
2422 atomic_long_read(&tg->load_avg) + 1);
Paul Turnerbb17f652012-10-04 13:18:31 +02002423
2424 /*
2425 * For group entities we need to compute a correction term in the case
2426 * that they are consuming <1 cpu so that we would contribute the same
2427 * load as a task of equal weight.
2428 *
2429 * Explicitly co-ordinating this measurement would be expensive, but
2430 * fortunately the sum of each cpus contribution forms a usable
2431 * lower-bound on the true value.
2432 *
2433 * Consider the aggregate of 2 contributions. Either they are disjoint
 2434	 * (and the sum represents the true value) or they overlap and we are
2435 * understating by the aggregate of their overlap.
2436 *
2437 * Extending this to N cpus, for a given overlap, the maximum amount we
 2438	 * understate is then n_i(n_i+1)/2 * w_i where n_i is the number of
2439 * cpus that overlap for this interval and w_i is the interval width.
2440 *
 2441	 * On a small machine, the first term is well-bounded, which bounds the
 2442	 * total error since w_i is a subset of the period. Whereas on a
 2443	 * larger machine, while this first term can be larger, if w_i is of
 2444	 * consequential size then n_i*w_i is guaranteed to quickly converge to
2445 * our upper bound of 1-cpu.
2446 */
2447 runnable_avg = atomic_read(&tg->runnable_avg);
2448 if (runnable_avg < NICE_0_LOAD) {
2449 se->avg.load_avg_contrib *= runnable_avg;
2450 se->avg.load_avg_contrib >>= NICE_0_SHIFT;
2451 }
Paul Turner8165e142012-10-04 13:18:31 +02002452}
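/*
 * For example, if the group entity's raw contribution works out to 800 but
 * the group as a whole is only ~50% runnable (runnable_avg = 512 against a
 * NICE_0_LOAD of 1024), the correction above scales the contribution to
 * 800 * 512 >> 10 = 400, approximating the load a single task of equal
 * weight would contribute at that level of CPU use.
 */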
Dietmar Eggemannf5f97392014-02-26 11:19:33 +00002453
2454static inline void update_rq_runnable_avg(struct rq *rq, int runnable)
2455{
2456 __update_entity_runnable_avg(rq_clock_task(rq), &rq->avg, runnable);
2457 __update_tg_runnable_avg(&rq->avg, &rq->cfs);
2458}
Peter Zijlstra6e831252014-02-11 16:11:48 +01002459#else /* CONFIG_FAIR_GROUP_SCHED */
Paul Turnerc566e8e2012-10-04 13:18:30 +02002460static inline void __update_cfs_rq_tg_load_contrib(struct cfs_rq *cfs_rq,
2461 int force_update) {}
Paul Turnerbb17f652012-10-04 13:18:31 +02002462static inline void __update_tg_runnable_avg(struct sched_avg *sa,
2463 struct cfs_rq *cfs_rq) {}
Paul Turner8165e142012-10-04 13:18:31 +02002464static inline void __update_group_entity_contrib(struct sched_entity *se) {}
Dietmar Eggemannf5f97392014-02-26 11:19:33 +00002465static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Peter Zijlstra6e831252014-02-11 16:11:48 +01002466#endif /* CONFIG_FAIR_GROUP_SCHED */
Paul Turnerc566e8e2012-10-04 13:18:30 +02002467
Paul Turner8165e142012-10-04 13:18:31 +02002468static inline void __update_task_entity_contrib(struct sched_entity *se)
2469{
2470 u32 contrib;
2471
2472 /* avoid overflowing a 32-bit type w/ SCHED_LOAD_SCALE */
2473 contrib = se->avg.runnable_avg_sum * scale_load_down(se->load.weight);
2474 contrib /= (se->avg.runnable_avg_period + 1);
2475 se->avg.load_avg_contrib = scale_load(contrib);
2476}
2477
Paul Turner2dac7542012-10-04 13:18:30 +02002478/* Compute the current contribution to load_avg by se, return any delta */
2479static long __update_entity_load_avg_contrib(struct sched_entity *se)
2480{
2481 long old_contrib = se->avg.load_avg_contrib;
2482
Paul Turner8165e142012-10-04 13:18:31 +02002483 if (entity_is_task(se)) {
2484 __update_task_entity_contrib(se);
2485 } else {
Paul Turnerbb17f652012-10-04 13:18:31 +02002486 __update_tg_runnable_avg(&se->avg, group_cfs_rq(se));
Paul Turner8165e142012-10-04 13:18:31 +02002487 __update_group_entity_contrib(se);
2488 }
Paul Turner2dac7542012-10-04 13:18:30 +02002489
2490 return se->avg.load_avg_contrib - old_contrib;
2491}
2492
Paul Turner9ee474f2012-10-04 13:18:30 +02002493static inline void subtract_blocked_load_contrib(struct cfs_rq *cfs_rq,
2494 long load_contrib)
2495{
2496 if (likely(load_contrib < cfs_rq->blocked_load_avg))
2497 cfs_rq->blocked_load_avg -= load_contrib;
2498 else
2499 cfs_rq->blocked_load_avg = 0;
2500}
2501
Paul Turnerf1b17282012-10-04 13:18:31 +02002502static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2503
Paul Turner9d85f212012-10-04 13:18:29 +02002504/* Update a sched_entity's runnable average */
Paul Turner9ee474f2012-10-04 13:18:30 +02002505static inline void update_entity_load_avg(struct sched_entity *se,
2506 int update_cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02002507{
Paul Turner2dac7542012-10-04 13:18:30 +02002508 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2509 long contrib_delta;
Paul Turnerf1b17282012-10-04 13:18:31 +02002510 u64 now;
Paul Turner2dac7542012-10-04 13:18:30 +02002511
Paul Turnerf1b17282012-10-04 13:18:31 +02002512 /*
2513 * For a group entity we need to use their owned cfs_rq_clock_task() in
2514 * case they are the parent of a throttled hierarchy.
2515 */
2516 if (entity_is_task(se))
2517 now = cfs_rq_clock_task(cfs_rq);
2518 else
2519 now = cfs_rq_clock_task(group_cfs_rq(se));
2520
2521 if (!__update_entity_runnable_avg(now, &se->avg, se->on_rq))
Paul Turner2dac7542012-10-04 13:18:30 +02002522 return;
2523
2524 contrib_delta = __update_entity_load_avg_contrib(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02002525
2526 if (!update_cfs_rq)
2527 return;
2528
Paul Turner2dac7542012-10-04 13:18:30 +02002529 if (se->on_rq)
2530 cfs_rq->runnable_load_avg += contrib_delta;
Paul Turner9ee474f2012-10-04 13:18:30 +02002531 else
2532 subtract_blocked_load_contrib(cfs_rq, -contrib_delta);
2533}
2534
2535/*
2536 * Decay the load contributed by all blocked children and account this so that
 2537 * their contribution may be appropriately discounted when they wake up.
2538 */
Paul Turneraff3e492012-10-04 13:18:30 +02002539static void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq, int force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02002540{
Paul Turnerf1b17282012-10-04 13:18:31 +02002541 u64 now = cfs_rq_clock_task(cfs_rq) >> 20;
Paul Turner9ee474f2012-10-04 13:18:30 +02002542 u64 decays;
2543
2544 decays = now - cfs_rq->last_decay;
Paul Turneraff3e492012-10-04 13:18:30 +02002545 if (!decays && !force_update)
Paul Turner9ee474f2012-10-04 13:18:30 +02002546 return;
2547
Alex Shi25099402013-06-20 10:18:55 +08002548 if (atomic_long_read(&cfs_rq->removed_load)) {
2549 unsigned long removed_load;
2550 removed_load = atomic_long_xchg(&cfs_rq->removed_load, 0);
Paul Turneraff3e492012-10-04 13:18:30 +02002551 subtract_blocked_load_contrib(cfs_rq, removed_load);
2552 }
Paul Turner9ee474f2012-10-04 13:18:30 +02002553
Paul Turneraff3e492012-10-04 13:18:30 +02002554 if (decays) {
2555 cfs_rq->blocked_load_avg = decay_load(cfs_rq->blocked_load_avg,
2556 decays);
2557 atomic64_add(decays, &cfs_rq->decay_counter);
2558 cfs_rq->last_decay = now;
2559 }
Paul Turnerc566e8e2012-10-04 13:18:30 +02002560
2561 __update_cfs_rq_tg_load_contrib(cfs_rq, force_update);
Paul Turner9d85f212012-10-04 13:18:29 +02002562}
Ben Segall18bf2802012-10-04 12:51:20 +02002563
Paul Turner2dac7542012-10-04 13:18:30 +02002564/* Add the load generated by se into cfs_rq's child load-average */
2565static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002566 struct sched_entity *se,
2567 int wakeup)
Paul Turner2dac7542012-10-04 13:18:30 +02002568{
Paul Turneraff3e492012-10-04 13:18:30 +02002569 /*
2570 * We track migrations using entity decay_count <= 0, on a wake-up
2571 * migration we use a negative decay count to track the remote decays
2572 * accumulated while sleeping.
Alex Shia75cdaa2013-06-20 10:18:47 +08002573 *
2574 * Newly forked tasks are enqueued with se->avg.decay_count == 0, they
2575 * are seen by enqueue_entity_load_avg() as a migration with an already
2576 * constructed load_avg_contrib.
Paul Turneraff3e492012-10-04 13:18:30 +02002577 */
2578 if (unlikely(se->avg.decay_count <= 0)) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002579 se->avg.last_runnable_update = rq_clock_task(rq_of(cfs_rq));
Paul Turneraff3e492012-10-04 13:18:30 +02002580 if (se->avg.decay_count) {
2581 /*
2582 * In a wake-up migration we have to approximate the
2583 * time sleeping. This is because we can't synchronize
2584 * clock_task between the two cpus, and it is not
2585 * guaranteed to be read-safe. Instead, we can
2586 * approximate this using our carried decays, which are
2587 * explicitly atomically readable.
2588 */
2589 se->avg.last_runnable_update -= (-se->avg.decay_count)
2590 << 20;
2591 update_entity_load_avg(se, 0);
2592 /* Indicate that we're now synchronized and on-rq */
2593 se->avg.decay_count = 0;
2594 }
Paul Turner9ee474f2012-10-04 13:18:30 +02002595 wakeup = 0;
2596 } else {
Vincent Guittot93906752014-01-22 08:45:34 +01002597 __synchronize_entity_decay(se);
Paul Turner9ee474f2012-10-04 13:18:30 +02002598 }
2599
Paul Turneraff3e492012-10-04 13:18:30 +02002600 /* migrated tasks did not contribute to our blocked load */
2601 if (wakeup) {
Paul Turner9ee474f2012-10-04 13:18:30 +02002602 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turneraff3e492012-10-04 13:18:30 +02002603 update_entity_load_avg(se, 0);
2604 }
Paul Turner9ee474f2012-10-04 13:18:30 +02002605
Paul Turner2dac7542012-10-04 13:18:30 +02002606 cfs_rq->runnable_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02002607 /* we force update consideration on load-balancer moves */
2608 update_cfs_rq_blocked_load(cfs_rq, !wakeup);
Paul Turner2dac7542012-10-04 13:18:30 +02002609}
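
/*
 * Illustrative wake-up migration, with made-up numbers: suppose the
 * migration path left decay_count == -3, recording three ~1ms decay
 * periods that elapsed remotely while we slept.  The code above rewinds
 * last_runnable_update by 3 << 20 ns, so update_entity_load_avg()
 * applies exactly those three decays before the entity is re-added to
 * runnable_load_avg.
 */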
2610
Paul Turner9ee474f2012-10-04 13:18:30 +02002611/*
2612 * Remove se's load from this cfs_rq child load-average; if the entity is
2613 * transitioning to a blocked state we track its projected decay using
2614 * blocked_load_avg.
2615 */
Paul Turner2dac7542012-10-04 13:18:30 +02002616static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002617 struct sched_entity *se,
2618 int sleep)
Paul Turner2dac7542012-10-04 13:18:30 +02002619{
Paul Turner9ee474f2012-10-04 13:18:30 +02002620 update_entity_load_avg(se, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02002621 /* we force update consideration on load-balancer moves */
2622 update_cfs_rq_blocked_load(cfs_rq, !sleep);
Paul Turner9ee474f2012-10-04 13:18:30 +02002623
Paul Turner2dac7542012-10-04 13:18:30 +02002624 cfs_rq->runnable_load_avg -= se->avg.load_avg_contrib;
Paul Turner9ee474f2012-10-04 13:18:30 +02002625 if (sleep) {
2626 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
2627 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
2628 } /* migrations, e.g. sleep=0 leave decay_count == 0 */
Paul Turner2dac7542012-10-04 13:18:30 +02002629}
Vincent Guittot642dbc32013-04-18 18:34:26 +02002630
2631/*
2632 * Update the rq's load with the elapsed running time before entering
2633 * idle. If the last scheduled task is not a CFS task, idle_enter will
2634 * be the only way to update the runnable statistic.
2635 */
2636void idle_enter_fair(struct rq *this_rq)
2637{
2638 update_rq_runnable_avg(this_rq, 1);
2639}
2640
2641/*
2642 * Update the rq's load with the elapsed idle time before a task is
2643 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
2644 * be the only way to update the runnable statistic.
2645 */
2646void idle_exit_fair(struct rq *this_rq)
2647{
2648 update_rq_runnable_avg(this_rq, 0);
2649}
2650
Peter Zijlstra6e831252014-02-11 16:11:48 +01002651static int idle_balance(struct rq *this_rq);
2652
Peter Zijlstra38033c32014-01-23 20:32:21 +01002653#else /* CONFIG_SMP */
2654
Paul Turner9ee474f2012-10-04 13:18:30 +02002655static inline void update_entity_load_avg(struct sched_entity *se,
2656 int update_cfs_rq) {}
Ben Segall18bf2802012-10-04 12:51:20 +02002657static inline void update_rq_runnable_avg(struct rq *rq, int runnable) {}
Paul Turner2dac7542012-10-04 13:18:30 +02002658static inline void enqueue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002659 struct sched_entity *se,
2660 int wakeup) {}
Paul Turner2dac7542012-10-04 13:18:30 +02002661static inline void dequeue_entity_load_avg(struct cfs_rq *cfs_rq,
Paul Turner9ee474f2012-10-04 13:18:30 +02002662 struct sched_entity *se,
2663 int sleep) {}
Paul Turneraff3e492012-10-04 13:18:30 +02002664static inline void update_cfs_rq_blocked_load(struct cfs_rq *cfs_rq,
2665 int force_update) {}
Peter Zijlstra6e831252014-02-11 16:11:48 +01002666
2667static inline int idle_balance(struct rq *rq)
2668{
2669 return 0;
2670}
2671
Peter Zijlstra38033c32014-01-23 20:32:21 +01002672#endif /* CONFIG_SMP */
Paul Turner9d85f212012-10-04 13:18:29 +02002673
Ingo Molnar2396af62007-08-09 11:16:48 +02002674static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002675{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002676#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02002677 struct task_struct *tsk = NULL;
2678
2679 if (entity_is_task(se))
2680 tsk = task_of(se);
2681
Lucas De Marchi41acab82010-03-10 23:37:45 -03002682 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002683 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002684
2685 if ((s64)delta < 0)
2686 delta = 0;
2687
Lucas De Marchi41acab82010-03-10 23:37:45 -03002688 if (unlikely(delta > se->statistics.sleep_max))
2689 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002690
Peter Zijlstra8c79a042012-01-30 14:51:37 +01002691 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03002692 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01002693
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002694 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02002695 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002696 trace_sched_stat_sleep(tsk, delta);
2697 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002698 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03002699 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002700 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002701
2702 if ((s64)delta < 0)
2703 delta = 0;
2704
Lucas De Marchi41acab82010-03-10 23:37:45 -03002705 if (unlikely(delta > se->statistics.block_max))
2706 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002707
Peter Zijlstra8c79a042012-01-30 14:51:37 +01002708 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03002709 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02002710
Peter Zijlstrae4143142009-07-23 20:13:26 +02002711 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07002712 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002713 se->statistics.iowait_sum += delta;
2714 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002715 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07002716 }
2717
Andrew Vaginb781a602011-11-28 12:03:35 +03002718 trace_sched_stat_blocked(tsk, delta);
2719
Peter Zijlstrae4143142009-07-23 20:13:26 +02002720 /*
2721 * Blocking time is in units of nanosecs, so shift by
2722 * 20 to get a milliseconds-range estimation of the
2723 * amount of time that the task spent sleeping:
2724 */
2725 if (unlikely(prof_on == SLEEP_PROFILING)) {
2726 profile_hits(SLEEP_PROFILING,
2727 (void *)get_wchan(tsk),
2728 delta >> 20);
2729 }
2730 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02002731 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002732 }
2733#endif
2734}
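
/*
 * Concrete shift arithmetic for the stats above (made-up 50ms block):
 * delta = 50,000,000 ns, so delta >> 10 ~= 48,828 (~microseconds) is
 * what account_scheduler_latency() sees, while delta >> 20 ~= 47
 * (~milliseconds) feeds the sleep profiler.
 */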
2735
Peter Zijlstraddc97292007-10-15 17:00:10 +02002736static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2737{
2738#ifdef CONFIG_SCHED_DEBUG
2739 s64 d = se->vruntime - cfs_rq->min_vruntime;
2740
2741 if (d < 0)
2742 d = -d;
2743
2744 if (d > 3*sysctl_sched_latency)
2745 schedstat_inc(cfs_rq, nr_spread_over);
2746#endif
2747}
2748
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002749static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002750place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2751{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02002752 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02002753
Peter Zijlstra2cb86002007-11-09 22:39:37 +01002754 /*
2755 * The 'current' period is already promised to the current tasks;
2756 * however, the extra weight of the new task will slow them down a
2757 * little. Place the new task so that it fits in the slot that
2758 * stays open at the end.
2759 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02002760 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02002761 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002762
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002763 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01002764 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002765 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02002766
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002767 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002768 * Halve their sleep time's effect, to allow
2769 * for a gentler effect of sleepers:
2770 */
2771 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2772 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02002773
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002774 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002775 }
2776
Mike Galbraithb5d9d732009-09-08 11:12:28 +02002777 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05302778 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002779}
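
/*
 * Example placement (illustrative: unscaled 6ms sched_latency, nice-0,
 * GENTLE_FAIR_SLEEPERS set): a waking task is offered min_vruntime - 3ms.
 * With min_vruntime at 100ms, a task whose old vruntime is 50ms is placed
 * at 97ms (bounded sleeper credit), while one whose old vruntime is 105ms
 * keeps 105ms, since max_vruntime() never lets it gain time by being
 * placed backwards.
 */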
2780
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002781static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2782
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002783static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002784enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002785{
2786 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002787 * Update the normalized vruntime before updating min_vruntime
Kamalesh Babulal0fc576d2013-06-27 11:24:18 +05302788 * through calling update_curr().
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002789 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002790 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002791 se->vruntime += cfs_rq->min_vruntime;
2792
2793 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002794 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002795 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002796 update_curr(cfs_rq);
Paul Turnerf269ae02012-10-04 13:18:31 +02002797 enqueue_entity_load_avg(cfs_rq, se, flags & ENQUEUE_WAKEUP);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002798 account_entity_enqueue(cfs_rq, se);
2799 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002800
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002801 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002802 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02002803 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02002804 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002805
Ingo Molnard2417e52007-08-09 11:16:47 +02002806 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02002807 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002808 if (se != cfs_rq->curr)
2809 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002810 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08002811
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002812 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08002813 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002814 check_enqueue_throttle(cfs_rq);
2815 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002816}
2817
Rik van Riel2c13c9192011-02-01 09:48:37 -05002818static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01002819{
Rik van Riel2c13c9192011-02-01 09:48:37 -05002820 for_each_sched_entity(se) {
2821 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstraf1044792012-02-11 06:05:00 +01002822 if (cfs_rq->last != se)
Rik van Riel2c13c9192011-02-01 09:48:37 -05002823 break;
Peter Zijlstraf1044792012-02-11 06:05:00 +01002824
2825 cfs_rq->last = NULL;
Rik van Riel2c13c9192011-02-01 09:48:37 -05002826 }
2827}
Peter Zijlstra2002c692008-11-11 11:52:33 +01002828
Rik van Riel2c13c9192011-02-01 09:48:37 -05002829static void __clear_buddies_next(struct sched_entity *se)
2830{
2831 for_each_sched_entity(se) {
2832 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstraf1044792012-02-11 06:05:00 +01002833 if (cfs_rq->next != se)
Rik van Riel2c13c9192011-02-01 09:48:37 -05002834 break;
Peter Zijlstraf1044792012-02-11 06:05:00 +01002835
2836 cfs_rq->next = NULL;
Rik van Riel2c13c9192011-02-01 09:48:37 -05002837 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01002838}
2839
Rik van Rielac53db52011-02-01 09:51:03 -05002840static void __clear_buddies_skip(struct sched_entity *se)
2841{
2842 for_each_sched_entity(se) {
2843 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstraf1044792012-02-11 06:05:00 +01002844 if (cfs_rq->skip != se)
Rik van Rielac53db52011-02-01 09:51:03 -05002845 break;
Peter Zijlstraf1044792012-02-11 06:05:00 +01002846
2847 cfs_rq->skip = NULL;
Rik van Rielac53db52011-02-01 09:51:03 -05002848 }
2849}
2850
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01002851static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
2852{
Rik van Riel2c13c9192011-02-01 09:48:37 -05002853 if (cfs_rq->last == se)
2854 __clear_buddies_last(se);
2855
2856 if (cfs_rq->next == se)
2857 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05002858
2859 if (cfs_rq->skip == se)
2860 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01002861}
2862
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07002863static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07002864
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002865static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002866dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002867{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002868 /*
2869 * Update run-time statistics of the 'current'.
2870 */
2871 update_curr(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002872 dequeue_entity_load_avg(cfs_rq, se, flags & DEQUEUE_SLEEP);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002873
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02002874 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002875 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02002876#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002877 if (entity_is_task(se)) {
2878 struct task_struct *tsk = task_of(se);
2879
2880 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002881 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002882 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002883 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002884 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02002885#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02002886 }
2887
Peter Zijlstra2002c692008-11-11 11:52:33 +01002888 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01002889
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002890 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002891 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002892 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002893 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002894
2895 /*
2896 * Normalize the entity after updating the min_vruntime because the
2897 * update can refer to the ->curr item and we need to reflect this
2898 * movement in our normalized position.
2899 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002900 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002901 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07002902
Paul Turnerd8b49862011-07-21 09:43:41 -07002903 /* return excess runtime on last dequeue */
2904 return_cfs_rq_runtime(cfs_rq);
2905
Peter Zijlstra1e876232011-05-17 16:21:10 -07002906 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002907 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002908}
2909
2910/*
2911 * Preempt the current task with a newly woken task if needed:
2912 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02002913static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02002914check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002915{
Peter Zijlstra11697832007-09-05 14:32:49 +02002916 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002917 struct sched_entity *se;
2918 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02002919
Peter Zijlstra6d0f0eb2007-10-15 17:00:05 +02002920 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02002921 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002922 if (delta_exec > ideal_runtime) {
Kirill Tkhai88751252014-06-29 00:03:57 +04002923 resched_curr(rq_of(cfs_rq));
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01002924 /*
2925 * The current task ran long enough, ensure it doesn't get
2926 * re-elected due to buddy favours.
2927 */
2928 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02002929 return;
2930 }
2931
2932 /*
2933 * Ensure that a task that missed wakeup preemption by a
2934 * narrow margin doesn't have to wait for a full slice.
2935 * This also mitigates buddy induced latencies under load.
2936 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02002937 if (delta_exec < sysctl_sched_min_granularity)
2938 return;
2939
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002940 se = __pick_first_entity(cfs_rq);
2941 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02002942
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002943 if (delta < 0)
2944 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01002945
Wang Xingchaof4cfb332011-09-16 13:35:52 -04002946 if (delta > ideal_runtime)
Kirill Tkhai88751252014-06-29 00:03:57 +04002947 resched_curr(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002948}
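
/*
 * Worked example (illustrative: unscaled 6ms latency, two nice-0 tasks):
 * sched_slice() gives each an ideal_runtime of 3ms.  curr is rescheduled
 * once it has run more than 3ms and, after at least
 * sysctl_sched_min_granularity, also whenever its vruntime leads the
 * leftmost entity's by more than 3ms.
 */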
2949
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002950static void
Ingo Molnar8494f412007-08-09 11:16:48 +02002951set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002952{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002953 /* 'current' is not kept within the tree. */
2954 if (se->on_rq) {
2955 /*
2956 * Any task has to be enqueued before it gets to execute on
2957 * a CPU. So account for the time it spent waiting on the
2958 * runqueue.
2959 */
2960 update_stats_wait_end(cfs_rq, se);
2961 __dequeue_entity(cfs_rq, se);
2962 }
2963
Ingo Molnar79303e92007-08-09 11:16:47 +02002964 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02002965 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02002966#ifdef CONFIG_SCHEDSTATS
2967 /*
2968 * Track our maximum slice length, if the CPU's load is at
2969 * least twice that of our own weight (i.e. don't track it
2970 * when there are only lesser-weight tasks around):
2971 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02002972 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002973 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02002974 se->sum_exec_runtime - se->prev_sum_exec_runtime);
2975 }
2976#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02002977 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002978}
2979
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02002980static int
2981wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
2982
Rik van Rielac53db52011-02-01 09:51:03 -05002983/*
2984 * Pick the next process, keeping these things in mind, in this order:
2985 * 1) keep things fair between processes/task groups
2986 * 2) pick the "next" process, since someone really wants that to run
2987 * 3) pick the "last" process, for cache locality
2988 * 4) do not run the "skip" process, if something else is available
2989 */
Peter Zijlstra678d5712012-02-11 06:05:00 +01002990static struct sched_entity *
2991pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01002992{
Peter Zijlstra678d5712012-02-11 06:05:00 +01002993 struct sched_entity *left = __pick_first_entity(cfs_rq);
2994 struct sched_entity *se;
2995
2996 /*
2997 * If curr is set we have to see if it's left of the leftmost entity
2998 * still in the tree, provided there was anything in the tree at all.
2999 */
3000 if (!left || (curr && entity_before(curr, left)))
3001 left = curr;
3002
3003 se = left; /* ideally we run the leftmost entity */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01003004
Rik van Rielac53db52011-02-01 09:51:03 -05003005 /*
3006 * Avoid running the skip buddy, if running something else can
3007 * be done without getting too unfair.
3008 */
3009 if (cfs_rq->skip == se) {
Peter Zijlstra678d5712012-02-11 06:05:00 +01003010 struct sched_entity *second;
3011
3012 if (se == curr) {
3013 second = __pick_first_entity(cfs_rq);
3014 } else {
3015 second = __pick_next_entity(se);
3016 if (!second || (curr && entity_before(curr, second)))
3017 second = curr;
3018 }
3019
Rik van Rielac53db52011-02-01 09:51:03 -05003020 if (second && wakeup_preempt_entity(second, left) < 1)
3021 se = second;
3022 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01003023
Mike Galbraithf685cea2009-10-23 23:09:22 +02003024 /*
3025 * Prefer last buddy, try to return the CPU to a preempted task.
3026 */
3027 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3028 se = cfs_rq->last;
3029
Rik van Rielac53db52011-02-01 09:51:03 -05003030 /*
3031 * Someone really wants this to run. If it's not unfair, run it.
3032 */
3033 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3034 se = cfs_rq->next;
3035
Mike Galbraithf685cea2009-10-23 23:09:22 +02003036 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01003037
3038 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01003039}
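
/*
 * Fairness bound for the buddy picks above: wakeup_preempt_entity(buddy,
 * left) < 1 holds when the buddy's vruntime exceeds the leftmost's by no
 * more than one (scaled) wakeup granularity, so honouring a next/last
 * buddy instead of the leftmost entity costs at most that bounded amount
 * of unfairness.
 */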
3040
Peter Zijlstra678d5712012-02-11 06:05:00 +01003041static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003042
Ingo Molnarab6cde22007-08-09 11:16:48 +02003043static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003044{
3045 /*
3046 * If still on the runqueue then deactivate_task()
3047 * was not called and update_curr() has to be done:
3048 */
3049 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02003050 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003051
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003052 /* throttle cfs_rqs exceeding runtime */
3053 check_cfs_rq_runtime(cfs_rq);
3054
Peter Zijlstraddc97292007-10-15 17:00:10 +02003055 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003056 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02003057 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003058 /* Put 'current' back into the tree. */
3059 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02003060 /* in !on_rq case, update occurred at dequeue */
Paul Turner9ee474f2012-10-04 13:18:30 +02003061 update_entity_load_avg(prev, 1);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003062 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02003063 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003064}
3065
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003066static void
3067entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003068{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003069 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003070 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003071 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003072 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003073
Paul Turner43365bd2010-12-15 19:10:17 -08003074 /*
Paul Turner9d85f212012-10-04 13:18:29 +02003075 * Ensure that runnable average is periodically updated.
3076 */
Paul Turner9ee474f2012-10-04 13:18:30 +02003077 update_entity_load_avg(curr, 1);
Paul Turneraff3e492012-10-04 13:18:30 +02003078 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstrabf0bd942013-07-26 23:48:42 +02003079 update_cfs_shares(cfs_rq);
Paul Turner9d85f212012-10-04 13:18:29 +02003080
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003081#ifdef CONFIG_SCHED_HRTICK
3082 /*
3083 * queued ticks are scheduled to match the slice, so don't bother
3084 * validating it and just reschedule.
3085 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003086 if (queued) {
Kirill Tkhai88751252014-06-29 00:03:57 +04003087 resched_curr(rq_of(cfs_rq));
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003088 return;
3089 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003090 /*
3091 * don't let the period tick interfere with the hrtick preemption
3092 */
3093 if (!sched_feat(DOUBLE_TICK) &&
3094 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3095 return;
3096#endif
3097
Yong Zhang2c2efae2011-07-29 16:20:33 +08003098 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02003099 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003100}
3101
Paul Turnerab84d312011-07-21 09:43:28 -07003102
3103/**************************************************
3104 * CFS bandwidth control machinery
3105 */
3106
3107#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02003108
3109#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01003110static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003111
3112static inline bool cfs_bandwidth_used(void)
3113{
Ingo Molnarc5905af2012-02-24 08:31:31 +01003114 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003115}
3116
Ben Segall1ee14e62013-10-16 11:16:12 -07003117void cfs_bandwidth_usage_inc(void)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003118{
Ben Segall1ee14e62013-10-16 11:16:12 -07003119 static_key_slow_inc(&__cfs_bandwidth_used);
3120}
3121
3122void cfs_bandwidth_usage_dec(void)
3123{
3124 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003125}
3126#else /* HAVE_JUMP_LABEL */
3127static bool cfs_bandwidth_used(void)
3128{
3129 return true;
3130}
3131
Ben Segall1ee14e62013-10-16 11:16:12 -07003132void cfs_bandwidth_usage_inc(void) {}
3133void cfs_bandwidth_usage_dec(void) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003134#endif /* HAVE_JUMP_LABEL */
3135
Paul Turnerab84d312011-07-21 09:43:28 -07003136/*
3137 * default period for cfs group bandwidth.
3138 * default: 0.1s, units: nanoseconds
3139 */
3140static inline u64 default_cfs_period(void)
3141{
3142 return 100000000ULL;
3143}
Paul Turnerec12cb72011-07-21 09:43:30 -07003144
3145static inline u64 sched_cfs_bandwidth_slice(void)
3146{
3147 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3148}
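
/*
 * Hypothetical cgroup configuration, for scale (the period below is the
 * default, the quota is made up):
 *
 *   echo 100000 > cpu.cfs_period_us    # 100ms period
 *   echo  25000 > cpu.cfs_quota_us     # caps the group at 25% of one CPU
 *
 * Each cfs_rq then draws from the global quota pool in slices of
 * sysctl_sched_cfs_bandwidth_slice (5ms by default).
 */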
3149
Paul Turnera9cf55b2011-07-21 09:43:32 -07003150/*
3151 * Replenish runtime according to assigned quota and update expiration time.
3152 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3153 * additional synchronization around rq->lock.
3154 *
3155 * requires cfs_b->lock
3156 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02003157void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07003158{
3159 u64 now;
3160
3161 if (cfs_b->quota == RUNTIME_INF)
3162 return;
3163
3164 now = sched_clock_cpu(smp_processor_id());
3165 cfs_b->runtime = cfs_b->quota;
3166 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3167}
3168
Peter Zijlstra029632f2011-10-25 10:00:11 +02003169static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3170{
3171 return &tg->cfs_bandwidth;
3172}
3173
Paul Turnerf1b17282012-10-04 13:18:31 +02003174/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
3175static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3176{
3177 if (unlikely(cfs_rq->throttle_count))
3178 return cfs_rq->throttled_clock_task;
3179
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003180 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02003181}
3182
Paul Turner85dac902011-07-21 09:43:33 -07003183/* returns 0 on failure to allocate runtime */
3184static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07003185{
3186 struct task_group *tg = cfs_rq->tg;
3187 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07003188 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07003189
3190 /* note: this is a positive sum as runtime_remaining <= 0 */
3191 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3192
3193 raw_spin_lock(&cfs_b->lock);
3194 if (cfs_b->quota == RUNTIME_INF)
3195 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07003196 else {
Paul Turnera9cf55b2011-07-21 09:43:32 -07003197 /*
3198 * If the bandwidth pool has become inactive, then at least one
3199 * period must have elapsed since the last consumption.
3200 * Refresh the global state and ensure the bandwidth timer becomes
3201 * active.
3202 */
3203 if (!cfs_b->timer_active) {
3204 __refill_cfs_bandwidth_runtime(cfs_b);
Roman Gushchin09dc4ab2014-05-19 15:10:09 +04003205 __start_cfs_bandwidth(cfs_b, false);
Paul Turnera9cf55b2011-07-21 09:43:32 -07003206 }
Paul Turner58088ad2011-07-21 09:43:31 -07003207
3208 if (cfs_b->runtime > 0) {
3209 amount = min(cfs_b->runtime, min_amount);
3210 cfs_b->runtime -= amount;
3211 cfs_b->idle = 0;
3212 }
Paul Turnerec12cb72011-07-21 09:43:30 -07003213 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07003214 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07003215 raw_spin_unlock(&cfs_b->lock);
3216
3217 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003218 /*
3219 * we may have advanced our local expiration to account for allowed
3220 * spread between our sched_clock and the one on which runtime was
3221 * issued.
3222 */
3223 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3224 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07003225
3226 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003227}
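
/*
 * Slice arithmetic above, with made-up numbers: if runtime_remaining is
 * -2ms and the slice is the default 5ms, min_amount = 5ms + 2ms = 7ms.
 * A global pool holding at least that much leaves us with
 * runtime_remaining = 5ms, one fresh slice after the debt is repaid.
 */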
3228
3229/*
3230 * Note: This depends on the synchronization provided by sched_clock and the
3231 * fact that rq->clock snapshots this value.
3232 */
3233static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3234{
3235 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07003236
3237 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003238 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07003239 return;
3240
3241 if (cfs_rq->runtime_remaining < 0)
3242 return;
3243
3244 /*
3245 * If the local deadline has passed we have to consider the
3246 * possibility that our sched_clock is 'fast' and the global deadline
3247 * has not truly expired.
3248 *
3249 * Fortunately we can determine whether this is the case by checking
Ben Segall51f21762014-05-19 15:49:45 -07003250 * whether the global deadline has advanced. It is valid to compare
3251 * cfs_b->runtime_expires without any locks since we only care about
3252 * exact equality, so a partial write will still work.
Paul Turnera9cf55b2011-07-21 09:43:32 -07003253 */
3254
Ben Segall51f21762014-05-19 15:49:45 -07003255 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
Paul Turnera9cf55b2011-07-21 09:43:32 -07003256 /* extend local deadline, drift is bounded above by 2 ticks */
3257 cfs_rq->runtime_expires += TICK_NSEC;
3258 } else {
3259 /* global deadline is ahead, expiration has passed */
3260 cfs_rq->runtime_remaining = 0;
3261 }
Paul Turnerec12cb72011-07-21 09:43:30 -07003262}
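
/*
 * Two illustrative outcomes of the comparison above: if the global pool
 * was refreshed since we cached runtime_expires, the deadlines differ
 * and the local one is merely extended by TICK_NSEC (our sched_clock ran
 * fast); if they still match, the period really ended and any remaining
 * local runtime is forfeited.
 */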
3263
Peter Zijlstra9dbdb152013-11-18 18:27:06 +01003264static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07003265{
Paul Turnera9cf55b2011-07-21 09:43:32 -07003266 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07003267 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003268 expire_cfs_rq_runtime(cfs_rq);
3269
3270 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07003271 return;
3272
Paul Turner85dac902011-07-21 09:43:33 -07003273 /*
3274 * if we're unable to extend our runtime we resched so that the active
3275 * hierarchy can be throttled
3276 */
3277 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
Kirill Tkhai88751252014-06-29 00:03:57 +04003278 resched_curr(rq_of(cfs_rq));
Paul Turnerec12cb72011-07-21 09:43:30 -07003279}
3280
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003281static __always_inline
Peter Zijlstra9dbdb152013-11-18 18:27:06 +01003282void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07003283{
Paul Turner56f570e2011-11-07 20:26:33 -08003284 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07003285 return;
3286
3287 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3288}
3289
Paul Turner85dac902011-07-21 09:43:33 -07003290static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3291{
Paul Turner56f570e2011-11-07 20:26:33 -08003292 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07003293}
3294
Paul Turner64660c82011-07-21 09:43:36 -07003295/* check whether cfs_rq, or any parent, is throttled */
3296static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3297{
Paul Turner56f570e2011-11-07 20:26:33 -08003298 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07003299}
3300
3301/*
3302 * Ensure that neither of the group entities corresponding to src_cpu or
3303 * dest_cpu is a member of a throttled hierarchy when performing group
3304 * load-balance operations.
3305 */
3306static inline int throttled_lb_pair(struct task_group *tg,
3307 int src_cpu, int dest_cpu)
3308{
3309 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3310
3311 src_cfs_rq = tg->cfs_rq[src_cpu];
3312 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3313
3314 return throttled_hierarchy(src_cfs_rq) ||
3315 throttled_hierarchy(dest_cfs_rq);
3316}
3317
3318/* updated child weight may affect parent so we have to do this bottom up */
3319static int tg_unthrottle_up(struct task_group *tg, void *data)
3320{
3321 struct rq *rq = data;
3322 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3323
3324 cfs_rq->throttle_count--;
3325#ifdef CONFIG_SMP
3326 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02003327 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003328 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02003329 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07003330 }
3331#endif
3332
3333 return 0;
3334}
3335
3336static int tg_throttle_down(struct task_group *tg, void *data)
3337{
3338 struct rq *rq = data;
3339 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3340
Paul Turner82958362012-10-04 13:18:31 +02003341 /* group is entering throttled state, stop time */
3342 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003343 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07003344 cfs_rq->throttle_count++;
3345
3346 return 0;
3347}
3348
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003349static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07003350{
3351 struct rq *rq = rq_of(cfs_rq);
3352 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3353 struct sched_entity *se;
3354 long task_delta, dequeue = 1;
3355
3356 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3357
Paul Turnerf1b17282012-10-04 13:18:31 +02003358 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07003359 rcu_read_lock();
3360 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3361 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07003362
3363 task_delta = cfs_rq->h_nr_running;
3364 for_each_sched_entity(se) {
3365 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3366 /* throttled entity or throttle-on-deactivate */
3367 if (!se->on_rq)
3368 break;
3369
3370 if (dequeue)
3371 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3372 qcfs_rq->h_nr_running -= task_delta;
3373
3374 if (qcfs_rq->load.weight)
3375 dequeue = 0;
3376 }
3377
3378 if (!se)
Kirill Tkhai72465442014-05-09 03:00:14 +04003379 sub_nr_running(rq, task_delta);
Paul Turner85dac902011-07-21 09:43:33 -07003380
3381 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003382 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07003383 raw_spin_lock(&cfs_b->lock);
Ben Segallc06f04c2014-06-20 15:21:20 -07003384 /*
3385 * Add to the _head_ of the list, so that an already-started
3386 * distribute_cfs_runtime will not see us
3387 */
3388 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
Ben Segallf9f9ffc2013-10-16 11:16:32 -07003389 if (!cfs_b->timer_active)
Roman Gushchin09dc4ab2014-05-19 15:10:09 +04003390 __start_cfs_bandwidth(cfs_b, false);
Paul Turner85dac902011-07-21 09:43:33 -07003391 raw_spin_unlock(&cfs_b->lock);
3392}
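
/*
 * Sketch of the hierarchy walk above: the throttled group's se is
 * dequeued from its parent, and the dequeue stops at the first ancestor
 * that still has other load (load.weight != 0) since that ancestor must
 * stay queued; only h_nr_running is adjusted the rest of the way up.
 */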
3393
Peter Zijlstra029632f2011-10-25 10:00:11 +02003394void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07003395{
3396 struct rq *rq = rq_of(cfs_rq);
3397 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3398 struct sched_entity *se;
3399 int enqueue = 1;
3400 long task_delta;
3401
Michael Wang22b958d2013-06-04 14:23:39 +08003402 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07003403
3404 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02003405
3406 update_rq_clock(rq);
3407
Paul Turner671fd9d2011-07-21 09:43:34 -07003408 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003409 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07003410 list_del_rcu(&cfs_rq->throttled_list);
3411 raw_spin_unlock(&cfs_b->lock);
3412
Paul Turner64660c82011-07-21 09:43:36 -07003413 /* update hierarchical throttle state */
3414 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3415
Paul Turner671fd9d2011-07-21 09:43:34 -07003416 if (!cfs_rq->load.weight)
3417 return;
3418
3419 task_delta = cfs_rq->h_nr_running;
3420 for_each_sched_entity(se) {
3421 if (se->on_rq)
3422 enqueue = 0;
3423
3424 cfs_rq = cfs_rq_of(se);
3425 if (enqueue)
3426 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3427 cfs_rq->h_nr_running += task_delta;
3428
3429 if (cfs_rq_throttled(cfs_rq))
3430 break;
3431 }
3432
3433 if (!se)
Kirill Tkhai72465442014-05-09 03:00:14 +04003434 add_nr_running(rq, task_delta);
Paul Turner671fd9d2011-07-21 09:43:34 -07003435
3436 /* determine whether we need to wake up a potentially idle cpu */
3437 if (rq->curr == rq->idle && rq->cfs.nr_running)
Kirill Tkhai88751252014-06-29 00:03:57 +04003438 resched_curr(rq);
Paul Turner671fd9d2011-07-21 09:43:34 -07003439}
3440
3441static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3442 u64 remaining, u64 expires)
3443{
3444 struct cfs_rq *cfs_rq;
Ben Segallc06f04c2014-06-20 15:21:20 -07003445 u64 runtime;
3446 u64 starting_runtime = remaining;
Paul Turner671fd9d2011-07-21 09:43:34 -07003447
3448 rcu_read_lock();
3449 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3450 throttled_list) {
3451 struct rq *rq = rq_of(cfs_rq);
3452
3453 raw_spin_lock(&rq->lock);
3454 if (!cfs_rq_throttled(cfs_rq))
3455 goto next;
3456
3457 runtime = -cfs_rq->runtime_remaining + 1;
3458 if (runtime > remaining)
3459 runtime = remaining;
3460 remaining -= runtime;
3461
3462 cfs_rq->runtime_remaining += runtime;
3463 cfs_rq->runtime_expires = expires;
3464
3465 /* we check whether we're throttled above */
3466 if (cfs_rq->runtime_remaining > 0)
3467 unthrottle_cfs_rq(cfs_rq);
3468
3469next:
3470 raw_spin_unlock(&rq->lock);
3471
3472 if (!remaining)
3473 break;
3474 }
3475 rcu_read_unlock();
3476
Ben Segallc06f04c2014-06-20 15:21:20 -07003477 return starting_runtime - remaining;
Paul Turner671fd9d2011-07-21 09:43:34 -07003478}
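
/*
 * Distribution example (made-up numbers): with remaining = 10ms and two
 * throttled cfs_rqs at runtime_remaining of -3ms and -1ms, the first is
 * topped up with 3ms + 1ns, the second with 1ms + 1ns, both are
 * unthrottled, and ~4ms is reported back to the caller as consumed.
 */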
3479
Paul Turner58088ad2011-07-21 09:43:31 -07003480/*
3481 * Responsible for refilling a task_group's bandwidth and unthrottling its
3482 * cfs_rqs as appropriate. If there has been no activity within the last
3483 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3484 * used to track this state.
3485 */
3486static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3487{
Paul Turner671fd9d2011-07-21 09:43:34 -07003488 u64 runtime, runtime_expires;
Ben Segall51f21762014-05-19 15:49:45 -07003489 int throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07003490
Paul Turner58088ad2011-07-21 09:43:31 -07003491 /* no need to continue the timer with no bandwidth constraint */
3492 if (cfs_b->quota == RUNTIME_INF)
Ben Segall51f21762014-05-19 15:49:45 -07003493 goto out_deactivate;
Paul Turner58088ad2011-07-21 09:43:31 -07003494
Paul Turner671fd9d2011-07-21 09:43:34 -07003495 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
Nikhil Raoe8da1b12011-07-21 09:43:40 -07003496 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07003497
Ben Segall51f21762014-05-19 15:49:45 -07003498 /*
3499 * idle depends on !throttled (for the case of a large deficit), and if
3500 * we're going inactive then everything else can be deferred
3501 */
3502 if (cfs_b->idle && !throttled)
3503 goto out_deactivate;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003504
Ben Segall927b54f2013-10-16 11:16:22 -07003505 /*
3506 * if we have relooped after returning idle once, we need to update our
3507 * status as actually running, so that other cpus doing
3508 * __start_cfs_bandwidth will stop trying to cancel us.
3509 */
3510 cfs_b->timer_active = 1;
3511
Paul Turnera9cf55b2011-07-21 09:43:32 -07003512 __refill_cfs_bandwidth_runtime(cfs_b);
3513
Paul Turner671fd9d2011-07-21 09:43:34 -07003514 if (!throttled) {
3515 /* mark as potentially idle for the upcoming period */
3516 cfs_b->idle = 1;
Ben Segall51f21762014-05-19 15:49:45 -07003517 return 0;
Paul Turner671fd9d2011-07-21 09:43:34 -07003518 }
Paul Turner58088ad2011-07-21 09:43:31 -07003519
Nikhil Raoe8da1b12011-07-21 09:43:40 -07003520 /* account preceding periods in which throttling occurred */
3521 cfs_b->nr_throttled += overrun;
3522
Paul Turner671fd9d2011-07-21 09:43:34 -07003523 runtime_expires = cfs_b->runtime_expires;
Paul Turner671fd9d2011-07-21 09:43:34 -07003524
3525 /*
Ben Segallc06f04c2014-06-20 15:21:20 -07003526 * This check is repeated as we are holding onto the new bandwidth while
3527 * we unthrottle. This can potentially race with an unthrottled group
3528 * trying to acquire new bandwidth from the global pool. This can result
3529 * in us over-using our runtime if it is all used during this loop, but
3530 * only by limited amounts in that extreme case.
Paul Turner671fd9d2011-07-21 09:43:34 -07003531 */
Ben Segallc06f04c2014-06-20 15:21:20 -07003532 while (throttled && cfs_b->runtime > 0) {
3533 runtime = cfs_b->runtime;
Paul Turner671fd9d2011-07-21 09:43:34 -07003534 raw_spin_unlock(&cfs_b->lock);
3535 /* we can't nest cfs_b->lock while distributing bandwidth */
3536 runtime = distribute_cfs_runtime(cfs_b, runtime,
3537 runtime_expires);
3538 raw_spin_lock(&cfs_b->lock);
3539
3540 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
Ben Segallc06f04c2014-06-20 15:21:20 -07003541
3542 cfs_b->runtime -= min(runtime, cfs_b->runtime);
Paul Turner671fd9d2011-07-21 09:43:34 -07003543 }
3544
Paul Turner671fd9d2011-07-21 09:43:34 -07003545 /*
3546 * While we are ensured activity in the period following an
3547 * unthrottle, this also covers the case in which the new bandwidth is
3548 * insufficient to cover the existing bandwidth deficit. (Forcing the
3549 * timer to remain active while there are any throttled entities.)
3550 */
3551 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07003552
Ben Segall51f21762014-05-19 15:49:45 -07003553 return 0;
3554
3555out_deactivate:
3556 cfs_b->timer_active = 0;
3557 return 1;
Paul Turner58088ad2011-07-21 09:43:31 -07003558}
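
/*
 * Period-timer lifecycle, summarized: each expiry refills the pool and
 * hands runtime to throttled cfs_rqs.  A period that saw throttling
 * clears cfs_b->idle, a quiet one sets it, and a second consecutive
 * quiet period returns 1 so the hrtimer is parked until activity
 * restarts it via __start_cfs_bandwidth().
 */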
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003559
Paul Turnerd8b49862011-07-21 09:43:41 -07003560/* a cfs_rq won't donate quota below this amount */
3561static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3562/* minimum remaining period time to redistribute slack quota */
3563static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3564/* how long we wait to gather additional slack before distributing */
3565static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3566
Ben Segalldb06e782013-10-16 11:16:17 -07003567/*
3568 * Are we near the end of the current quota period?
3569 *
3570 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
3571 * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
3572 * migrate_hrtimers, base is never cleared, so we are fine.
3573 */
Paul Turnerd8b49862011-07-21 09:43:41 -07003574static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3575{
3576 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3577 u64 remaining;
3578
3579 /* if the call-back is running a quota refresh is already occurring */
3580 if (hrtimer_callback_running(refresh_timer))
3581 return 1;
3582
3583 /* is a quota refresh about to occur? */
3584 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3585 if (remaining < min_expire)
3586 return 1;
3587
3588 return 0;
3589}
3590
3591static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3592{
3593 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3594
3595 /* if there's a quota refresh soon don't bother with slack */
3596 if (runtime_refresh_within(cfs_b, min_left))
3597 return;
3598
3599 start_bandwidth_timer(&cfs_b->slack_timer,
3600 ns_to_ktime(cfs_bandwidth_slack_period));
3601}
3602
3603/* we know any runtime found here is valid as update_curr() precedes return */
3604static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3605{
3606 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3607 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3608
3609 if (slack_runtime <= 0)
3610 return;
3611
3612 raw_spin_lock(&cfs_b->lock);
3613 if (cfs_b->quota != RUNTIME_INF &&
3614 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3615 cfs_b->runtime += slack_runtime;
3616
3617 /* we are under rq->lock, defer unthrottling using a timer */
3618 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3619 !list_empty(&cfs_b->throttled_cfs_rq))
3620 start_cfs_slack_bandwidth(cfs_b);
3621 }
3622 raw_spin_unlock(&cfs_b->lock);
3623
3624 /* even if it's not valid for return we don't want to try again */
3625 cfs_rq->runtime_remaining -= slack_runtime;
3626}
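
/*
 * Slack arithmetic above, with made-up numbers: a cfs_rq going idle with
 * runtime_remaining = 3ms keeps min_cfs_rq_runtime (1ms) and returns
 * slack_runtime = 2ms to the global pool, arming the 5ms slack timer if
 * more than a slice has pooled and someone is still throttled.
 */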
3627
3628static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3629{
Paul Turner56f570e2011-11-07 20:26:33 -08003630 if (!cfs_bandwidth_used())
3631 return;
3632
Paul Turnerfccfdc62011-11-07 20:26:34 -08003633 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07003634 return;
3635
3636 __return_cfs_rq_runtime(cfs_rq);
3637}
3638
3639/*
3640 * This is done with a timer (instead of inline with bandwidth return) since
3641 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3642 */
3643static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3644{
3645 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3646 u64 expires;
3647
3648 /* confirm we're still not at a refresh boundary */
Paul Turnerd8b49862011-07-21 09:43:41 -07003649 raw_spin_lock(&cfs_b->lock);
Ben Segalldb06e782013-10-16 11:16:17 -07003650 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3651 raw_spin_unlock(&cfs_b->lock);
3652 return;
3653 }
3654
Ben Segallc06f04c2014-06-20 15:21:20 -07003655 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
Paul Turnerd8b49862011-07-21 09:43:41 -07003656 runtime = cfs_b->runtime;
Ben Segallc06f04c2014-06-20 15:21:20 -07003657
Paul Turnerd8b49862011-07-21 09:43:41 -07003658 expires = cfs_b->runtime_expires;
3659 raw_spin_unlock(&cfs_b->lock);
3660
3661 if (!runtime)
3662 return;
3663
3664 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3665
3666 raw_spin_lock(&cfs_b->lock);
3667 if (expires == cfs_b->runtime_expires)
Ben Segallc06f04c2014-06-20 15:21:20 -07003668 cfs_b->runtime -= min(runtime, cfs_b->runtime);
Paul Turnerd8b49862011-07-21 09:43:41 -07003669 raw_spin_unlock(&cfs_b->lock);
3670}
3671
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003672/*
3673 * When a group wakes up we want to make sure that its quota is not already
3674 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3675 * runtime, as update_curr() throttling cannot trigger until it's on-rq.
3676 */
3677static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3678{
Paul Turner56f570e2011-11-07 20:26:33 -08003679 if (!cfs_bandwidth_used())
3680 return;
3681
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003682 /* an active group must be handled by the update_curr()->put() path */
3683 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3684 return;
3685
3686 /* ensure the group is not already throttled */
3687 if (cfs_rq_throttled(cfs_rq))
3688 return;
3689
3690 /* update runtime allocation */
3691 account_cfs_rq_runtime(cfs_rq, 0);
3692 if (cfs_rq->runtime_remaining <= 0)
3693 throttle_cfs_rq(cfs_rq);
3694}
3695
3696/* conditionally throttle active cfs_rq's from put_prev_entity() */
Peter Zijlstra678d5712012-02-11 06:05:00 +01003697static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003698{
Paul Turner56f570e2011-11-07 20:26:33 -08003699 if (!cfs_bandwidth_used())
Peter Zijlstra678d5712012-02-11 06:05:00 +01003700 return false;
Paul Turner56f570e2011-11-07 20:26:33 -08003701
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003702 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
Peter Zijlstra678d5712012-02-11 06:05:00 +01003703 return false;
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003704
3705 /*
3706 * it's possible for a throttled entity to be forced into a running
3707 * state (e.g. set_curr_task), in this case we're finished.
3708 */
3709 if (cfs_rq_throttled(cfs_rq))
Peter Zijlstra678d5712012-02-11 06:05:00 +01003710 return true;
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003711
3712 throttle_cfs_rq(cfs_rq);
Peter Zijlstra678d5712012-02-11 06:05:00 +01003713 return true;
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003714}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003715
Peter Zijlstra029632f2011-10-25 10:00:11 +02003716static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3717{
3718 struct cfs_bandwidth *cfs_b =
3719 container_of(timer, struct cfs_bandwidth, slack_timer);
3720 do_sched_cfs_slack_timer(cfs_b);
3721
3722 return HRTIMER_NORESTART;
3723}
3724
3725static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3726{
3727 struct cfs_bandwidth *cfs_b =
3728 container_of(timer, struct cfs_bandwidth, period_timer);
3729 ktime_t now;
3730 int overrun;
3731 int idle = 0;
3732
Ben Segall51f21762014-05-19 15:49:45 -07003733 raw_spin_lock(&cfs_b->lock);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003734 for (;;) {
3735 now = hrtimer_cb_get_time(timer);
3736 overrun = hrtimer_forward(timer, now, cfs_b->period);
3737
3738 if (!overrun)
3739 break;
3740
3741 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3742 }
Ben Segall51f21762014-05-19 15:49:45 -07003743 raw_spin_unlock(&cfs_b->lock);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003744
3745 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3746}
3747
3748void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3749{
3750 raw_spin_lock_init(&cfs_b->lock);
3751 cfs_b->runtime = 0;
3752 cfs_b->quota = RUNTIME_INF;
3753 cfs_b->period = ns_to_ktime(default_cfs_period());
3754
3755 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
3756 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3757 cfs_b->period_timer.function = sched_cfs_period_timer;
3758 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3759 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3760}
3761
3762static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3763{
3764 cfs_rq->runtime_enabled = 0;
3765 INIT_LIST_HEAD(&cfs_rq->throttled_list);
3766}
3767
3768/* requires cfs_b->lock, may release to reprogram timer */
Roman Gushchin09dc4ab2014-05-19 15:10:09 +04003769void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003770{
3771 /*
3772 * The timer may be active because we're trying to set a new bandwidth
3773 * period or because we're racing with the tear-down path
3774 * (timer_active==0 becomes visible before the hrtimer call-back
3775 * terminates). In either case we ensure that it's re-programmed
3776 */
Ben Segall927b54f2013-10-16 11:16:22 -07003777 while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
3778 hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
3779 /* bounce the lock to allow do_sched_cfs_period_timer to run */
Peter Zijlstra029632f2011-10-25 10:00:11 +02003780 raw_spin_unlock(&cfs_b->lock);
Ben Segall927b54f2013-10-16 11:16:22 -07003781 cpu_relax();
Peter Zijlstra029632f2011-10-25 10:00:11 +02003782 raw_spin_lock(&cfs_b->lock);
3783 /* if someone else restarted the timer then we're done */
Roman Gushchin09dc4ab2014-05-19 15:10:09 +04003784 if (!force && cfs_b->timer_active)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003785 return;
3786 }
3787
3788 cfs_b->timer_active = 1;
3789 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
3790}
3791
3792static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3793{
3794 hrtimer_cancel(&cfs_b->period_timer);
3795 hrtimer_cancel(&cfs_b->slack_timer);
3796}
3797
Kirill Tkhai0e59bda2014-06-25 12:19:42 +04003798static void __maybe_unused update_runtime_enabled(struct rq *rq)
3799{
3800 struct cfs_rq *cfs_rq;
3801
3802 for_each_leaf_cfs_rq(rq, cfs_rq) {
3803 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
3804
3805 raw_spin_lock(&cfs_b->lock);
3806 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
3807 raw_spin_unlock(&cfs_b->lock);
3808 }
3809}
3810
Arnd Bergmann38dc3342013-01-25 14:14:22 +00003811static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003812{
3813 struct cfs_rq *cfs_rq;
3814
3815 for_each_leaf_cfs_rq(rq, cfs_rq) {
Peter Zijlstra029632f2011-10-25 10:00:11 +02003816 if (!cfs_rq->runtime_enabled)
3817 continue;
3818
3819 /*
3820 * clock_task is not advancing so we just need to make sure
3821 * there's some valid quota amount
3822 */
Ben Segall51f21762014-05-19 15:49:45 -07003823 cfs_rq->runtime_remaining = 1;
Kirill Tkhai0e59bda2014-06-25 12:19:42 +04003824 /*
3825 * Offline rq is schedulable till cpu is completely disabled
3826 * in take_cpu_down(), so we prevent new cfs throttling here.
3827 */
3828 cfs_rq->runtime_enabled = 0;
3829
Peter Zijlstra029632f2011-10-25 10:00:11 +02003830 if (cfs_rq_throttled(cfs_rq))
3831 unthrottle_cfs_rq(cfs_rq);
3832 }
3833}
3834
3835#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02003836static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3837{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003838 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02003839}
3840
Peter Zijlstra9dbdb152013-11-18 18:27:06 +01003841static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
Peter Zijlstra678d5712012-02-11 06:05:00 +01003842static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003843static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003844static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07003845
3846static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3847{
3848 return 0;
3849}
Paul Turner64660c82011-07-21 09:43:36 -07003850
3851static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3852{
3853 return 0;
3854}
3855
3856static inline int throttled_lb_pair(struct task_group *tg,
3857 int src_cpu, int dest_cpu)
3858{
3859 return 0;
3860}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003861
3862void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
3863
3864#ifdef CONFIG_FAIR_GROUP_SCHED
3865static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07003866#endif
3867
Peter Zijlstra029632f2011-10-25 10:00:11 +02003868static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3869{
3870 return NULL;
3871}
3872static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Kirill Tkhai0e59bda2014-06-25 12:19:42 +04003873static inline void update_runtime_enabled(struct rq *rq) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07003874static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003875
3876#endif /* CONFIG_CFS_BANDWIDTH */
3877
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003878/**************************************************
3879 * CFS operations on tasks:
3880 */
3881
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003882#ifdef CONFIG_SCHED_HRTICK
3883static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
3884{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003885 struct sched_entity *se = &p->se;
3886 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3887
3888 WARN_ON(task_rq(p) != rq);
3889
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003890 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003891 u64 slice = sched_slice(cfs_rq, se);
3892 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
3893 s64 delta = slice - ran;
3894
3895 if (delta < 0) {
3896 if (rq->curr == p)
Kirill Tkhai88751252014-06-29 00:03:57 +04003897 resched_curr(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003898 return;
3899 }
3900
3901 /*
3902 * Don't schedule slices shorter than 10000ns, that just
3903 * doesn't make sense. Rely on vruntime for fairness.
3904 */
Peter Zijlstra31656512008-07-18 18:01:23 +02003905 if (rq->curr != p)
Peter Zijlstra157124c2008-07-28 11:53:11 +02003906 delta = max_t(s64, 10000LL, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003907
Peter Zijlstra31656512008-07-18 18:01:23 +02003908 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003909 }
3910}
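/*
 * For example: with a 6ms slice of which 4.5ms has already been
 * consumed, hrtick_start_fair() arms the hrtimer roughly 1.5ms out,
 * so the slice ends right on time instead of waiting for the next
 * scheduler tick.
 */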
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003911
3912/*
3913 * called from enqueue/dequeue and updates the hrtick when the
3914 * current task is from our class and nr_running is low enough
3915 * to matter.
3916 */
3917static void hrtick_update(struct rq *rq)
3918{
3919 struct task_struct *curr = rq->curr;
3920
Mike Galbraithb39e66e2011-11-22 15:20:07 +01003921 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003922 return;
3923
3924 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
3925 hrtick_start_fair(rq, curr);
3926}
Dhaval Giani55e12e52008-06-24 23:39:43 +05303927#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003928static inline void
3929hrtick_start_fair(struct rq *rq, struct task_struct *p)
3930{
3931}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003932
3933static inline void hrtick_update(struct rq *rq)
3934{
3935}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003936#endif
3937
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003938/*
3939 * The enqueue_task method is called before nr_running is
3940 * increased. Here we update the fair scheduling stats and
3941 * then put the task into the rbtree:
3942 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00003943static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003944enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003945{
3946 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003947 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003948
3949 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003950 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003951 break;
3952 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003953 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07003954
3955 /*
3956 * end evaluation on encountering a throttled cfs_rq
3957 *
3958 * note: in the case of encountering a throttled cfs_rq we will
3959 * post the final h_nr_running increment below.
3960 */
3961 if (cfs_rq_throttled(cfs_rq))
3962 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07003963 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07003964
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003965 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003966 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003967
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003968 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08003969 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07003970 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003971
Paul Turner85dac902011-07-21 09:43:33 -07003972 if (cfs_rq_throttled(cfs_rq))
3973 break;
3974
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003975 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003976 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003977 }
3978
Ben Segall18bf2802012-10-04 12:51:20 +02003979 if (!se) {
3980 update_rq_runnable_avg(rq, rq->nr_running);
Kirill Tkhai72465442014-05-09 03:00:14 +04003981 add_nr_running(rq, 1);
Ben Segall18bf2802012-10-04 12:51:20 +02003982 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02003983 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003984}
3985
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003986static void set_next_buddy(struct sched_entity *se);
3987
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003988/*
3989 * The dequeue_task method is called before nr_running is
3990 * decreased. We remove the task from the rbtree and
3991 * update the fair scheduling stats:
3992 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003993static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003994{
3995 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01003996 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07003997 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003998
3999 for_each_sched_entity(se) {
4000 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004001 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07004002
4003 /*
4004 * end evaluation on encountering a throttled cfs_rq
4005 *
4006 * note: in the case of encountering a throttled cfs_rq we will
4007 * post the final h_nr_running decrement below.
4008 */
4009 if (cfs_rq_throttled(cfs_rq))
4010 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07004011 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004012
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004013 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004014 if (cfs_rq->load.weight) {
4015 /*
4016 * Bias pick_next to pick a task from this cfs_rq, as
4017 * p is sleeping when it is within its sched_slice.
4018 */
4019 if (task_sleep && parent_entity(se))
4020 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07004021
4022 /* avoid re-evaluating load for this entity */
4023 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004024 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004025 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004026 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004027 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004028
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004029 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08004030 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07004031 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004032
Paul Turner85dac902011-07-21 09:43:33 -07004033 if (cfs_rq_throttled(cfs_rq))
4034 break;
4035
Linus Torvalds17bc14b2012-12-14 07:20:43 -08004036 update_cfs_shares(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02004037 update_entity_load_avg(se, 1);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004038 }
4039
Ben Segall18bf2802012-10-04 12:51:20 +02004040 if (!se) {
Kirill Tkhai72465442014-05-09 03:00:14 +04004041 sub_nr_running(rq, 1);
Ben Segall18bf2802012-10-04 12:51:20 +02004042 update_rq_runnable_avg(rq, 1);
4043 }
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02004044 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004045}
4046
Gregory Haskinse7693a32008-01-25 21:08:09 +01004047#ifdef CONFIG_SMP
Peter Zijlstra029632f2011-10-25 10:00:11 +02004048/* Used instead of source_load when we know the type == 0 */
4049static unsigned long weighted_cpuload(const int cpu)
4050{
Alex Shib92486c2013-06-20 10:18:50 +08004051 return cpu_rq(cpu)->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02004052}
4053
4054/*
4055 * Return a low guess at the load of a migration-source cpu weighted
4056 * according to the scheduling class and "nice" value.
4057 *
4058 * We want to under-estimate the load of migration sources, to
4059 * balance conservatively.
4060 */
4061static unsigned long source_load(int cpu, int type)
4062{
4063 struct rq *rq = cpu_rq(cpu);
4064 unsigned long total = weighted_cpuload(cpu);
4065
4066 if (type == 0 || !sched_feat(LB_BIAS))
4067 return total;
4068
4069 return min(rq->cpu_load[type-1], total);
4070}
4071
4072/*
4073 * Return a high guess at the load of a migration-target cpu weighted
4074 * according to the scheduling class and "nice" value.
4075 */
4076static unsigned long target_load(int cpu, int type)
4077{
4078 struct rq *rq = cpu_rq(cpu);
4079 unsigned long total = weighted_cpuload(cpu);
4080
4081 if (type == 0 || !sched_feat(LB_BIAS))
4082 return total;
4083
4084 return max(rq->cpu_load[type-1], total);
4085}
4086
Nicolas Pitreced549f2014-05-26 18:19:38 -04004087static unsigned long capacity_of(int cpu)
Peter Zijlstra029632f2011-10-25 10:00:11 +02004088{
Nicolas Pitreced549f2014-05-26 18:19:38 -04004089 return cpu_rq(cpu)->cpu_capacity;
Peter Zijlstra029632f2011-10-25 10:00:11 +02004090}
4091
4092static unsigned long cpu_avg_load_per_task(int cpu)
4093{
4094 struct rq *rq = cpu_rq(cpu);
4095 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
Alex Shib92486c2013-06-20 10:18:50 +08004096 unsigned long load_avg = rq->cfs.runnable_load_avg;
Peter Zijlstra029632f2011-10-25 10:00:11 +02004097
4098 if (nr_running)
Alex Shib92486c2013-06-20 10:18:50 +08004099 return load_avg / nr_running;
Peter Zijlstra029632f2011-10-25 10:00:11 +02004100
4101 return 0;
4102}
4103
Michael Wang62470412013-07-04 12:55:51 +08004104static void record_wakee(struct task_struct *p)
4105{
4106 /*
 4107	 * Rough decay (wiping) for cost saving; don't worry
 4108	 * about the boundary, a really active task won't care
 4109	 * about the loss.
4110 */
Manuel Schölling2538d962014-05-22 19:45:23 +02004111 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
Rik van Riel096aa332014-05-16 00:13:32 -04004112 current->wakee_flips >>= 1;
Michael Wang62470412013-07-04 12:55:51 +08004113 current->wakee_flip_decay_ts = jiffies;
4114 }
4115
4116 if (current->last_wakee != p) {
4117 current->last_wakee = p;
4118 current->wakee_flips++;
4119 }
4120}
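/*
 * Resulting behaviour, roughly: a pair of tasks that only ever wake
 * each other keeps wakee_flips near zero (last_wakee rarely changes),
 * while a dispatcher that round-robins wakeups over many workers
 * accumulates a large flip count, halved at most about once a second.
 */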
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004121
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02004122static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004123{
4124 struct sched_entity *se = &p->se;
4125 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02004126 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004127
Peter Zijlstra3fe16982011-04-05 17:23:48 +02004128#ifndef CONFIG_64BIT
4129 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02004130
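	/*
	 * On 32-bit the 64-bit min_vruntime cannot be read atomically:
	 * the writer updates min_vruntime_copy only after min_vruntime,
	 * with a barrier in between, so re-reading until the two match
	 * guarantees we did not observe a torn value.
	 */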
Peter Zijlstra3fe16982011-04-05 17:23:48 +02004131 do {
4132 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4133 smp_rmb();
4134 min_vruntime = cfs_rq->min_vruntime;
4135 } while (min_vruntime != min_vruntime_copy);
4136#else
4137 min_vruntime = cfs_rq->min_vruntime;
4138#endif
4139
4140 se->vruntime -= min_vruntime;
Michael Wang62470412013-07-04 12:55:51 +08004141 record_wakee(p);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004142}
4143
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004144#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02004145/*
4146 * effective_load() calculates the load change as seen from the root_task_group
4147 *
4148 * Adding load to a group doesn't make a group heavier, but can cause movement
4149 * of group shares between cpus. Assuming the shares were perfectly aligned one
4150 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004151 *
4152 * Calculate the effective load difference if @wl is added (subtracted) to @tg
4153 * on this @cpu and results in a total addition (subtraction) of @wg to the
4154 * total group weight.
4155 *
4156 * Given a runqueue weight distribution (rw_i) we can compute a shares
4157 * distribution (s_i) using:
4158 *
4159 * s_i = rw_i / \Sum rw_j (1)
4160 *
4161 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4162 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4163 * shares distribution (s_i):
4164 *
4165 * rw_i = { 2, 4, 1, 0 }
4166 * s_i = { 2/7, 4/7, 1/7, 0 }
4167 *
4168 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4169 * task used to run on and the CPU the waker is running on), we need to
4170 * compute the effect of waking a task on either CPU and, in case of a sync
4171 * wakeup, compute the effect of the current task going to sleep.
4172 *
4173 * So for a change of @wl to the local @cpu with an overall group weight change
4174 * of @wl we can compute the new shares distribution (s'_i) using:
4175 *
4176 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
4177 *
4178 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4179 * differences in waking a task to CPU 0. The additional task changes the
4180 * weight and shares distributions like:
4181 *
4182 * rw'_i = { 3, 4, 1, 0 }
4183 * s'_i = { 3/8, 4/8, 1/8, 0 }
4184 *
4185 * We can then compute the difference in effective weight by using:
4186 *
4187 * dw_i = S * (s'_i - s_i) (3)
4188 *
4189 * Where 'S' is the group weight as seen by its parent.
4190 *
4191 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
4192 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
4193 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02004194 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004195static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004196{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004197 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02004198
Rik van Riel9722c2d2014-01-06 11:39:12 +00004199 if (!tg->parent) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02004200 return wl;
4201
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004202 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004203 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004204
Paul Turner977dda72011-01-14 17:57:50 -08004205 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004206
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004207 /*
4208 * W = @wg + \Sum rw_j
4209 */
4210 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004211
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004212 /*
4213 * w = rw_i + @wl
4214 */
4215 w = se->my_q->load.weight + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02004216
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004217 /*
4218 * wl = S * s'_i; see (2)
4219 */
4220 if (W > 0 && w < W)
4221 wl = (w * tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08004222 else
4223 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02004224
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004225 /*
4226 * Per the above, wl is the new se->load.weight value; since
4227 * those are clipped to [MIN_SHARES, ...) do so now. See
4228 * calc_cfs_shares().
4229 */
Paul Turner977dda72011-01-14 17:57:50 -08004230 if (wl < MIN_SHARES)
4231 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004232
4233 /*
4234 * wl = dw_i = S * (s'_i - s_i); see (3)
4235 */
Paul Turner977dda72011-01-14 17:57:50 -08004236 wl -= se->load.weight;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004237
4238 /*
4239 * Recursively apply this logic to all parent groups to compute
4240 * the final effective load change on the root group. Since
4241 * only the @tg group gets extra weight, all parent groups can
4242 * only redistribute existing shares. @wl is the shift in shares
4243 * resulting from this level per the above.
4244 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004245 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004246 }
4247
4248 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004249}
4250#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004251
Mel Gorman58d081b2013-10-07 11:29:10 +01004252static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004253{
Peter Zijlstra83378262008-06-27 13:41:37 +02004254 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004255}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004256
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004257#endif
4258
Michael Wang62470412013-07-04 12:55:51 +08004259static int wake_wide(struct task_struct *p)
4260{
Peter Zijlstra7d9ffa82013-07-04 12:56:46 +08004261 int factor = this_cpu_read(sd_llc_size);
Michael Wang62470412013-07-04 12:55:51 +08004262
4263 /*
 4264	 * Yeah, it's the switching frequency: it can mean many wakees or
 4265	 * rapid switching. Using the factor here just helps to automatically
 4266	 * adjust the looseness, so a bigger node will lead to more pulling.
4267 */
4268 if (p->wakee_flips > factor) {
4269 /*
 4270	 * The wakee is somewhat hot and needs a certain amount of cpu
 4271	 * resource, so if the waker is far hotter, prefer to leave
 4272	 * it alone.
4273 */
4274 if (current->wakee_flips > (factor * p->wakee_flips))
4275 return 1;
4276 }
4277
4278 return 0;
4279}
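/*
 * Concretely, assuming an LLC spanning 8 cpus (factor == 8): the
 * affine wakeup path is only rejected when the wakee has flipped
 * partners more than 8 times recently *and* the waker flips more than
 * 8x as often as the wakee - i.e. a 1:N waker/wakee pattern whose
 * wakees are better left near their previous cpus than all pulled
 * onto the waker's cpu.
 */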
4280
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004281static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004282{
Paul Turnere37b6a72011-01-21 20:44:59 -08004283 s64 this_load, load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004284 int idx, this_cpu, prev_cpu;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004285 unsigned long tl_per_task;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004286 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02004287 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004288 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004289
Michael Wang62470412013-07-04 12:55:51 +08004290 /*
4291 * If we wake multiple tasks be careful to not bounce
4292 * ourselves around too much.
4293 */
4294 if (wake_wide(p))
4295 return 0;
4296
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004297 idx = sd->wake_idx;
4298 this_cpu = smp_processor_id();
4299 prev_cpu = task_cpu(p);
4300 load = source_load(prev_cpu, idx);
4301 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004302
4303 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004304 * If sync wakeup then subtract the (maximum possible)
4305 * effect of the currently running task from the load
4306 * of the current CPU:
4307 */
Peter Zijlstra83378262008-06-27 13:41:37 +02004308 if (sync) {
4309 tg = task_group(current);
4310 weight = current->se.load.weight;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004311
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004312 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02004313 load += effective_load(tg, prev_cpu, 0, -weight);
4314 }
4315
4316 tg = task_group(p);
4317 weight = p->se.load.weight;
4318
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02004319 /*
4320 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004321 * due to the sync cause above having dropped this_load to 0, we'll
4322 * always have an imbalance, but there's really nothing you can do
4323 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02004324 *
4325 * Otherwise check if either cpus are near enough in load to allow this
4326 * task to be woken on this_cpu.
4327 */
Paul Turnere37b6a72011-01-21 20:44:59 -08004328 if (this_load > 0) {
4329 s64 this_eff_load, prev_eff_load;
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004330
4331 this_eff_load = 100;
Nicolas Pitreced549f2014-05-26 18:19:38 -04004332 this_eff_load *= capacity_of(prev_cpu);
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004333 this_eff_load *= this_load +
4334 effective_load(tg, this_cpu, weight, weight);
4335
4336 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
Nicolas Pitreced549f2014-05-26 18:19:38 -04004337 prev_eff_load *= capacity_of(this_cpu);
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004338 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
4339
4340 balanced = this_eff_load <= prev_eff_load;
4341 } else
4342 balanced = true;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004343
4344 /*
4345 * If the currently running task will sleep within
4346 * a reasonable amount of time then attract this newly
4347 * woken task:
4348 */
Peter Zijlstra2fb76352008-10-08 09:16:04 +02004349 if (sync && balanced)
4350 return 1;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004351
Lucas De Marchi41acab82010-03-10 23:37:45 -03004352 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004353 tl_per_task = cpu_avg_load_per_task(this_cpu);
4354
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004355 if (balanced ||
4356 (this_load <= load &&
4357 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004358 /*
4359 * This domain has SD_WAKE_AFFINE and
4360 * p is cache cold in this domain, and
4361 * there is no bad imbalance.
4362 */
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004363 schedstat_inc(sd, ttwu_move_affine);
Lucas De Marchi41acab82010-03-10 23:37:45 -03004364 schedstat_inc(p, se.statistics.nr_wakeups_affine);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004365
4366 return 1;
4367 }
4368 return 0;
4369}
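/*
 * A rough feel for the balance check above, assuming equal cpu
 * capacities and an imbalance_pct of 125: prev_eff_load gets a
 * 112/100 multiplier, so the wakeup counts as balanced as long as the
 * waking cpu's weighted load (including the woken task) is no more
 * than ~12% above the previous cpu's.
 */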
4370
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004371/*
4372 * find_idlest_group finds and returns the least busy CPU group within the
4373 * domain.
4374 */
4375static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02004376find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004377 int this_cpu, int sd_flag)
Gregory Haskinse7693a32008-01-25 21:08:09 +01004378{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07004379 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004380 unsigned long min_load = ULONG_MAX, this_load = 0;
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004381 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004382 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004383
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004384 if (sd_flag & SD_BALANCE_WAKE)
4385 load_idx = sd->wake_idx;
4386
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004387 do {
4388 unsigned long load, avg_load;
4389 int local_group;
4390 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004391
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004392 /* Skip over this group if it has no CPUs allowed */
4393 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004394 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004395 continue;
4396
4397 local_group = cpumask_test_cpu(this_cpu,
4398 sched_group_cpus(group));
4399
4400 /* Tally up the load of all CPUs in the group */
4401 avg_load = 0;
4402
4403 for_each_cpu(i, sched_group_cpus(group)) {
4404 /* Bias balancing toward cpus of our domain */
4405 if (local_group)
4406 load = source_load(i, load_idx);
4407 else
4408 load = target_load(i, load_idx);
4409
4410 avg_load += load;
4411 }
4412
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04004413 /* Adjust by relative CPU capacity of the group */
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04004414 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004415
4416 if (local_group) {
4417 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004418 } else if (avg_load < min_load) {
4419 min_load = avg_load;
4420 idlest = group;
4421 }
4422 } while (group = group->next, group != sd->groups);
4423
4424 if (!idlest || 100*this_load < imbalance*min_load)
4425 return NULL;
4426 return idlest;
4427}
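/*
 * Note on the capacity scaling above: avg_load is normalized by group
 * capacity, so a group of e.g. two SMT siblings reports roughly twice
 * the per-capacity load of two full cores carrying the same raw
 * weight. The final check then only returns a remote group when
 * 100*this_load >= imbalance*min_load, i.e. the local group must be
 * busier by at least the halved imbalance_pct margin.
 */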
4428
4429/*
4430 * find_idlest_cpu - find the idlest cpu among the cpus in group.
4431 */
4432static int
4433find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4434{
4435 unsigned long load, min_load = ULONG_MAX;
4436 int idlest = -1;
4437 int i;
4438
4439 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004440 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004441 load = weighted_cpuload(i);
4442
4443 if (load < min_load || (load == min_load && i == this_cpu)) {
4444 min_load = load;
4445 idlest = i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004446 }
4447 }
4448
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004449 return idlest;
4450}
Gregory Haskinse7693a32008-01-25 21:08:09 +01004451
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004452/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004453 * Try and locate an idle CPU in the sched_domain.
4454 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004455static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004456{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004457 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07004458 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004459 int i = task_cpu(p);
4460
4461 if (idle_cpu(target))
4462 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004463
4464 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004465	 * If the previous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004466 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004467 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4468 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004469
4470 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07004471	 * Otherwise, iterate the domains and find an eligible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004472 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01004473 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08004474 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07004475 sg = sd->groups;
4476 do {
4477 if (!cpumask_intersects(sched_group_cpus(sg),
4478 tsk_cpus_allowed(p)))
4479 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02004480
Linus Torvalds37407ea2012-09-16 12:29:43 -07004481 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004482 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07004483 goto next;
4484 }
4485
4486 target = cpumask_first_and(sched_group_cpus(sg),
4487 tsk_cpus_allowed(p));
4488 goto done;
4489next:
4490 sg = sg->next;
4491 } while (sg != sd->groups);
4492 }
4493done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004494 return target;
4495}
4496
4497/*
Morten Rasmussende91b9c2014-02-18 14:14:24 +00004498 * select_task_rq_fair: Select target runqueue for the waking task in domains
4499 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
4500 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004501 *
Morten Rasmussende91b9c2014-02-18 14:14:24 +00004502 * Balances load by selecting the idlest cpu in the idlest group, or under
4503 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004504 *
Morten Rasmussende91b9c2014-02-18 14:14:24 +00004505 * Returns the target cpu number.
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004506 *
4507 * preempt must be disabled.
4508 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01004509static int
Peter Zijlstraac66f542013-10-07 11:29:16 +01004510select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004511{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02004512 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004513 int cpu = smp_processor_id();
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004514 int new_cpu = cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004515 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02004516 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004517
Peter Zijlstra29baa742012-04-23 12:11:21 +02004518 if (p->nr_cpus_allowed == 1)
Mike Galbraith76854c72011-11-22 15:18:24 +01004519 return prev_cpu;
4520
Peter Zijlstra0763a662009-09-14 19:37:39 +02004521 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004522 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004523 want_affine = 1;
4524 new_cpu = prev_cpu;
4525 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01004526
Peter Zijlstradce840a2011-04-07 14:09:50 +02004527 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004528 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f42882009-12-16 18:04:34 +01004529 if (!(tmp->flags & SD_LOAD_BALANCE))
4530 continue;
4531
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004532 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004533 * If both cpu and prev_cpu are part of this domain,
4534 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01004535 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004536 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4537 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4538 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08004539 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004540 }
4541
Alex Shif03542a2012-07-26 08:55:34 +08004542 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02004543 sd = tmp;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004544 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004545
Rik van Riel8bf21432014-05-14 11:40:37 -04004546 if (affine_sd && cpu != prev_cpu && wake_affine(affine_sd, p, sync))
4547 prev_cpu = cpu;
Peter Zijlstradce840a2011-04-07 14:09:50 +02004548
Rik van Riel8bf21432014-05-14 11:40:37 -04004549 if (sd_flag & SD_BALANCE_WAKE) {
Peter Zijlstradce840a2011-04-07 14:09:50 +02004550 new_cpu = select_idle_sibling(p, prev_cpu);
4551 goto unlock;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01004552 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02004553
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004554 while (sd) {
4555 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004556 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004557
Peter Zijlstra0763a662009-09-14 19:37:39 +02004558 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004559 sd = sd->child;
4560 continue;
4561 }
4562
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004563 group = find_idlest_group(sd, p, cpu, sd_flag);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004564 if (!group) {
4565 sd = sd->child;
4566 continue;
4567 }
4568
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02004569 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004570 if (new_cpu == -1 || new_cpu == cpu) {
4571 /* Now try balancing at a lower domain level of cpu */
4572 sd = sd->child;
4573 continue;
4574 }
4575
4576 /* Now try balancing at a lower domain level of new_cpu */
4577 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004578 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004579 sd = NULL;
4580 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004581 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004582 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02004583 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004584 sd = tmp;
4585 }
4586 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01004587 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02004588unlock:
4589 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01004590
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004591 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004592}
Paul Turner0a74bef2012-10-04 13:18:30 +02004593
4594/*
4595 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4596 * cfs_rq_of(p) references at time of call are still valid and identify the
4597 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
4598 * other assumptions, including the state of rq->lock, should be made.
4599 */
4600static void
4601migrate_task_rq_fair(struct task_struct *p, int next_cpu)
4602{
Paul Turneraff3e492012-10-04 13:18:30 +02004603 struct sched_entity *se = &p->se;
4604 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4605
4606 /*
4607 * Load tracking: accumulate removed load so that it can be processed
4608 * when we next update owning cfs_rq under rq->lock. Tasks contribute
4609 * to blocked load iff they have a positive decay-count. It can never
4610 * be negative here since on-rq tasks have decay-count == 0.
4611 */
4612 if (se->avg.decay_count) {
4613 se->avg.decay_count = -__synchronize_entity_decay(se);
Alex Shi25099402013-06-20 10:18:55 +08004614 atomic_long_add(se->avg.load_avg_contrib,
4615 &cfs_rq->removed_load);
Paul Turneraff3e492012-10-04 13:18:30 +02004616 }
Ben Segall3944a922014-05-15 15:59:20 -07004617
4618 /* We have migrated, no longer consider this task hot */
4619 se->exec_start = 0;
Paul Turner0a74bef2012-10-04 13:18:30 +02004620}
Gregory Haskinse7693a32008-01-25 21:08:09 +01004621#endif /* CONFIG_SMP */
4622
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004623static unsigned long
4624wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004625{
4626 unsigned long gran = sysctl_sched_wakeup_granularity;
4627
4628 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004629	 * Since it's curr that is running now, convert the gran from real-time
 4630	 * to virtual-time in its units.
Mike Galbraith13814d42010-03-11 17:17:04 +01004631 *
4632 * By using 'se' instead of 'curr' we penalize light tasks, so
4633 * they get preempted easier. That is, if 'se' < 'curr' then
4634 * the resulting gran will be larger, therefore penalizing the
4635 * lighter, if otoh 'se' > 'curr' then the resulting gran will
4636 * be smaller, again penalizing the lighter task.
4637 *
4638 * This is especially important for buddies when the leftmost
4639 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004640 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08004641 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004642}
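/*
 * Worked example, assuming the default ~1ms wakeup granularity: for a
 * nice-0 wakee calc_delta_fair() leaves the gran at 1ms, while for a
 * nice +5 wakee (load weight 335 vs. 1024) it grows to roughly 3ms of
 * virtual time, so the lighter task's vruntime must be more than ~3ms
 * below curr's before it is allowed to preempt.
 */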
4643
4644/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02004645 * Should 'se' preempt 'curr'.
4646 *
4647 * |s1
4648 * |s2
4649 * |s3
4650 * g
4651 * |<--->|c
4652 *
4653 * w(c, s1) = -1
4654 * w(c, s2) = 0
4655 * w(c, s3) = 1
4656 *
4657 */
4658static int
4659wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
4660{
4661 s64 gran, vdiff = curr->vruntime - se->vruntime;
4662
4663 if (vdiff <= 0)
4664 return -1;
4665
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004666 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02004667 if (vdiff > gran)
4668 return 1;
4669
4670 return 0;
4671}
4672
Peter Zijlstra02479092008-11-04 21:25:10 +01004673static void set_last_buddy(struct sched_entity *se)
4674{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004675 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4676 return;
4677
4678 for_each_sched_entity(se)
4679 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01004680}
4681
4682static void set_next_buddy(struct sched_entity *se)
4683{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004684 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
4685 return;
4686
4687 for_each_sched_entity(se)
4688 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01004689}
4690
Rik van Rielac53db52011-02-01 09:51:03 -05004691static void set_skip_buddy(struct sched_entity *se)
4692{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07004693 for_each_sched_entity(se)
4694 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05004695}
4696
Peter Zijlstra464b7522008-10-24 11:06:15 +02004697/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004698 * Preempt the current task with a newly woken task if needed:
4699 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02004700static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004701{
4702 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02004703 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01004704 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02004705 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004706 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01004707
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01004708 if (unlikely(se == pse))
4709 return;
4710
Paul Turner5238cdd2011-07-21 09:43:37 -07004711 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01004712 * This is possible from callers such as move_task(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07004713	 * unconditionally check_preempt_curr() after an enqueue (which may have
4714 * lead to a throttle). This both saves work and prevents false
4715 * next-buddy nomination below.
4716 */
4717 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
4718 return;
4719
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004720 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02004721 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004722 next_buddy_marked = 1;
4723 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02004724
Bharata B Raoaec0a512008-08-28 14:42:49 +05304725 /*
4726 * We can come here with TIF_NEED_RESCHED already set from new task
4727 * wake up path.
Paul Turner5238cdd2011-07-21 09:43:37 -07004728 *
4729 * Note: this also catches the edge-case of curr being in a throttled
4730 * group (e.g. via set_curr_task), since update_curr() (in the
4731 * enqueue of curr) will have resulted in resched being set. This
4732 * prevents us from potentially nominating it as a false LAST_BUDDY
4733 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05304734 */
4735 if (test_tsk_need_resched(curr))
4736 return;
4737
Darren Harta2f5c9a2011-02-22 13:04:33 -08004738 /* Idle tasks are by definition preempted by non-idle tasks. */
4739 if (unlikely(curr->policy == SCHED_IDLE) &&
4740 likely(p->policy != SCHED_IDLE))
4741 goto preempt;
4742
Ingo Molnar91c234b2007-10-15 17:00:18 +02004743 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08004744 * Batch and idle tasks do not preempt non-idle tasks (their preemption
4745 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02004746 */
Ingo Molnar8ed92e52012-10-14 14:28:50 +02004747 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02004748 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004749
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004750 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07004751 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004752 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004753 if (wakeup_preempt_entity(se, pse) == 1) {
4754 /*
4755 * Bias pick_next to pick the sched entity that is
4756 * triggering this preemption.
4757 */
4758 if (!next_buddy_marked)
4759 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004760 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004761 }
Jupyung Leea65ac742009-11-17 18:51:40 +09004762
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004763 return;
4764
4765preempt:
Kirill Tkhai88751252014-06-29 00:03:57 +04004766 resched_curr(rq);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01004767 /*
4768 * Only set the backward buddy when the current task is still
4769 * on the rq. This can happen when a wakeup gets interleaved
4770 * with schedule on the ->pre_schedule() or idle_balance()
 4771	 * point, either of which can drop the rq lock.
4772 *
4773 * Also, during early boot the idle thread is in the fair class,
 4774	 * for obvious reasons it's a bad idea to schedule back to it.
4775 */
4776 if (unlikely(!se->on_rq || curr == rq->idle))
4777 return;
4778
4779 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
4780 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004781}
4782
Peter Zijlstra606dba22012-02-11 06:05:00 +01004783static struct task_struct *
4784pick_next_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004785{
4786 struct cfs_rq *cfs_rq = &rq->cfs;
4787 struct sched_entity *se;
Peter Zijlstra678d5712012-02-11 06:05:00 +01004788 struct task_struct *p;
Peter Zijlstra37e117c2014-02-14 12:25:08 +01004789 int new_tasks;
Peter Zijlstra678d5712012-02-11 06:05:00 +01004790
Peter Zijlstra6e831252014-02-11 16:11:48 +01004791again:
Peter Zijlstra678d5712012-02-11 06:05:00 +01004792#ifdef CONFIG_FAIR_GROUP_SCHED
4793 if (!cfs_rq->nr_running)
Peter Zijlstra38033c32014-01-23 20:32:21 +01004794 goto idle;
Peter Zijlstra678d5712012-02-11 06:05:00 +01004795
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +01004796 if (prev->sched_class != &fair_sched_class)
Peter Zijlstra678d5712012-02-11 06:05:00 +01004797 goto simple;
4798
4799 /*
4800 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
 4801	 * likely that the next task is from the same cgroup as the current one.
4802 *
4803 * Therefore attempt to avoid putting and setting the entire cgroup
4804 * hierarchy, only change the part that actually changes.
4805 */
4806
4807 do {
4808 struct sched_entity *curr = cfs_rq->curr;
4809
4810 /*
4811 * Since we got here without doing put_prev_entity() we also
4812 * have to consider cfs_rq->curr. If it is still a runnable
4813 * entity, update_curr() will update its vruntime, otherwise
4814 * forget we've ever seen it.
4815 */
4816 if (curr && curr->on_rq)
4817 update_curr(cfs_rq);
4818 else
4819 curr = NULL;
4820
4821 /*
4822 * This call to check_cfs_rq_runtime() will do the throttle and
4823 * dequeue its entity in the parent(s). Therefore the 'simple'
4824 * nr_running test will indeed be correct.
4825 */
4826 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
4827 goto simple;
4828
4829 se = pick_next_entity(cfs_rq, curr);
4830 cfs_rq = group_cfs_rq(se);
4831 } while (cfs_rq);
4832
4833 p = task_of(se);
4834
4835 /*
4836 * Since we haven't yet done put_prev_entity and if the selected task
4837 * is a different task than we started out with, try and touch the
4838 * least amount of cfs_rqs.
4839 */
4840 if (prev != p) {
4841 struct sched_entity *pse = &prev->se;
4842
4843 while (!(cfs_rq = is_same_group(se, pse))) {
4844 int se_depth = se->depth;
4845 int pse_depth = pse->depth;
4846
4847 if (se_depth <= pse_depth) {
4848 put_prev_entity(cfs_rq_of(pse), pse);
4849 pse = parent_entity(pse);
4850 }
4851 if (se_depth >= pse_depth) {
4852 set_next_entity(cfs_rq_of(se), se);
4853 se = parent_entity(se);
4854 }
4855 }
4856
4857 put_prev_entity(cfs_rq, pse);
4858 set_next_entity(cfs_rq, se);
4859 }
4860
4861 if (hrtick_enabled(rq))
4862 hrtick_start_fair(rq, p);
4863
4864 return p;
4865simple:
4866 cfs_rq = &rq->cfs;
4867#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004868
Tim Blechmann36ace272009-11-24 11:55:45 +01004869 if (!cfs_rq->nr_running)
Peter Zijlstra38033c32014-01-23 20:32:21 +01004870 goto idle;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004871
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +01004872 put_prev_task(rq, prev);
Peter Zijlstra606dba22012-02-11 06:05:00 +01004873
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004874 do {
Peter Zijlstra678d5712012-02-11 06:05:00 +01004875 se = pick_next_entity(cfs_rq, NULL);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01004876 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004877 cfs_rq = group_cfs_rq(se);
4878 } while (cfs_rq);
4879
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004880 p = task_of(se);
Peter Zijlstra678d5712012-02-11 06:05:00 +01004881
Mike Galbraithb39e66e2011-11-22 15:20:07 +01004882 if (hrtick_enabled(rq))
4883 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004884
4885 return p;
Peter Zijlstra38033c32014-01-23 20:32:21 +01004886
4887idle:
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04004888 new_tasks = idle_balance(rq);
Peter Zijlstra37e117c2014-02-14 12:25:08 +01004889 /*
4890 * Because idle_balance() releases (and re-acquires) rq->lock, it is
4891 * possible for any higher priority task to appear. In that case we
4892 * must re-start the pick_next_entity() loop.
4893 */
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04004894 if (new_tasks < 0)
Peter Zijlstra37e117c2014-02-14 12:25:08 +01004895 return RETRY_TASK;
4896
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04004897 if (new_tasks > 0)
Peter Zijlstra38033c32014-01-23 20:32:21 +01004898 goto again;
Peter Zijlstra38033c32014-01-23 20:32:21 +01004899
4900 return NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004901}
4902
4903/*
4904 * Account for a descheduled task:
4905 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02004906static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004907{
4908 struct sched_entity *se = &prev->se;
4909 struct cfs_rq *cfs_rq;
4910
4911 for_each_sched_entity(se) {
4912 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02004913 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004914 }
4915}
4916
Rik van Rielac53db52011-02-01 09:51:03 -05004917/*
4918 * sched_yield() is very simple
4919 *
4920 * The magic of dealing with the ->skip buddy is in pick_next_entity.
4921 */
4922static void yield_task_fair(struct rq *rq)
4923{
4924 struct task_struct *curr = rq->curr;
4925 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
4926 struct sched_entity *se = &curr->se;
4927
4928 /*
4929 * Are we the only task in the tree?
4930 */
4931 if (unlikely(rq->nr_running == 1))
4932 return;
4933
4934 clear_buddies(cfs_rq, se);
4935
4936 if (curr->policy != SCHED_BATCH) {
4937 update_rq_clock(rq);
4938 /*
4939 * Update run-time statistics of the 'current'.
4940 */
4941 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01004942 /*
4943 * Tell update_rq_clock() that we've just updated,
4944 * so we don't do microscopic update in schedule()
4945 * and double the fastpath cost.
4946 */
4947 rq->skip_clock_update = 1;
Rik van Rielac53db52011-02-01 09:51:03 -05004948 }
4949
4950 set_skip_buddy(se);
4951}
4952
Mike Galbraithd95f4122011-02-01 09:50:51 -05004953static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
4954{
4955 struct sched_entity *se = &p->se;
4956
Paul Turner5238cdd2011-07-21 09:43:37 -07004957 /* throttled hierarchies are not runnable */
4958 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05004959 return false;
4960
4961 /* Tell the scheduler that we'd really like pse to run next. */
4962 set_next_buddy(se);
4963
Mike Galbraithd95f4122011-02-01 09:50:51 -05004964 yield_task_fair(rq);
4965
4966 return true;
4967}
4968
Peter Williams681f3e62007-10-24 18:23:51 +02004969#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004970/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02004971 * Fair scheduling class load-balancing methods.
4972 *
4973 * BASICS
4974 *
4975 * The purpose of load-balancing is to achieve the same basic fairness the
4976 * per-cpu scheduler provides, namely provide a proportional amount of compute
4977 * time to each task. This is expressed in the following equation:
4978 *
4979 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
4980 *
4981 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
4982 * W_i,0 is defined as:
4983 *
4984 * W_i,0 = \Sum_j w_i,j (2)
4985 *
4986 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
4987 * is derived from the nice value as per prio_to_weight[].
4988 *
4989 * The weight average is an exponential decay average of the instantaneous
4990 * weight:
4991 *
4992 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
4993 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04004994 * C_i is the compute capacity of cpu i, typically it is the
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02004995 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
4996 * can also include other factors [XXX].
4997 *
4998 * To achieve this balance we define a measure of imbalance which follows
4999 * directly from (1):
5000 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04005001 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02005002 *
 5003	 * We then move tasks around to minimize the imbalance. In the continuous
5004 * function space it is obvious this converges, in the discrete case we get
5005 * a few fun cases generally called infeasible weight scenarios.
5006 *
5007 * [XXX expand on:
5008 * - infeasible weights;
5009 * - local vs global optima in the discrete case. ]
5010 *
5011 *
5012 * SCHED DOMAINS
5013 *
5014 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
5015 * for all i,j solution, we create a tree of cpus that follows the hardware
5016 * topology where each level pairs two lower groups (or better). This results
5017 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
5018 * tree to only the first of the previous level and we decrease the frequency
5019 * of load-balance at each level inv. proportional to the number of cpus in
5020 * the groups.
5021 *
5022 * This yields:
5023 *
5024 * log_2 n 1 n
5025 * \Sum { --- * --- * 2^i } = O(n) (5)
5026 * i = 0 2^i 2^i
5027 * `- size of each group
5028 * | | `- number of cpus doing load-balance
5029 * | `- freq
5030 * `- sum over all levels
5031 *
5032 * Coupled with a limit on how many tasks we can migrate every balance pass,
5033 * this makes (5) the runtime complexity of the balancer.
5034 *
5035 * An important property here is that each CPU is still (indirectly) connected
5036 * to every other cpu in at most O(log n) steps:
5037 *
5038 * The adjacency matrix of the resulting graph is given by:
5039 *
5040 * log_2 n
5041 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
5042 * k = 0
5043 *
5044 * And you'll find that:
5045 *
5046 * A^(log_2 n)_i,j != 0 for all i,j (7)
5047 *
5048 * Showing there's indeed a path between every cpu in at most O(log n) steps.
5049 * The task movement gives a factor of O(m), giving a convergence complexity
5050 * of:
5051 *
5052 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
5053 *
5054 *
5055 * WORK CONSERVING
5056 *
5057 * In order to avoid CPUs going idle while there's still work to do, new idle
5058 * balancing is more aggressive and has the newly idle cpu iterate up the domain
5059 * tree itself instead of relying on other CPUs to bring it work.
5060 *
5061 * This adds some complexity to both (5) and (8) but it reduces the total idle
5062 * time.
5063 *
5064 * [XXX more?]
5065 *
5066 *
5067 * CGROUPS
5068 *
5069 * Cgroups make a horror show out of (2), instead of a simple sum we get:
5070 *
5071 * s_k,i
5072 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
5073 * S_k
5074 *
5075 * Where
5076 *
5077 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
5078 *
5079 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
5080 *
 5081	 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
5082 * property.
5083 *
5084 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
5085 * rewrite all of this once again.]
5086 */
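/*
 * A worked instance of (5) for n = 4 cpus: the per-level costs are
 * 4/2^0 + 4/2^1 + 4/2^2 = 4 + 2 + 1 = 7, i.e. bounded by 2n, which is
 * the O(n) total claimed above.
 */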
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005087
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09005088static unsigned long __read_mostly max_load_balance_interval = HZ/10;
5089
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005090enum fbq_type { regular, remote, all };
5091
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005092#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01005093#define LBF_NEED_BREAK 0x02
Peter Zijlstra62633222013-08-19 12:41:09 +02005094#define LBF_DST_PINNED 0x04
5095#define LBF_SOME_PINNED 0x08
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005096
5097struct lb_env {
5098 struct sched_domain *sd;
5099
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005100 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05305101 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005102
5103 int dst_cpu;
5104 struct rq *dst_rq;
5105
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305106 struct cpumask *dst_grpmask;
5107 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005108 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005109 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08005110 /* The set of CPUs under consideration for load-balancing */
5111 struct cpumask *cpus;
5112
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005113 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01005114
5115 unsigned int loop;
5116 unsigned int loop_break;
5117 unsigned int loop_max;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005118
5119 enum fbq_type fbq_type;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005120};
5121
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005122/*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005123 * move_task - move a task from one runqueue to another runqueue.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005124 * Both runqueues must be locked.
5125 */
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005126static void move_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005127{
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005128 deactivate_task(env->src_rq, p, 0);
5129 set_task_cpu(p, env->dst_cpu);
5130 activate_task(env->dst_rq, p, 0);
5131 check_preempt_curr(env->dst_rq, p, 0);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005132}
5133
5134/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02005135 * Is this task likely cache-hot:
5136 */
Hillf Danton5d5e2b12014-06-10 10:58:43 +02005137static int task_hot(struct task_struct *p, struct lb_env *env)
Peter Zijlstra029632f2011-10-25 10:00:11 +02005138{
5139 s64 delta;
5140
5141 if (p->sched_class != &fair_sched_class)
5142 return 0;
5143
5144 if (unlikely(p->policy == SCHED_IDLE))
5145 return 0;
5146
5147 /*
5148 * Buddy candidates are cache hot:
5149 */
Hillf Danton5d5e2b12014-06-10 10:58:43 +02005150 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
Peter Zijlstra029632f2011-10-25 10:00:11 +02005151 (&p->se == cfs_rq_of(&p->se)->next ||
5152 &p->se == cfs_rq_of(&p->se)->last))
5153 return 1;
5154
5155 if (sysctl_sched_migration_cost == -1)
5156 return 1;
5157 if (sysctl_sched_migration_cost == 0)
5158 return 0;
5159
Hillf Danton5d5e2b12014-06-10 10:58:43 +02005160 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
Peter Zijlstra029632f2011-10-25 10:00:11 +02005161
5162 return delta < (s64)sysctl_sched_migration_cost;
5163}
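
/*
 * Rough illustration, assuming the usual ~0.5 msec default for
 * sysctl_sched_migration_cost: a task whose exec_start on src_rq was
 * 200 usec ago is reported hot (delta < cost), one that last ran 2 msec
 * ago is not.  Setting the sysctl to -1 or 0 short-circuits the check
 * above to "always hot" or "never hot" respectively.
 */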
5164
Mel Gorman3a7053b2013-10-07 11:29:00 +01005165#ifdef CONFIG_NUMA_BALANCING
5166/* Returns true if the destination node has incurred more faults */
5167static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
5168{
Rik van Rielb1ad0652014-05-15 13:03:06 -04005169 struct numa_group *numa_group = rcu_dereference(p->numa_group);
Mel Gorman3a7053b2013-10-07 11:29:00 +01005170 int src_nid, dst_nid;
5171
Rik van Rielff1df892014-01-27 17:03:41 -05005172 if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults_memory ||
Mel Gorman3a7053b2013-10-07 11:29:00 +01005173 !(env->sd->flags & SD_NUMA)) {
5174 return false;
5175 }
5176
5177 src_nid = cpu_to_node(env->src_cpu);
5178 dst_nid = cpu_to_node(env->dst_cpu);
5179
Mel Gorman83e1d2c2013-10-07 11:29:27 +01005180 if (src_nid == dst_nid)
Mel Gorman3a7053b2013-10-07 11:29:00 +01005181 return false;
5182
Rik van Rielb1ad0652014-05-15 13:03:06 -04005183 if (numa_group) {
5184 /* Task is already in the group's interleave set. */
5185 if (node_isset(src_nid, numa_group->active_nodes))
5186 return false;
5187
5188 /* Task is moving into the group's interleave set. */
5189 if (node_isset(dst_nid, numa_group->active_nodes))
5190 return true;
5191
5192 return group_faults(p, dst_nid) > group_faults(p, src_nid);
5193 }
5194
5195 /* Encourage migration to the preferred node. */
Mel Gorman83e1d2c2013-10-07 11:29:27 +01005196 if (dst_nid == p->numa_preferred_nid)
5197 return true;
5198
Rik van Rielb1ad0652014-05-15 13:03:06 -04005199 return task_faults(p, dst_nid) > task_faults(p, src_nid);
Mel Gorman3a7053b2013-10-07 11:29:00 +01005200}
Mel Gorman7a0f3082013-10-07 11:29:01 +01005201
5202
5203static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
5204{
Rik van Rielb1ad0652014-05-15 13:03:06 -04005205 struct numa_group *numa_group = rcu_dereference(p->numa_group);
Mel Gorman7a0f3082013-10-07 11:29:01 +01005206 int src_nid, dst_nid;
5207
5208 if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
5209 return false;
5210
Rik van Rielff1df892014-01-27 17:03:41 -05005211 if (!p->numa_faults_memory || !(env->sd->flags & SD_NUMA))
Mel Gorman7a0f3082013-10-07 11:29:01 +01005212 return false;
5213
5214 src_nid = cpu_to_node(env->src_cpu);
5215 dst_nid = cpu_to_node(env->dst_cpu);
5216
Mel Gorman83e1d2c2013-10-07 11:29:27 +01005217 if (src_nid == dst_nid)
Mel Gorman7a0f3082013-10-07 11:29:01 +01005218 return false;
5219
Rik van Rielb1ad0652014-05-15 13:03:06 -04005220 if (numa_group) {
5221 /* Task is moving within/into the group's interleave set. */
5222 if (node_isset(dst_nid, numa_group->active_nodes))
5223 return false;
5224
5225 /* Task is moving out of the group's interleave set. */
5226 if (node_isset(src_nid, numa_group->active_nodes))
5227 return true;
5228
5229 return group_faults(p, dst_nid) < group_faults(p, src_nid);
5230 }
5231
Mel Gorman83e1d2c2013-10-07 11:29:27 +01005232 /* Migrating away from the preferred node is always bad. */
5233 if (src_nid == p->numa_preferred_nid)
5234 return true;
5235
Rik van Rielb1ad0652014-05-15 13:03:06 -04005236 return task_faults(p, dst_nid) < task_faults(p, src_nid);
Mel Gorman7a0f3082013-10-07 11:29:01 +01005237}
5238
Mel Gorman3a7053b2013-10-07 11:29:00 +01005239#else
5240static inline bool migrate_improves_locality(struct task_struct *p,
5241 struct lb_env *env)
5242{
5243 return false;
5244}
Mel Gorman7a0f3082013-10-07 11:29:01 +01005245
5246static inline bool migrate_degrades_locality(struct task_struct *p,
5247 struct lb_env *env)
5248{
5249 return false;
5250}
Mel Gorman3a7053b2013-10-07 11:29:00 +01005251#endif
5252
Peter Zijlstra029632f2011-10-25 10:00:11 +02005253/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005254 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
5255 */
5256static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005257int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005258{
5259 int tsk_cache_hot = 0;
5260 /*
5261 * We do not migrate tasks that:
Joonsoo Kimd3198082013-04-23 17:27:40 +09005262 * 1) are throttled, as per throttled_lb_pair(), or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005263 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09005264 * 3) are running (obviously), or
5265 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005266 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09005267 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
5268 return 0;
5269
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005270 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005271 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305272
Lucas De Marchi41acab82010-03-10 23:37:45 -03005273 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305274
Peter Zijlstra62633222013-08-19 12:41:09 +02005275 env->flags |= LBF_SOME_PINNED;
5276
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305277 /*
5278 * Remember if this task can be migrated to any other cpu in
5279 * our sched_group. We may want to revisit it if we couldn't
5280 * meet load balance goals by pulling other tasks on src_cpu.
5281 *
5282 * Also avoid computing new_dst_cpu if we have already computed
5283 * one in current iteration.
5284 */
Peter Zijlstra62633222013-08-19 12:41:09 +02005285 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305286 return 0;
5287
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005288 /* Prevent re-selecting dst_cpu via env's cpus */
5289 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
5290 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
Peter Zijlstra62633222013-08-19 12:41:09 +02005291 env->flags |= LBF_DST_PINNED;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005292 env->new_dst_cpu = cpu;
5293 break;
5294 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305295 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005296
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005297 return 0;
5298 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305299
5300 /* Record that we found at least one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005301 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005302
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005303 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03005304 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005305 return 0;
5306 }
5307
5308 /*
5309 * Aggressive migration if:
Mel Gorman3a7053b2013-10-07 11:29:00 +01005310 * 1) the destination numa node is preferred, or
5311 * 2) task is cache cold, or
5312 * 3) too many balance attempts have failed.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005313 */
Hillf Danton5d5e2b12014-06-10 10:58:43 +02005314 tsk_cache_hot = task_hot(p, env);
Mel Gorman7a0f3082013-10-07 11:29:01 +01005315 if (!tsk_cache_hot)
5316 tsk_cache_hot = migrate_degrades_locality(p, env);
Mel Gorman3a7053b2013-10-07 11:29:00 +01005317
5318 if (migrate_improves_locality(p, env)) {
5319#ifdef CONFIG_SCHEDSTATS
5320 if (tsk_cache_hot) {
5321 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5322 schedstat_inc(p, se.statistics.nr_forced_migrations);
5323 }
5324#endif
5325 return 1;
5326 }
5327
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005328 if (!tsk_cache_hot ||
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005329 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Zhang Hang4e2dcb72013-04-10 14:04:55 +08005330
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005331 if (tsk_cache_hot) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005332 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
Lucas De Marchi41acab82010-03-10 23:37:45 -03005333 schedstat_inc(p, se.statistics.nr_forced_migrations);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005334 }
Zhang Hang4e2dcb72013-04-10 14:04:55 +08005335
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005336 return 1;
5337 }
5338
Zhang Hang4e2dcb72013-04-10 14:04:55 +08005339 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
5340 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005341}
5342
Peter Zijlstra897c3952009-12-17 17:45:42 +01005343/*
5344 * move_one_task tries to move exactly one task from busiest to this_rq, as
5345 * part of active balancing operations within "domain".
5346 * Returns 1 if successful and 0 otherwise.
5347 *
5348 * Called with both runqueues locked.
5349 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005350static int move_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01005351{
5352 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01005353
Peter Zijlstra367456c2012-02-20 21:49:09 +01005354 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01005355 if (!can_migrate_task(p, env))
5356 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01005357
Peter Zijlstra367456c2012-02-20 21:49:09 +01005358 move_task(p, env);
5359 /*
5360 * Right now, this is only the second place move_task()
5361 * is called, so we can safely collect move_task()
5362 * stats here rather than inside move_task().
5363 */
5364 schedstat_inc(env->sd, lb_gained[env->idle]);
5365 return 1;
Peter Zijlstra897c3952009-12-17 17:45:42 +01005366 }
Peter Zijlstra897c3952009-12-17 17:45:42 +01005367 return 0;
5368}
5369
Peter Zijlstraeb953082012-04-17 13:38:40 +02005370static const unsigned int sched_nr_migrate_break = 32;
5371
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005372/*
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005373 * move_tasks tries to move up to imbalance weighted load from busiest to
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005374 * this_rq, as part of a balancing operation within domain "sd".
5375 * Returns 1 if successful and 0 otherwise.
5376 *
5377 * Called with both runqueues locked.
5378 */
5379static int move_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005380{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005381 struct list_head *tasks = &env->src_rq->cfs_tasks;
5382 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01005383 unsigned long load;
5384 int pulled = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005385
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005386 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005387 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005388
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005389 while (!list_empty(tasks)) {
5390 p = list_first_entry(tasks, struct task_struct, se.group_node);
5391
Peter Zijlstra367456c2012-02-20 21:49:09 +01005392 env->loop++;
5393 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005394 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01005395 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005396
5397 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01005398 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02005399 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005400 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01005401 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02005402 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005403
Joonsoo Kimd3198082013-04-23 17:27:40 +09005404 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01005405 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005406
Peter Zijlstra367456c2012-02-20 21:49:09 +01005407 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005408
Peter Zijlstraeb953082012-04-17 13:38:40 +02005409 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01005410 goto next;
5411
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005412 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01005413 goto next;
5414
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005415 move_task(p, env);
Peter Zijlstraee00e662009-12-17 17:25:20 +01005416 pulled++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005417 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005418
5419#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01005420 /*
5421 * NEWIDLE balancing is a source of latency, so preemptible
5422 * kernels will stop after the first task is pulled to minimize
5423 * the critical section.
5424 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005425 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01005426 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005427#endif
5428
Peter Zijlstraee00e662009-12-17 17:25:20 +01005429 /*
5430 * We only want to steal up to the prescribed amount of
5431 * weighted load.
5432 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005433 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01005434 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005435
Peter Zijlstra367456c2012-02-20 21:49:09 +01005436 continue;
5437next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005438 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005439 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005440
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005441 /*
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005442 * Right now, this is one of only two places move_task() is called,
5443 * so we can safely collect move_task() stats here rather than
5444 * inside move_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005445 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005446 schedstat_add(env->sd, lb_gained[env->idle], pulled);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005447
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005448 return pulled;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005449}
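
/*
 * Sketch of the imbalance accounting above, with made-up loads: starting
 * from env->imbalance = 1024, a task of weighted load 3000 is skipped
 * (3000/2 > 1024), a task of load 800 is moved (imbalance drops to 224),
 * a task of load 1300 is skipped (650 > 224), and a task of load 300 is
 * moved, driving the imbalance negative and ending the loop.  Skipped
 * tasks are rotated to the list tail at the next: label so a later pass
 * considers different candidates first.
 */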
5450
Peter Zijlstra230059de2009-12-17 17:47:12 +01005451#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005452/*
5453 * update tg->load_weight by folding this cpu's load_avg
5454 */
Paul Turner48a16752012-10-04 13:18:31 +02005455static void __update_blocked_averages_cpu(struct task_group *tg, int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005456{
Paul Turner48a16752012-10-04 13:18:31 +02005457 struct sched_entity *se = tg->se[cpu];
5458 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu];
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005459
Paul Turner48a16752012-10-04 13:18:31 +02005460 /* throttled entities do not contribute to load */
5461 if (throttled_hierarchy(cfs_rq))
5462 return;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005463
Paul Turneraff3e492012-10-04 13:18:30 +02005464 update_cfs_rq_blocked_load(cfs_rq, 1);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005465
Paul Turner82958362012-10-04 13:18:31 +02005466 if (se) {
5467 update_entity_load_avg(se, 1);
5468 /*
5469 * We pivot on our runnable average having decayed to zero for
5470 * list removal. This generally implies that all our children
5471 * have also been removed (modulo rounding error or bandwidth
5472 * control); however, such cases are rare and we can fix these
5473 * at enqueue.
5474 *
5475 * TODO: fix up out-of-order children on enqueue.
5476 */
5477 if (!se->avg.runnable_avg_sum && !cfs_rq->nr_running)
5478 list_del_leaf_cfs_rq(cfs_rq);
5479 } else {
Paul Turner48a16752012-10-04 13:18:31 +02005480 struct rq *rq = rq_of(cfs_rq);
Paul Turner82958362012-10-04 13:18:31 +02005481 update_rq_runnable_avg(rq, rq->nr_running);
5482 }
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005483}
5484
Paul Turner48a16752012-10-04 13:18:31 +02005485static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005486{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005487 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02005488 struct cfs_rq *cfs_rq;
5489 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005490
Paul Turner48a16752012-10-04 13:18:31 +02005491 raw_spin_lock_irqsave(&rq->lock, flags);
5492 update_rq_clock(rq);
Peter Zijlstra9763b672011-07-13 13:09:25 +02005493 /*
5494 * Iterates the task_group tree in a bottom up fashion, see
5495 * list_add_leaf_cfs_rq() for details.
5496 */
Paul Turner64660c82011-07-21 09:43:36 -07005497 for_each_leaf_cfs_rq(rq, cfs_rq) {
Paul Turner48a16752012-10-04 13:18:31 +02005498 /*
5499 * Note: We may want to consider periodically releasing
5500 * rq->lock about these updates so that creating many task
5501 * groups does not result in continually extending hold time.
5502 */
5503 __update_blocked_averages_cpu(cfs_rq->tg, rq->cpu);
Paul Turner64660c82011-07-21 09:43:36 -07005504 }
Paul Turner48a16752012-10-04 13:18:31 +02005505
5506 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005507}
5508
Peter Zijlstra9763b672011-07-13 13:09:25 +02005509/*
Vladimir Davydov68520792013-07-15 17:49:19 +04005510 * Compute the hierarchical load factor for cfs_rq and all its ancestors.
Peter Zijlstra9763b672011-07-13 13:09:25 +02005511 * This needs to be done in a top-down fashion because the load of a child
5512 * group is a fraction of its parent's load.
5513 */
Vladimir Davydov68520792013-07-15 17:49:19 +04005514static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
Peter Zijlstra9763b672011-07-13 13:09:25 +02005515{
Vladimir Davydov68520792013-07-15 17:49:19 +04005516 struct rq *rq = rq_of(cfs_rq);
5517 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005518 unsigned long now = jiffies;
Vladimir Davydov68520792013-07-15 17:49:19 +04005519 unsigned long load;
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005520
Vladimir Davydov68520792013-07-15 17:49:19 +04005521 if (cfs_rq->last_h_load_update == now)
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005522 return;
5523
Vladimir Davydov68520792013-07-15 17:49:19 +04005524 cfs_rq->h_load_next = NULL;
5525 for_each_sched_entity(se) {
5526 cfs_rq = cfs_rq_of(se);
5527 cfs_rq->h_load_next = se;
5528 if (cfs_rq->last_h_load_update == now)
5529 break;
5530 }
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005531
Vladimir Davydov68520792013-07-15 17:49:19 +04005532 if (!se) {
Vladimir Davydov7e3115e2013-09-14 19:39:46 +04005533 cfs_rq->h_load = cfs_rq->runnable_load_avg;
Vladimir Davydov68520792013-07-15 17:49:19 +04005534 cfs_rq->last_h_load_update = now;
5535 }
5536
5537 while ((se = cfs_rq->h_load_next) != NULL) {
5538 load = cfs_rq->h_load;
5539 load = div64_ul(load * se->avg.load_avg_contrib,
5540 cfs_rq->runnable_load_avg + 1);
5541 cfs_rq = group_cfs_rq(se);
5542 cfs_rq->h_load = load;
5543 cfs_rq->last_h_load_update = now;
5544 }
Peter Zijlstra9763b672011-07-13 13:09:25 +02005545}
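
/*
 * Illustrative walk of the top-down pass above (invented numbers): a root
 * cfs_rq with runnable_load_avg = 2048 gets h_load = 2048.  A child group
 * entity contributing load_avg_contrib = 512 of that ends up with
 * h_load ~= 2048 * 512 / (2048 + 1) ~= 511, and a grandchild contributing
 * half of the child's runnable load gets roughly half of that again.
 * task_h_load() below applies the same fraction one more level down, to
 * the task itself.
 */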
5546
Peter Zijlstra367456c2012-02-20 21:49:09 +01005547static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01005548{
Peter Zijlstra367456c2012-02-20 21:49:09 +01005549 struct cfs_rq *cfs_rq = task_cfs_rq(p);
Peter Zijlstra230059de2009-12-17 17:47:12 +01005550
Vladimir Davydov68520792013-07-15 17:49:19 +04005551 update_cfs_rq_h_load(cfs_rq);
Alex Shia003a252013-06-20 10:18:51 +08005552 return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
5553 cfs_rq->runnable_load_avg + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01005554}
5555#else
Paul Turner48a16752012-10-04 13:18:31 +02005556static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005557{
5558}
5559
Peter Zijlstra367456c2012-02-20 21:49:09 +01005560static unsigned long task_h_load(struct task_struct *p)
5561{
Alex Shia003a252013-06-20 10:18:51 +08005562 return p->se.avg.load_avg_contrib;
Peter Zijlstra230059de2009-12-17 17:47:12 +01005563}
5564#endif
5565
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005566/********** Helpers for find_busiest_group ************************/
Rik van Rielcaeb1782014-07-28 14:16:28 -04005567
5568enum group_type {
5569 group_other = 0,
5570 group_imbalanced,
5571 group_overloaded,
5572};
5573
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005574/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005575 * sg_lb_stats - stats of a sched_group required for load_balancing
5576 */
5577struct sg_lb_stats {
5578 unsigned long avg_load; /* Avg load across the CPUs of the group */
5579 unsigned long group_load; /* Total load over the CPUs of the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005580 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005581 unsigned long load_per_task;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005582 unsigned long group_capacity;
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005583 unsigned int sum_nr_running; /* Nr tasks running in the group */
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04005584 unsigned int group_capacity_factor;
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005585 unsigned int idle_cpus;
5586 unsigned int group_weight;
Rik van Rielcaeb1782014-07-28 14:16:28 -04005587 enum group_type group_type;
Nicolas Pitre1b6a7492014-05-26 18:19:35 -04005588 int group_has_free_capacity;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005589#ifdef CONFIG_NUMA_BALANCING
5590 unsigned int nr_numa_running;
5591 unsigned int nr_preferred_running;
5592#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005593};
5594
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005595/*
5596 * sd_lb_stats - Structure to store the statistics of a sched_domain
5597 * during load balancing.
5598 */
5599struct sd_lb_stats {
5600 struct sched_group *busiest; /* Busiest group in this sd */
5601 struct sched_group *local; /* Local group in this sd */
5602 unsigned long total_load; /* Total load of all groups in sd */
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005603 unsigned long total_capacity; /* Total capacity of all groups in sd */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005604 unsigned long avg_load; /* Average load across all groups in sd */
5605
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005606 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005607 struct sg_lb_stats local_stat; /* Statistics of the local group */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005608};
5609
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005610static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5611{
5612 /*
5613 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
5614 * local_stat because update_sg_lb_stats() does a full clear/assignment.
5615 * We must however clear busiest_stat::avg_load because
5616 * update_sd_pick_busiest() reads this before assignment.
5617 */
5618 *sds = (struct sd_lb_stats){
5619 .busiest = NULL,
5620 .local = NULL,
5621 .total_load = 0UL,
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005622 .total_capacity = 0UL,
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005623 .busiest_stat = {
5624 .avg_load = 0UL,
Rik van Rielcaeb1782014-07-28 14:16:28 -04005625 .sum_nr_running = 0,
5626 .group_type = group_other,
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005627 },
5628 };
5629}
5630
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005631/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005632 * get_sd_load_idx - Obtain the load index for a given sched domain.
5633 * @sd: The sched_domain whose load_idx is to be obtained.
Kamalesh Babulaled1b7732013-10-13 23:06:15 +05305634 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
Yacine Belkadie69f6182013-07-12 20:45:47 +02005635 *
5636 * Return: The load index.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005637 */
5638static inline int get_sd_load_idx(struct sched_domain *sd,
5639 enum cpu_idle_type idle)
5640{
5641 int load_idx;
5642
5643 switch (idle) {
5644 case CPU_NOT_IDLE:
5645 load_idx = sd->busy_idx;
5646 break;
5647
5648 case CPU_NEWLY_IDLE:
5649 load_idx = sd->newidle_idx;
5650 break;
5651 default:
5652 load_idx = sd->idle_idx;
5653 break;
5654 }
5655
5656 return load_idx;
5657}
5658
Nicolas Pitreced549f2014-05-26 18:19:38 -04005659static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005660{
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005661 return SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005662}
5663
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005664unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005665{
Nicolas Pitreced549f2014-05-26 18:19:38 -04005666 return default_scale_capacity(sd, cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005667}
5668
Nicolas Pitreced549f2014-05-26 18:19:38 -04005669static unsigned long default_scale_smt_capacity(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005670{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02005671 unsigned long weight = sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005672 unsigned long smt_gain = sd->smt_gain;
5673
5674 smt_gain /= weight;
5675
5676 return smt_gain;
5677}
5678
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005679unsigned long __weak arch_scale_smt_capacity(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005680{
Nicolas Pitreced549f2014-05-26 18:19:38 -04005681 return default_scale_smt_capacity(sd, cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005682}
5683
Nicolas Pitreced549f2014-05-26 18:19:38 -04005684static unsigned long scale_rt_capacity(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005685{
5686 struct rq *rq = cpu_rq(cpu);
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005687 u64 total, available, age_stamp, avg;
Peter Zijlstracadefd32014-02-27 10:40:35 +01005688 s64 delta;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005689
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005690 /*
5691 * Since we're reading these variables without serialization make sure
5692 * we read them once before doing sanity checks on them.
5693 */
5694 age_stamp = ACCESS_ONCE(rq->age_stamp);
5695 avg = ACCESS_ONCE(rq->rt_avg);
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07005696
Peter Zijlstracadefd32014-02-27 10:40:35 +01005697 delta = rq_clock(rq) - age_stamp;
5698 if (unlikely(delta < 0))
5699 delta = 0;
5700
5701 total = sched_avg_period() + delta;
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005702
5703 if (unlikely(total < avg)) {
Nicolas Pitreced549f2014-05-26 18:19:38 -04005704 /* Ensures that capacity won't end up being negative */
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07005705 available = 0;
5706 } else {
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02005707 available = total - avg;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07005708 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005709
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005710 if (unlikely((s64)total < SCHED_CAPACITY_SCALE))
5711 total = SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005712
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005713 total >>= SCHED_CAPACITY_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005714
5715 return div_u64(available, total);
5716}
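
/*
 * Example of the fraction computed above (illustrative values): if rt/irq
 * time (avg) accounts for a quarter of the averaging window (total), then
 * available = 0.75 * total and the result is roughly
 * 0.75 * SCHED_CAPACITY_SCALE ~= 768, i.e. CFS is told it owns only ~75%
 * of this cpu.  If avg races ahead of total because of the unserialized
 * reads, the clamping above returns 0 rather than a negative capacity.
 */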
5717
Nicolas Pitreced549f2014-05-26 18:19:38 -04005718static void update_cpu_capacity(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005719{
Peter Zijlstra669c55e2010-04-16 14:59:29 +02005720 unsigned long weight = sd->span_weight;
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005721 unsigned long capacity = SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005722 struct sched_group *sdg = sd->groups;
5723
Nicolas Pitre5d4dfdd2014-05-27 13:50:41 -04005724 if ((sd->flags & SD_SHARE_CPUCAPACITY) && weight > 1) {
5725 if (sched_feat(ARCH_CAPACITY))
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005726 capacity *= arch_scale_smt_capacity(sd, cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005727 else
Nicolas Pitreced549f2014-05-26 18:19:38 -04005728 capacity *= default_scale_smt_capacity(sd, cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005729
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005730 capacity >>= SCHED_CAPACITY_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005731 }
5732
Nicolas Pitreced549f2014-05-26 18:19:38 -04005733 sdg->sgc->capacity_orig = capacity;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005734
Nicolas Pitre5d4dfdd2014-05-27 13:50:41 -04005735 if (sched_feat(ARCH_CAPACITY))
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005736 capacity *= arch_scale_freq_capacity(sd, cpu);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005737 else
Nicolas Pitreced549f2014-05-26 18:19:38 -04005738 capacity *= default_scale_capacity(sd, cpu);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005739
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005740 capacity >>= SCHED_CAPACITY_SHIFT;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005741
Nicolas Pitreced549f2014-05-26 18:19:38 -04005742 capacity *= scale_rt_capacity(cpu);
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005743 capacity >>= SCHED_CAPACITY_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005744
Nicolas Pitreced549f2014-05-26 18:19:38 -04005745 if (!capacity)
5746 capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005747
Nicolas Pitreced549f2014-05-26 18:19:38 -04005748 cpu_rq(cpu)->cpu_capacity = capacity;
5749 sdg->sgc->capacity = capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005750}
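
/*
 * Putting the scalings above together (illustrative, defaults assumed):
 * starting from SCHED_CAPACITY_SCALE = 1024, an SMT-2 sibling domain with
 * the usual smt_gain of ~1178 scales this to ~589 per thread, the
 * freq/arch hooks leave it unchanged by default, and a scale_rt_capacity()
 * of ~768 brings the final cpu_capacity to about 589 * 768 / 1024 ~= 441.
 */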
5751
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005752void update_group_capacity(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005753{
5754 struct sched_domain *child = sd->child;
5755 struct sched_group *group, *sdg = sd->groups;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005756 unsigned long capacity, capacity_orig;
Vincent Guittot4ec44122011-12-12 20:21:08 +01005757 unsigned long interval;
5758
5759 interval = msecs_to_jiffies(sd->balance_interval);
5760 interval = clamp(interval, 1UL, max_load_balance_interval);
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005761 sdg->sgc->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005762
5763 if (!child) {
Nicolas Pitreced549f2014-05-26 18:19:38 -04005764 update_cpu_capacity(sd, cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005765 return;
5766 }
5767
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005768 capacity_orig = capacity = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005769
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005770 if (child->flags & SD_OVERLAP) {
5771 /*
5772 * SD_OVERLAP domains cannot assume that child groups
5773 * span the current group.
5774 */
5775
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005776 for_each_cpu(cpu, sched_group_cpus(sdg)) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005777 struct sched_group_capacity *sgc;
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05305778 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005779
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05305780 /*
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005781 * build_sched_domains() -> init_sched_groups_capacity()
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05305782 * gets here before we've attached the domains to the
5783 * runqueues.
5784 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04005785 * Use capacity_of(), which is set irrespective of domains
5786 * in update_cpu_capacity().
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05305787 *
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005788 * This avoids capacity/capacity_orig from being 0 and
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05305789 * causing divide-by-zero issues on boot.
5790 *
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005791 * Runtime updates will correct capacity_orig.
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05305792 */
5793 if (unlikely(!rq->sd)) {
Nicolas Pitreced549f2014-05-26 18:19:38 -04005794 capacity_orig += capacity_of(cpu);
5795 capacity += capacity_of(cpu);
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05305796 continue;
5797 }
5798
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005799 sgc = rq->sd->groups->sgc;
5800 capacity_orig += sgc->capacity_orig;
5801 capacity += sgc->capacity;
Peter Zijlstra863bffc2013-08-28 11:44:39 +02005802 }
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005803 } else {
5804 /*
5805 * !SD_OVERLAP domains can assume that child groups
5806 * span the current group.
5807 */
5808
5809 group = child->groups;
5810 do {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005811 capacity_orig += group->sgc->capacity_orig;
5812 capacity += group->sgc->capacity;
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02005813 group = group->next;
5814 } while (group != child->groups);
5815 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005816
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005817 sdg->sgc->capacity_orig = capacity_orig;
5818 sdg->sgc->capacity = capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005819}
5820
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005821/*
5822 * Try to fix up capacity for tiny siblings; this is needed when
5823 * things like SD_ASYM_PACKING need f_b_g to select another sibling
5824 * which on its own isn't powerful enough.
5825 *
5826 * See update_sd_pick_busiest() and check_asym_packing().
5827 */
5828static inline int
5829fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
5830{
5831 /*
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005832 * Only siblings can have significantly less than SCHED_CAPACITY_SCALE
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005833 */
Nicolas Pitre5d4dfdd2014-05-27 13:50:41 -04005834 if (!(sd->flags & SD_SHARE_CPUCAPACITY))
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005835 return 0;
5836
5837 /*
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005838 * If ~90% of the cpu_capacity is still there, we're good.
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005839 */
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005840 if (group->sgc->capacity * 32 > group->sgc->capacity_orig * 29)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10005841 return 1;
5842
5843 return 0;
5844}
5845
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005846/*
5847 * Group imbalance indicates (and tries to solve) the problem where balancing
5848 * groups is inadequate due to tsk_cpus_allowed() constraints.
5849 *
5850 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
5851 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
5852 * Something like:
5853 *
5854 * { 0 1 2 3 } { 4 5 6 7 }
5855 * * * * *
5856 *
5857 * If we were to balance group-wise we'd place two tasks in the first group and
5858 * two tasks in the second group. Clearly this is undesired as it will overload
5859 * cpu 3 and leave one of the cpus in the second group unused.
5860 *
5861 * The current solution to this issue is detecting the skew in the first group
Peter Zijlstra62633222013-08-19 12:41:09 +02005862 * by noticing the lower domain failed to reach balance and had difficulty
5863 * moving tasks due to affinity constraints.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005864 *
5865 * When this is so detected; this group becomes a candidate for busiest; see
Kamalesh Babulaled1b7732013-10-13 23:06:15 +05305866 * update_sd_pick_busiest(). And calculate_imbalance() and
Peter Zijlstra62633222013-08-19 12:41:09 +02005867 * find_busiest_group() avoid some of the usual balance conditions to allow it
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005868 * to create an effective group imbalance.
5869 *
5870 * This is a somewhat tricky proposition since the next run might not find the
5871 * group imbalance and decide the groups need to be balanced again. A most
5872 * subtle and fragile situation.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005873 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005874
Peter Zijlstra62633222013-08-19 12:41:09 +02005875static inline int sg_imbalanced(struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005876{
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005877 return group->sgc->imbalance;
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005878}
5879
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005880/*
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04005881 * Compute the group capacity factor.
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005882 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04005883 * Avoid the issue where N*frac(smt_capacity) >= 1 creates 'phantom' cores by
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005884 * first dividing out the smt factor and computing the actual number of cores
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005885 * and limiting unit capacity with that.
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005886 */
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04005887static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *group)
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005888{
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04005889 unsigned int capacity_factor, smt, cpus;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005890 unsigned int capacity, capacity_orig;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005891
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005892 capacity = group->sgc->capacity;
5893 capacity_orig = group->sgc->capacity_orig;
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005894 cpus = group->group_weight;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005895
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005896 /* smt := ceil(cpus / capacity), assumes: 1 < smt_capacity < 2 */
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005897 smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig);
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04005898 capacity_factor = cpus / smt; /* cores */
Peter Zijlstrac61037e2013-08-28 12:40:38 +02005899
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005900 capacity_factor = min_t(unsigned,
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005901 capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE));
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04005902 if (!capacity_factor)
5903 capacity_factor = fix_small_capacity(env->sd, group);
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005904
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04005905 return capacity_factor;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005906}
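
/*
 * Example with invented numbers: a group of 4 SMT-2 threads (2 cores) with
 * capacity_orig ~= 4 * 589 = 2356 gives smt = DIV_ROUND_UP(1024 * 4, 2356)
 * = 2 and capacity_factor = 4 / 2 = 2 "cores".  If rt pressure has pushed
 * capacity down to ~1100, the min() above clamps the factor to
 * DIV_ROUND_CLOSEST(1100, 1024) = 1, so the group is expected to carry
 * only one task before group_classify() calls it overloaded.
 */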
5907
Rik van Rielcaeb1782014-07-28 14:16:28 -04005908static enum group_type
5909group_classify(struct sched_group *group, struct sg_lb_stats *sgs)
5910{
5911 if (sgs->sum_nr_running > sgs->group_capacity_factor)
5912 return group_overloaded;
5913
5914 if (sg_imbalanced(group))
5915 return group_imbalanced;
5916
5917 return group_other;
5918}
5919
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005920/**
5921 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
5922 * @env: The load balancing environment.
5923 * @group: sched_group whose statistics are to be updated.
5924 * @load_idx: Load index of sched_domain of this_cpu for load calc.
5925 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005926 * @sgs: variable to hold the statistics for this group.
Masanari Iidacd3bd4e2014-07-28 12:38:06 +09005927 * @overload: Indicate more than one runnable task for any CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005928 */
5929static inline void update_sg_lb_stats(struct lb_env *env,
5930 struct sched_group *group, int load_idx,
Tim Chen4486edd2014-06-23 12:16:49 -07005931 int local_group, struct sg_lb_stats *sgs,
5932 bool *overload)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005933{
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02005934 unsigned long load;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005935 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005936
Peter Zijlstrab72ff132013-08-28 10:32:32 +02005937 memset(sgs, 0, sizeof(*sgs));
5938
Michael Wangb94031302012-07-12 16:10:13 +08005939 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005940 struct rq *rq = cpu_rq(i);
5941
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005942 /* Bias balancing toward cpus of our domain */
Peter Zijlstra62633222013-08-19 12:41:09 +02005943 if (local_group)
Peter Zijlstra04f733b2012-05-11 00:12:02 +02005944 load = target_load(i, load_idx);
Peter Zijlstra62633222013-08-19 12:41:09 +02005945 else
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005946 load = source_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005947
5948 sgs->group_load += load;
Kamalesh Babulal380c9072013-11-15 15:06:52 +05305949 sgs->sum_nr_running += rq->nr_running;
Tim Chen4486edd2014-06-23 12:16:49 -07005950
5951 if (rq->nr_running > 1)
5952 *overload = true;
5953
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005954#ifdef CONFIG_NUMA_BALANCING
5955 sgs->nr_numa_running += rq->nr_numa_running;
5956 sgs->nr_preferred_running += rq->nr_preferred_running;
5957#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005958 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005959 if (idle_cpu(i))
5960 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005961 }
5962
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005963 /* Adjust by relative CPU capacity of the group */
5964 sgs->group_capacity = group->sgc->capacity;
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005965 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005966
Suresh Siddhadd5feea2010-02-23 16:13:52 -08005967 if (sgs->sum_nr_running)
Peter Zijlstra38d0f772013-08-15 19:47:56 +02005968 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005969
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07005970 sgs->group_weight = group->group_weight;
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04005971 sgs->group_capacity_factor = sg_capacity_factor(env, group);
Rik van Rielcaeb1782014-07-28 14:16:28 -04005972 sgs->group_type = group_classify(group, sgs);
Peter Zijlstrab37d9312013-08-28 11:50:34 +02005973
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04005974 if (sgs->group_capacity_factor > sgs->sum_nr_running)
Nicolas Pitre1b6a7492014-05-26 18:19:35 -04005975 sgs->group_has_free_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005976}
5977
5978/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10005979 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07005980 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005981 * @sds: sched_domain statistics
5982 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10005983 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10005984 *
5985 * Determine if @sg is a busier group than the previously selected
5986 * busiest group.
Yacine Belkadie69f6182013-07-12 20:45:47 +02005987 *
5988 * Return: %true if @sg is a busier group than the previously selected
5989 * busiest group. %false otherwise.
Michael Neuling532cb4c2010-06-08 14:57:02 +10005990 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005991static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10005992 struct sd_lb_stats *sds,
5993 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005994 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005995{
Rik van Rielcaeb1782014-07-28 14:16:28 -04005996 struct sg_lb_stats *busiest = &sds->busiest_stat;
Michael Neuling532cb4c2010-06-08 14:57:02 +10005997
Rik van Rielcaeb1782014-07-28 14:16:28 -04005998 if (sgs->group_type > busiest->group_type)
Michael Neuling532cb4c2010-06-08 14:57:02 +10005999 return true;
6000
Rik van Rielcaeb1782014-07-28 14:16:28 -04006001 if (sgs->group_type < busiest->group_type)
6002 return false;
6003
6004 if (sgs->avg_load <= busiest->avg_load)
6005 return false;
6006
6007 /* This is the busiest node in its class. */
6008 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10006009 return true;
6010
6011 /*
6012 * ASYM_PACKING needs to move all the work to the lowest
6013 * numbered CPUs in the group; therefore mark all groups
6014 * higher than ourselves as busy.
6015 */
Rik van Rielcaeb1782014-07-28 14:16:28 -04006016 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10006017 if (!sds->busiest)
6018 return true;
6019
6020 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
6021 return true;
6022 }
6023
6024 return false;
6025}
6026
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006027#ifdef CONFIG_NUMA_BALANCING
6028static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6029{
6030 if (sgs->sum_nr_running > sgs->nr_numa_running)
6031 return regular;
6032 if (sgs->sum_nr_running > sgs->nr_preferred_running)
6033 return remote;
6034 return all;
6035}
6036
6037static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6038{
6039 if (rq->nr_running > rq->nr_numa_running)
6040 return regular;
6041 if (rq->nr_running > rq->nr_preferred_running)
6042 return remote;
6043 return all;
6044}
6045#else
6046static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6047{
6048 return all;
6049}
6050
6051static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6052{
6053 return regular;
6054}
6055#endif /* CONFIG_NUMA_BALANCING */
6056
Michael Neuling532cb4c2010-06-08 14:57:02 +10006057/**
Hui Kang461819a2011-10-11 23:00:59 -04006058 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07006059 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006060 * @sds: variable to hold the statistics for this sched_domain.
6061 */
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006062static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006063{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006064 struct sched_domain *child = env->sd->child;
6065 struct sched_group *sg = env->sd->groups;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006066 struct sg_lb_stats tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006067 int load_idx, prefer_sibling = 0;
Tim Chen4486edd2014-06-23 12:16:49 -07006068 bool overload = false;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006069
6070 if (child && child->flags & SD_PREFER_SIBLING)
6071 prefer_sibling = 1;
6072
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006073 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006074
6075 do {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006076 struct sg_lb_stats *sgs = &tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006077 int local_group;
6078
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006079 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006080 if (local_group) {
6081 sds->local = sg;
6082 sgs = &sds->local_stat;
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006083
6084 if (env->idle != CPU_NEWLY_IDLE ||
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006085 time_after_eq(jiffies, sg->sgc->next_update))
6086 update_group_capacity(env->sd, env->dst_cpu);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006087 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006088
Tim Chen4486edd2014-06-23 12:16:49 -07006089 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
6090 &overload);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006091
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006092 if (local_group)
6093 goto next_group;
6094
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006095 /*
6096 * In case the child domain prefers tasks go to siblings
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04006097 * first, lower the sg capacity factor to one so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07006098 * and move all the excess tasks away. We lower the capacity
6099 * of a group only if the local group has the capacity to fit
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04006100 * these excess tasks, i.e. nr_running < group_capacity_factor. The
Nikhil Rao75dd3212010-10-15 13:12:30 -07006101 * extra check prevents the case where you always pull from the
6102 * heaviest group when it is already under-utilized (possible when a
6103 * single large-weight task outweighs the rest of the tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006104 */
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006105 if (prefer_sibling && sds->local &&
Nicolas Pitre1b6a7492014-05-26 18:19:35 -04006106 sds->local_stat.group_has_free_capacity)
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04006107 sgs->group_capacity_factor = min(sgs->group_capacity_factor, 1U);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006108
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006109 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10006110 sds->busiest = sg;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006111 sds->busiest_stat = *sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006112 }
6113
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006114next_group:
6115 /* Now, start updating sd_lb_stats */
6116 sds->total_load += sgs->group_load;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006117 sds->total_capacity += sgs->group_capacity;
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006118
Michael Neuling532cb4c2010-06-08 14:57:02 +10006119 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006120 } while (sg != env->sd->groups);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006121
6122 if (env->sd->flags & SD_NUMA)
6123 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
Tim Chen4486edd2014-06-23 12:16:49 -07006124
6125 if (!env->sd->parent) {
6126 /* update overload indicator if we are at root domain */
6127 if (env->dst_rq->rd->overload != overload)
6128 env->dst_rq->rd->overload = overload;
6129 }
6130
Michael Neuling532cb4c2010-06-08 14:57:02 +10006131}
6132
Michael Neuling532cb4c2010-06-08 14:57:02 +10006133/**
6134 * check_asym_packing - Check to see if the group is packed into the
6135 * sched domain.
6136 *
6137 * This is primarily intended to be used at the sibling level. Some
6138 * cores like POWER7 prefer to use lower numbered SMT threads. In the
6139 * case of POWER7, it can move to lower SMT modes only when higher
6140 * threads are idle. When in lower SMT modes, the threads will
6141 * perform better since they share less core resources. Hence when we
6142 * have idle threads, we want them to be the higher ones.
6143 *
6144 * This packing function is run on idle threads. It checks to see if
6145 * the busiest CPU in this domain (core in the P7 case) has a higher
6146 * CPU number than the packing function is being run on. Here we are
6147 * assuming lower CPU number will be equivalent to lower a SMT thread
6148 * number.
6149 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02006150 * Return: 1 when packing is required and a task should be moved to
Michael Neulingb6b12292010-06-10 12:06:21 +10006151 * this CPU. The amount of the imbalance is returned in *imbalance.
6152 *
Randy Dunlapcd968912012-06-08 13:18:33 -07006153 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10006154 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10006155 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006156static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10006157{
6158 int busiest_cpu;
6159
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006160 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10006161 return 0;
6162
6163 if (!sds->busiest)
6164 return 0;
6165
6166 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006167 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10006168 return 0;
6169
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006170 env->imbalance = DIV_ROUND_CLOSEST(
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006171 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006172 SCHED_CAPACITY_SCALE);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006173
Michael Neuling532cb4c2010-06-08 14:57:02 +10006174 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006175}
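
/*
 * Hypothetical example: with SD_ASYM_PACKING set on an SMT sibling domain,
 * if this runs on dst_cpu 0 while the busiest group's first cpu is 2, the
 * busiest group's avg_load is converted back to a load figure
 * (avg_load * capacity / SCHED_CAPACITY_SCALE) and used as the imbalance,
 * pulling the work down to the lower-numbered thread.
 */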
6176
6177/**
6178 * fix_small_imbalance - Calculate the minor imbalance that exists
6179 * amongst the groups of a sched_domain, during
6180 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07006181 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006182 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006183 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006184static inline
6185void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006186{
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006187 unsigned long tmp, capa_now = 0, capa_move = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006188 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006189 unsigned long scaled_busy_load_per_task;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006190 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006191
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006192 local = &sds->local_stat;
6193 busiest = &sds->busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006194
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006195 if (!local->sum_nr_running)
6196 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
6197 else if (busiest->load_per_task > local->load_per_task)
6198 imbn = 1;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006199
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006200 scaled_busy_load_per_task =
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006201 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006202 busiest->group_capacity;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006203
Vladimir Davydov3029ede2013-09-15 17:49:14 +04006204 if (busiest->avg_load + scaled_busy_load_per_task >=
6205 local->avg_load + (scaled_busy_load_per_task * imbn)) {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006206 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006207 return;
6208 }
6209
6210 /*
6211 * OK, we don't have enough imbalance to justify moving tasks;
Nicolas Pitreced549f2014-05-26 18:19:38 -04006212 * however we may be able to increase total CPU capacity used by
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006213 * moving them.
6214 */
6215
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006216 capa_now += busiest->group_capacity *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006217 min(busiest->load_per_task, busiest->avg_load);
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006218 capa_now += local->group_capacity *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006219 min(local->load_per_task, local->avg_load);
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006220 capa_now /= SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006221
6222 /* Amount of load we'd subtract */
Vincent Guittota2cd4262014-03-11 17:26:06 +01006223 if (busiest->avg_load > scaled_busy_load_per_task) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006224 capa_move += busiest->group_capacity *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006225 min(busiest->load_per_task,
Vincent Guittota2cd4262014-03-11 17:26:06 +01006226 busiest->avg_load - scaled_busy_load_per_task);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006227 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006228
6229 /* Amount of load we'd add */
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006230 if (busiest->avg_load * busiest->group_capacity <
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006231 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006232 tmp = (busiest->avg_load * busiest->group_capacity) /
6233 local->group_capacity;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006234 } else {
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006235 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006236 local->group_capacity;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006237 }
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006238 capa_move += local->group_capacity *
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02006239 min(local->load_per_task, local->avg_load + tmp);
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006240 capa_move /= SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006241
6242 /* Move if we gain throughput */
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006243 if (capa_move > capa_now)
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006244 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006245}
6246
6247/**
6248 * calculate_imbalance - Calculate the amount of imbalance present within the
6249 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006250 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006251 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006252 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006253static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006254{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006255 unsigned long max_pull, load_above_capacity = ~0UL;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006256 struct sg_lb_stats *local, *busiest;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006257
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006258 local = &sds->local_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006259 busiest = &sds->busiest_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006260
Rik van Rielcaeb1782014-07-28 14:16:28 -04006261 if (busiest->group_type == group_imbalanced) {
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006262 /*
6263 * In the group_imb case we cannot rely on group-wide averages
6264		 * to ensure cpu-load equilibrium; look at wider averages instead. XXX
6265 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006266 busiest->load_per_task =
6267 min(busiest->load_per_task, sds->avg_load);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006268 }
6269
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006270 /*
6271 * In the presence of smp nice balancing, certain scenarios can have
6272	 * max load less than avg load (as we skip the groups at or below
Nicolas Pitreced549f2014-05-26 18:19:38 -04006273	 * their cpu_capacity while calculating max_load).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006274 */
Vladimir Davydovb1885552013-09-15 17:49:13 +04006275 if (busiest->avg_load <= sds->avg_load ||
6276 local->avg_load >= sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006277 env->imbalance = 0;
6278 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006279 }
6280
Peter Zijlstra9a5d9ba2014-07-29 17:15:11 +02006281 /*
6282 * If there aren't any idle cpus, avoid creating some.
6283 */
6284 if (busiest->group_type == group_overloaded &&
6285 local->group_type == group_overloaded) {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006286 load_above_capacity =
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04006287 (busiest->sum_nr_running - busiest->group_capacity_factor);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006288
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006289 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_CAPACITY_SCALE);
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006290 load_above_capacity /= busiest->group_capacity;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006291 }
6292
6293 /*
6294 * We're trying to get all the cpus to the average_load, so we don't
6295 * want to push ourselves above the average load, nor do we wish to
6296 * reduce the max loaded cpu below the average load. At the same time,
6297 * we also don't want to reduce the group load below the group capacity
6298 * (so that we can implement power-savings policies etc). Thus we look
6299 * for the minimum possible imbalance.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006300 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006301 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006302
6303 /* How much load to actually move to equalise the imbalance */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006304 env->imbalance = min(
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006305 max_pull * busiest->group_capacity,
6306 (sds->avg_load - local->avg_load) * local->group_capacity
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006307 ) / SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006308
6309 /*
6310 * if *imbalance is less than the average load per runnable task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006311	 * there is no guarantee that any tasks will be moved, so we may need
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006312	 * to bump its value to force at least one task to be
6313	 * moved.
6314 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006315 if (env->imbalance < busiest->load_per_task)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006316 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006317}
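
/*
 * Illustrative sketch only, not part of the build: the imbalance formula
 * above with the lb_env/sd_lb_stats fields flattened into plain parameters.
 * The numbers in the example comment are assumptions, and 1024 stands in for
 * SCHED_CAPACITY_SCALE.
 */
#if 0
static unsigned long imbalance_sketch(unsigned long busiest_avg, unsigned long busiest_cap,
				      unsigned long local_avg, unsigned long local_cap,
				      unsigned long domain_avg, unsigned long load_above_cap)
{
	unsigned long max_pull, pull_term, fill_term;

	/* don't pull the busiest group below the domain average ... */
	max_pull = busiest_avg - domain_avg;
	if (max_pull > load_above_cap)
		max_pull = load_above_cap;

	/* ... and don't push the local group above it */
	pull_term = max_pull * busiest_cap;
	fill_term = (domain_avg - local_avg) * local_cap;

	/*
	 * Example: busiest_avg = 2048, local_avg = 0, domain_avg = 1024,
	 * both capacities 1024 and load_above_cap effectively unlimited:
	 * min(1024 * 1024, 1024 * 1024) / 1024 = 1024, i.e. roughly one
	 * nice-0 task's worth of load should be moved.
	 */
	return (pull_term < fill_term ? pull_term : fill_term) / 1024;
}
#endif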
Nikhil Raofab47622010-10-15 13:12:29 -07006318
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006319/******* find_busiest_group() helpers end here *********************/
6320
6321/**
6322 * find_busiest_group - Returns the busiest group within the sched_domain
6323 * if there is an imbalance. If there isn't an imbalance, and
6324 * the user has opted for power-savings, it returns a group whose
6325 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
6326 * such a group exists.
6327 *
6328 * Also calculates the amount of weighted load which should be moved
6329 * to restore balance.
6330 *
Randy Dunlapcd968912012-06-08 13:18:33 -07006331 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006332 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02006333 * Return: - The busiest group if imbalance exists.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006334 * - If no imbalance and user has opted for power-savings balance,
6335 * return the least loaded group whose CPUs can be
6336 * put to idle by rebalancing its tasks onto our group.
6337 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006338static struct sched_group *find_busiest_group(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006339{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006340 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006341 struct sd_lb_stats sds;
6342
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02006343 init_sd_lb_stats(&sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006344
6345 /*
6346	 * Compute the various statistics relevant for load balancing at
6347 * this level.
6348 */
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006349 update_sd_lb_stats(env, &sds);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006350 local = &sds.local_stat;
6351 busiest = &sds.busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006352
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006353 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
6354 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10006355 return sds.busiest;
6356
Peter Zijlstracc57aa82011-02-21 18:55:32 +01006357 /* There is no busy sibling group to pull tasks from */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006358 if (!sds.busiest || busiest->sum_nr_running == 0)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006359 goto out_balanced;
6360
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006361 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
6362 / sds.total_capacity;
Ken Chenb0432d82011-04-07 17:23:22 -07006363
Peter Zijlstra866ab432011-02-21 18:56:47 +01006364 /*
6365 * If the busiest group is imbalanced the below checks don't
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006366 * work because they assume all things are equal, which typically
Peter Zijlstra866ab432011-02-21 18:56:47 +01006367 * isn't true due to cpus_allowed constraints and the like.
6368 */
Rik van Rielcaeb1782014-07-28 14:16:28 -04006369 if (busiest->group_type == group_imbalanced)
Peter Zijlstra866ab432011-02-21 18:56:47 +01006370 goto force_balance;
6371
Peter Zijlstracc57aa82011-02-21 18:55:32 +01006372 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Nicolas Pitre1b6a7492014-05-26 18:19:35 -04006373 if (env->idle == CPU_NEWLY_IDLE && local->group_has_free_capacity &&
6374 !busiest->group_has_free_capacity)
Nikhil Raofab47622010-10-15 13:12:29 -07006375 goto force_balance;
6376
Peter Zijlstracc57aa82011-02-21 18:55:32 +01006377 /*
6378 * If the local group is more busy than the selected busiest group
6379 * don't try and pull any tasks.
6380 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006381 if (local->avg_load >= busiest->avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006382 goto out_balanced;
6383
Peter Zijlstracc57aa82011-02-21 18:55:32 +01006384 /*
6385 * Don't pull any tasks if this group is already above the domain
6386 * average load.
6387 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006388 if (local->avg_load >= sds.avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006389 goto out_balanced;
6390
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006391 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07006392 /*
6393		 * This cpu is idle. If the busiest group doesn't
6394		 * have more tasks than the number of available cpus and
6395		 * there is no imbalance between this and the busiest group
6396		 * wrt idle cpus, it is balanced.
6397 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006398 if ((local->idle_cpus < busiest->idle_cpus) &&
6399 busiest->sum_nr_running <= busiest->group_weight)
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07006400 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01006401 } else {
6402 /*
6403 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
6404 * imbalance_pct to be conservative.
6405 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006406 if (100 * busiest->avg_load <=
6407 env->sd->imbalance_pct * local->avg_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01006408 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07006409 }
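
	/*
	 * Worked example for the non-idle check above (values assumed): with
	 * an imbalance_pct of 125, a busy or newly-idle cpu only proceeds
	 * when 100 * busiest->avg_load > 125 * local->avg_load, i.e. the
	 * busiest group is more than 25% busier than the local one.
	 */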
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006410
Nikhil Raofab47622010-10-15 13:12:29 -07006411force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006412 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006413 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006414 return sds.busiest;
6415
6416out_balanced:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006417 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006418 return NULL;
6419}
6420
6421/*
6422 * find_busiest_queue - find the busiest runqueue among the cpus in group.
6423 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006424static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08006425 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006426{
6427 struct rq *busiest = NULL, *rq;
Nicolas Pitreced549f2014-05-26 18:19:38 -04006428 unsigned long busiest_load = 0, busiest_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006429 int i;
6430
Peter Zijlstra6906a402013-08-19 15:20:21 +02006431 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Nicolas Pitreced549f2014-05-26 18:19:38 -04006432 unsigned long capacity, capacity_factor, wl;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006433 enum fbq_type rt;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006434
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006435 rq = cpu_rq(i);
6436 rt = fbq_classify_rq(rq);
6437
6438 /*
6439 * We classify groups/runqueues into three groups:
6440 * - regular: there are !numa tasks
6441 * - remote: there are numa tasks that run on the 'wrong' node
6442 * - all: there is no distinction
6443 *
6444 * In order to avoid migrating ideally placed numa tasks,
6445 * ignore those when there's better options.
6446 *
6447 * If we ignore the actual busiest queue to migrate another
6448 * task, the next balance pass can still reduce the busiest
6449 * queue by moving tasks around inside the node.
6450 *
6451 * If we cannot move enough load due to this classification
6452 * the next pass will adjust the group classification and
6453 * allow migration of more tasks.
6454 *
6455 * Both cases only affect the total convergence complexity.
6456 */
6457 if (rt > env->fbq_type)
6458 continue;
6459
Nicolas Pitreced549f2014-05-26 18:19:38 -04006460 capacity = capacity_of(i);
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006461 capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE);
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04006462 if (!capacity_factor)
6463 capacity_factor = fix_small_capacity(env->sd, group);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006464
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006465 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006466
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006467 /*
6468 * When comparing with imbalance, use weighted_cpuload()
Nicolas Pitreced549f2014-05-26 18:19:38 -04006469 * which is not scaled with the cpu capacity.
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006470 */
Nicolas Pitre0fedc6c2014-05-26 18:19:36 -04006471 if (capacity_factor && rq->nr_running == 1 && wl > env->imbalance)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006472 continue;
6473
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006474 /*
6475 * For the load comparisons with the other cpu's, consider
Nicolas Pitreced549f2014-05-26 18:19:38 -04006476 * the weighted_cpuload() scaled with the cpu capacity, so
6477 * that the load can be moved away from the cpu that is
6478 * potentially running at a lower capacity.
Joonsoo Kim95a79b82013-08-06 17:36:41 +09006479 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04006480 * Thus we're looking for max(wl_i / capacity_i), crosswise
Joonsoo Kim95a79b82013-08-06 17:36:41 +09006481 * multiplication to rid ourselves of the division works out
Nicolas Pitreced549f2014-05-26 18:19:38 -04006482 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
6483 * our previous maximum.
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006484 */
Nicolas Pitreced549f2014-05-26 18:19:38 -04006485 if (wl * busiest_capacity > busiest_load * capacity) {
Joonsoo Kim95a79b82013-08-06 17:36:41 +09006486 busiest_load = wl;
Nicolas Pitreced549f2014-05-26 18:19:38 -04006487 busiest_capacity = capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006488 busiest = rq;
6489 }
6490 }
6491
6492 return busiest;
6493}
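
/*
 * Illustration only, not part of the build: why the loop above compares
 * wl_i * capacity_j against wl_j * capacity_i instead of dividing.  The
 * sample loads and capacities below are assumptions, not values from any
 * real topology.
 */
#if 0
static int relatively_busier(unsigned long wl_i, unsigned long cap_i,
			     unsigned long wl_j, unsigned long cap_j)
{
	/*
	 * wl_i / cap_i > wl_j / cap_j, rewritten to avoid the truncating
	 * integer division.  E.g. wl_i = 600, cap_i = 512, wl_j = 1000,
	 * cap_j = 1024: 600 * 1024 > 1000 * 512, so cpu i is busier
	 * relative to its capacity even though its raw load is lower.
	 */
	return wl_i * cap_j > wl_j * cap_i;
}
#endif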
6494
6495/*
6496 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
6497 * it works so long as it is large enough.
6498 */
6499#define MAX_PINNED_INTERVAL 512
6500
6501/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09006502DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006503
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006504static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01006505{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006506 struct sched_domain *sd = env->sd;
6507
6508 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10006509
6510 /*
6511 * ASYM_PACKING needs to force migrate tasks from busy but
6512 * higher numbered CPUs in order to pack all tasks in the
6513 * lowest numbered CPUs.
6514 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006515 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10006516 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01006517 }
6518
6519 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
6520}
6521
Tejun Heo969c7922010-05-06 18:49:21 +02006522static int active_load_balance_cpu_stop(void *data);
6523
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006524static int should_we_balance(struct lb_env *env)
6525{
6526 struct sched_group *sg = env->sd->groups;
6527 struct cpumask *sg_cpus, *sg_mask;
6528 int cpu, balance_cpu = -1;
6529
6530 /*
6531	 * In the newly idle case, we will allow all the cpus
6532 * to do the newly idle load balance.
6533 */
6534 if (env->idle == CPU_NEWLY_IDLE)
6535 return 1;
6536
6537 sg_cpus = sched_group_cpus(sg);
6538 sg_mask = sched_group_mask(sg);
6539 /* Try to find first idle cpu */
6540 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
6541 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
6542 continue;
6543
6544 balance_cpu = cpu;
6545 break;
6546 }
6547
6548 if (balance_cpu == -1)
6549 balance_cpu = group_balance_cpu(sg);
6550
6551 /*
6552	 * The first idle cpu, or the first cpu (busiest) in this sched group,
6553 * is eligible for doing load balancing at this and above domains.
6554 */
Joonsoo Kimb0cff9d2013-09-10 15:54:49 +09006555 return balance_cpu == env->dst_cpu;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006556}
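
/*
 * Example (cpu numbers are illustrative): in a group spanning cpus 0-3 with
 * cpus 1 and 3 idle, a periodic balance returns 1 here only on cpu 1, the
 * first idle cpu; cpus 0, 2 and 3 bail out and leave this domain level to
 * cpu 1, so the same domain is not balanced several times in parallel.
 */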
6557
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006558/*
6559 * Check this_cpu to ensure it is balanced within domain. Attempt to move
6560 * tasks if there is an imbalance.
6561 */
6562static int load_balance(int this_cpu, struct rq *this_rq,
6563 struct sched_domain *sd, enum cpu_idle_type idle,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006564 int *continue_balancing)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006565{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306566 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra62633222013-08-19 12:41:09 +02006567 struct sched_domain *sd_parent = sd->parent;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006568 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006569 struct rq *busiest;
6570 unsigned long flags;
Joonsoo Kime6252c32013-04-23 17:27:41 +09006571 struct cpumask *cpus = __get_cpu_var(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006572
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006573 struct lb_env env = {
6574 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01006575 .dst_cpu = this_cpu,
6576 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306577 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006578 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02006579 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08006580 .cpus = cpus,
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006581 .fbq_type = all,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006582 };
6583
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006584 /*
6585 * For NEWLY_IDLE load_balancing, we don't need to consider
6586 * other cpus in our group
6587 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006588 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006589 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006590
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006591 cpumask_copy(cpus, cpu_active_mask);
6592
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006593 schedstat_inc(sd, lb_count[idle]);
6594
6595redo:
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006596 if (!should_we_balance(&env)) {
6597 *continue_balancing = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006598 goto out_balanced;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006599 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006600
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006601 group = find_busiest_group(&env);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006602 if (!group) {
6603 schedstat_inc(sd, lb_nobusyg[idle]);
6604 goto out_balanced;
6605 }
6606
Michael Wangb94031302012-07-12 16:10:13 +08006607 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006608 if (!busiest) {
6609 schedstat_inc(sd, lb_nobusyq[idle]);
6610 goto out_balanced;
6611 }
6612
Michael Wang78feefc2012-08-06 16:41:59 +08006613 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006614
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006615 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006616
6617 ld_moved = 0;
6618 if (busiest->nr_running > 1) {
6619 /*
6620 * Attempt to move tasks. If find_busiest_group has found
6621 * an imbalance but busiest->nr_running <= 1, the group is
6622 * still unbalanced. ld_moved simply stays zero, so it is
6623 * correctly treated as an imbalance.
6624 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006625 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02006626 env.src_cpu = busiest->cpu;
6627 env.src_rq = busiest;
6628 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006629
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01006630more_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006631 local_irq_save(flags);
Michael Wang78feefc2012-08-06 16:41:59 +08006632 double_rq_lock(env.dst_rq, busiest);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306633
6634 /*
6635 * cur_ld_moved - load moved in current iteration
6636 * ld_moved - cumulative load moved across iterations
6637 */
6638 cur_ld_moved = move_tasks(&env);
6639 ld_moved += cur_ld_moved;
Michael Wang78feefc2012-08-06 16:41:59 +08006640 double_rq_unlock(env.dst_rq, busiest);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006641 local_irq_restore(flags);
6642
6643 /*
6644 * some other cpu did the load balance for us.
6645 */
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306646 if (cur_ld_moved && env.dst_cpu != smp_processor_id())
6647 resched_cpu(env.dst_cpu);
6648
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09006649 if (env.flags & LBF_NEED_BREAK) {
6650 env.flags &= ~LBF_NEED_BREAK;
6651 goto more_balance;
6652 }
6653
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306654 /*
6655 * Revisit (affine) tasks on src_cpu that couldn't be moved to
6656 * us and move them to an alternate dst_cpu in our sched_group
6657 * where they can run. The upper limit on how many times we
6658 * iterate on same src_cpu is dependent on number of cpus in our
6659 * sched_group.
6660 *
6661 * This changes load balance semantics a bit on who can move
6662 * load to a given_cpu. In addition to the given_cpu itself
6663		 * (or an ilb_cpu acting on its behalf where given_cpu is
6664 * nohz-idle), we now have balance_cpu in a position to move
6665 * load to given_cpu. In rare situations, this may cause
6666 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
6667		 * _independently_ and at the _same_ time to move some load to
6668		 * given_cpu) causing excess load to be moved to given_cpu.
6669 * This however should not happen so much in practice and
6670 * moreover subsequent load balance cycles should correct the
6671 * excess load moved.
6672 */
Peter Zijlstra62633222013-08-19 12:41:09 +02006673 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306674
Vladimir Davydov7aff2e32013-09-15 21:30:13 +04006675 /* Prevent to re-select dst_cpu via env's cpus */
6676 cpumask_clear_cpu(env.dst_cpu, env.cpus);
6677
Michael Wang78feefc2012-08-06 16:41:59 +08006678 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306679 env.dst_cpu = env.new_dst_cpu;
Peter Zijlstra62633222013-08-19 12:41:09 +02006680 env.flags &= ~LBF_DST_PINNED;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306681 env.loop = 0;
6682 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006683
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306684 /*
6685 * Go back to "more_balance" rather than "redo" since we
6686 * need to continue with same src_cpu.
6687 */
6688 goto more_balance;
6689 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006690
Peter Zijlstra62633222013-08-19 12:41:09 +02006691 /*
6692 * We failed to reach balance because of affinity.
6693 */
6694 if (sd_parent) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006695 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
Peter Zijlstra62633222013-08-19 12:41:09 +02006696
6697 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0) {
6698 *group_imbalance = 1;
6699 } else if (*group_imbalance)
6700 *group_imbalance = 0;
6701 }
6702
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006703 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006704 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006705 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05306706 if (!cpumask_empty(cpus)) {
6707 env.loop = 0;
6708 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006709 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05306710 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006711 goto out_balanced;
6712 }
6713 }
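
	/*
	 * Put differently (scenario assumed for illustration): if every task
	 * on the busiest cpu turns out to be affined away from dst_cpu, the
	 * LBF_ALL_PINNED handling above drops that cpu from the candidate
	 * mask and restarts the search against the remaining cpus; once the
	 * mask is empty the attempt is simply declared balanced.
	 */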
6714
6715 if (!ld_moved) {
6716 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07006717 /*
6718 * Increment the failure counter only on periodic balance.
6719 * We do not want newidle balance, which can be very
6720		 * frequent, to pollute the failure counter, causing
6721 * excessive cache_hot migrations and active balances.
6722 */
6723 if (idle != CPU_NEWLY_IDLE)
6724 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006725
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006726 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006727 raw_spin_lock_irqsave(&busiest->lock, flags);
6728
Tejun Heo969c7922010-05-06 18:49:21 +02006729 /* don't kick the active_load_balance_cpu_stop,
6730 * if the curr task on busiest cpu can't be
6731 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006732 */
6733 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02006734 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006735 raw_spin_unlock_irqrestore(&busiest->lock,
6736 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006737 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006738 goto out_one_pinned;
6739 }
6740
Tejun Heo969c7922010-05-06 18:49:21 +02006741 /*
6742 * ->active_balance synchronizes accesses to
6743 * ->active_balance_work. Once set, it's cleared
6744 * only after active load balance is finished.
6745 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006746 if (!busiest->active_balance) {
6747 busiest->active_balance = 1;
6748 busiest->push_cpu = this_cpu;
6749 active_balance = 1;
6750 }
6751 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02006752
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006753 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02006754 stop_one_cpu_nowait(cpu_of(busiest),
6755 active_load_balance_cpu_stop, busiest,
6756 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006757 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006758
6759 /*
6760 * We've kicked active balancing, reset the failure
6761 * counter.
6762 */
6763 sd->nr_balance_failed = sd->cache_nice_tries+1;
6764 }
6765 } else
6766 sd->nr_balance_failed = 0;
6767
6768 if (likely(!active_balance)) {
6769 /* We were unbalanced, so reset the balancing interval */
6770 sd->balance_interval = sd->min_interval;
6771 } else {
6772 /*
6773 * If we've begun active balancing, start to back off. This
6774 * case may not be covered by the all_pinned logic if there
6775 * is only 1 task on the busy runqueue (because we don't call
6776 * move_tasks).
6777 */
6778 if (sd->balance_interval < sd->max_interval)
6779 sd->balance_interval *= 2;
6780 }
6781
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006782 goto out;
6783
6784out_balanced:
6785 schedstat_inc(sd, lb_balanced[idle]);
6786
6787 sd->nr_balance_failed = 0;
6788
6789out_one_pinned:
6790 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006791 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02006792 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006793 (sd->balance_interval < sd->max_interval))
6794 sd->balance_interval *= 2;
6795
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08006796 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006797out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006798 return ld_moved;
6799}
6800
Jason Low52a08ef2014-05-08 17:49:22 -07006801static inline unsigned long
6802get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
6803{
6804 unsigned long interval = sd->balance_interval;
6805
6806 if (cpu_busy)
6807 interval *= sd->busy_factor;
6808
6809 /* scale ms to jiffies */
6810 interval = msecs_to_jiffies(interval);
6811 interval = clamp(interval, 1UL, max_load_balance_interval);
6812
6813 return interval;
6814}
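
/*
 * Example with assumed values: a domain with balance_interval = 8 (ms) and
 * busy_factor = 32 is considered for balancing every 8 ms while this cpu is
 * idle but only every 256 ms while it is busy, with the result converted to
 * jiffies and clamped to max_load_balance_interval.
 */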
6815
6816static inline void
6817update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
6818{
6819 unsigned long interval, next;
6820
6821 interval = get_sd_balance_interval(sd, cpu_busy);
6822 next = sd->last_balance + interval;
6823
6824 if (time_after(*next_balance, next))
6825 *next_balance = next;
6826}
6827
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006828/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006829 * idle_balance is called by schedule() if this_cpu is about to become
6830 * idle. Attempts to pull tasks from other CPUs.
6831 */
Peter Zijlstra6e831252014-02-11 16:11:48 +01006832static int idle_balance(struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006833{
Jason Low52a08ef2014-05-08 17:49:22 -07006834 unsigned long next_balance = jiffies + HZ;
6835 int this_cpu = this_rq->cpu;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006836 struct sched_domain *sd;
6837 int pulled_task = 0;
Jason Low9bd721c2013-09-13 11:26:52 -07006838 u64 curr_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006839
Peter Zijlstra6e831252014-02-11 16:11:48 +01006840 idle_enter_fair(this_rq);
Jason Low0e5b5332014-04-28 15:45:54 -07006841
Peter Zijlstra6e831252014-02-11 16:11:48 +01006842 /*
6843 * We must set idle_stamp _before_ calling idle_balance(), such that we
6844 * measure the duration of idle_balance() as idle time.
6845 */
6846 this_rq->idle_stamp = rq_clock(this_rq);
6847
Tim Chen4486edd2014-06-23 12:16:49 -07006848 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
6849 !this_rq->rd->overload) {
Jason Low52a08ef2014-05-08 17:49:22 -07006850 rcu_read_lock();
6851 sd = rcu_dereference_check_sched_domain(this_rq->sd);
6852 if (sd)
6853 update_next_balance(sd, 0, &next_balance);
6854 rcu_read_unlock();
6855
Peter Zijlstra6e831252014-02-11 16:11:48 +01006856 goto out;
Jason Low52a08ef2014-05-08 17:49:22 -07006857 }
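
	/*
	 * In other words (defaults assumed): when this cpu's average idle
	 * period is shorter than sysctl_sched_migration_cost (0.5 ms by
	 * default) or no runqueue in the root domain is overloaded, a full
	 * newidle balance would cost more than it gains, so only
	 * next_balance is refreshed above before bailing out.
	 */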
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006858
Peter Zijlstraf492e122009-12-23 15:29:42 +01006859 /*
6860 * Drop the rq->lock, but keep IRQ/preempt disabled.
6861 */
6862 raw_spin_unlock(&this_rq->lock);
6863
Paul Turner48a16752012-10-04 13:18:31 +02006864 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02006865 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006866 for_each_domain(this_cpu, sd) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006867 int continue_balancing = 1;
Jason Low9bd721c2013-09-13 11:26:52 -07006868 u64 t0, domain_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006869
6870 if (!(sd->flags & SD_LOAD_BALANCE))
6871 continue;
6872
Jason Low52a08ef2014-05-08 17:49:22 -07006873 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
6874 update_next_balance(sd, 0, &next_balance);
Jason Low9bd721c2013-09-13 11:26:52 -07006875 break;
Jason Low52a08ef2014-05-08 17:49:22 -07006876 }
Jason Low9bd721c2013-09-13 11:26:52 -07006877
Peter Zijlstraf492e122009-12-23 15:29:42 +01006878 if (sd->flags & SD_BALANCE_NEWIDLE) {
Jason Low9bd721c2013-09-13 11:26:52 -07006879 t0 = sched_clock_cpu(this_cpu);
6880
Peter Zijlstraf492e122009-12-23 15:29:42 +01006881 pulled_task = load_balance(this_cpu, this_rq,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006882 sd, CPU_NEWLY_IDLE,
6883 &continue_balancing);
Jason Low9bd721c2013-09-13 11:26:52 -07006884
6885 domain_cost = sched_clock_cpu(this_cpu) - t0;
6886 if (domain_cost > sd->max_newidle_lb_cost)
6887 sd->max_newidle_lb_cost = domain_cost;
6888
6889 curr_cost += domain_cost;
Peter Zijlstraf492e122009-12-23 15:29:42 +01006890 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006891
Jason Low52a08ef2014-05-08 17:49:22 -07006892 update_next_balance(sd, 0, &next_balance);
Jason Low39a4d9c2014-04-23 18:30:35 -07006893
6894 /*
6895 * Stop searching for tasks to pull if there are
6896 * now runnable tasks on this rq.
6897 */
6898 if (pulled_task || this_rq->nr_running > 0)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006899 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006900 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006901 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01006902
6903 raw_spin_lock(&this_rq->lock);
6904
Jason Low0e5b5332014-04-28 15:45:54 -07006905 if (curr_cost > this_rq->max_idle_balance_cost)
6906 this_rq->max_idle_balance_cost = curr_cost;
6907
Daniel Lezcanoe5fc6612014-01-17 10:04:02 +01006908 /*
Jason Low0e5b5332014-04-28 15:45:54 -07006909	 * While browsing the domains, we released the rq lock; a task could
6910 * have been enqueued in the meantime. Since we're not going idle,
6911 * pretend we pulled a task.
Daniel Lezcanoe5fc6612014-01-17 10:04:02 +01006912 */
Jason Low0e5b5332014-04-28 15:45:54 -07006913 if (this_rq->cfs.h_nr_running && !pulled_task)
Peter Zijlstra6e831252014-02-11 16:11:48 +01006914 pulled_task = 1;
Daniel Lezcanoe5fc6612014-01-17 10:04:02 +01006915
Peter Zijlstra6e831252014-02-11 16:11:48 +01006916out:
Jason Low52a08ef2014-05-08 17:49:22 -07006917 /* Move the next balance forward */
6918 if (time_after(this_rq->next_balance, next_balance))
6919 this_rq->next_balance = next_balance;
6920
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04006921 /* Is there a task of a high priority class? */
Kirill Tkhai46383642014-03-15 02:15:07 +04006922 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04006923 pulled_task = -1;
6924
6925 if (pulled_task) {
6926 idle_exit_fair(this_rq);
Peter Zijlstra6e831252014-02-11 16:11:48 +01006927 this_rq->idle_stamp = 0;
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04006928 }
Peter Zijlstra6e831252014-02-11 16:11:48 +01006929
Daniel Lezcano3c4017c2014-01-17 10:04:03 +01006930 return pulled_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006931}
6932
6933/*
Tejun Heo969c7922010-05-06 18:49:21 +02006934 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
6935 * running tasks off the busiest CPU onto idle CPUs. It requires at
6936 * least 1 task to be running on each physical CPU where possible, and
6937 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006938 */
Tejun Heo969c7922010-05-06 18:49:21 +02006939static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006940{
Tejun Heo969c7922010-05-06 18:49:21 +02006941 struct rq *busiest_rq = data;
6942 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006943 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02006944 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006945 struct sched_domain *sd;
Tejun Heo969c7922010-05-06 18:49:21 +02006946
6947 raw_spin_lock_irq(&busiest_rq->lock);
6948
6949 /* make sure the requested cpu hasn't gone down in the meantime */
6950 if (unlikely(busiest_cpu != smp_processor_id() ||
6951 !busiest_rq->active_balance))
6952 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006953
6954 /* Is there any task to move? */
6955 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02006956 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006957
6958 /*
6959	 * This condition is "impossible"; if it occurs
6960 * we need to fix it. Originally reported by
6961 * Bjorn Helgaas on a 128-cpu setup.
6962 */
6963 BUG_ON(busiest_rq == target_rq);
6964
6965 /* move a task from busiest_rq to target_rq */
6966 double_lock_balance(busiest_rq, target_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006967
6968 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02006969 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006970 for_each_domain(target_cpu, sd) {
6971 if ((sd->flags & SD_LOAD_BALANCE) &&
6972 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
6973 break;
6974 }
6975
6976 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006977 struct lb_env env = {
6978 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01006979 .dst_cpu = target_cpu,
6980 .dst_rq = target_rq,
6981 .src_cpu = busiest_rq->cpu,
6982 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006983 .idle = CPU_IDLE,
6984 };
6985
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006986 schedstat_inc(sd, alb_count);
6987
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006988 if (move_one_task(&env))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006989 schedstat_inc(sd, alb_pushed);
6990 else
6991 schedstat_inc(sd, alb_failed);
6992 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02006993 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006994 double_unlock_balance(busiest_rq, target_rq);
Tejun Heo969c7922010-05-06 18:49:21 +02006995out_unlock:
6996 busiest_rq->active_balance = 0;
6997 raw_spin_unlock_irq(&busiest_rq->lock);
6998 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006999}
7000
Mike Galbraithd987fc72011-12-05 10:01:47 +01007001static inline int on_null_domain(struct rq *rq)
7002{
7003 return unlikely(!rcu_dereference_sched(rq->sd));
7004}
7005
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007006#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007007/*
7008 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007009 * - When one of the busy CPUs notices that there may be an idle rebalancing
7010 * needed, they will kick the idle load balancer, which then does idle
7011 * load balancing for all the idle CPUs.
7012 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007013static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007014 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007015 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007016 unsigned long next_balance; /* in jiffy units */
7017} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007018
Daniel Lezcano3dd03372014-01-06 12:34:41 +01007019static inline int find_new_ilb(void)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007020{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007021 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007022
Suresh Siddha786d6dc2011-12-01 17:07:35 -08007023 if (ilb < nr_cpu_ids && idle_cpu(ilb))
7024 return ilb;
7025
7026 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007027}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007028
7029/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007030 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
7031 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
7032 * CPU (if there is one).
7033 */
Daniel Lezcano0aeeeeb2014-01-06 12:34:42 +01007034static void nohz_balancer_kick(void)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007035{
7036 int ilb_cpu;
7037
7038 nohz.next_balance++;
7039
Daniel Lezcano3dd03372014-01-06 12:34:41 +01007040 ilb_cpu = find_new_ilb();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007041
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007042 if (ilb_cpu >= nr_cpu_ids)
7043 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007044
Suresh Siddhacd490c52011-12-06 11:26:34 -08007045 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08007046 return;
7047 /*
7048 * Use smp_send_reschedule() instead of resched_cpu().
7049 * This way we generate a sched IPI on the target cpu which
7050 * is idle. And the softirq performing nohz idle load balance
7051 * will be run before returning from the IPI.
7052 */
7053 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007054 return;
7055}
7056
Alex Shic1cc0172012-09-10 15:10:58 +08007057static inline void nohz_balance_exit_idle(int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08007058{
7059 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
Mike Galbraithd987fc72011-12-05 10:01:47 +01007060 /*
7061		 * Completely isolated CPUs never set themselves in nohz.idle_cpus_mask, so we must test.
7062 */
7063 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
7064 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
7065 atomic_dec(&nohz.nr_cpus);
7066 }
Suresh Siddha71325962012-01-19 18:28:57 -08007067 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7068 }
7069}
7070
Suresh Siddha69e1e812011-12-01 17:07:33 -08007071static inline void set_cpu_sd_state_busy(void)
7072{
7073 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307074 int cpu = smp_processor_id();
Suresh Siddha69e1e812011-12-01 17:07:33 -08007075
Suresh Siddha69e1e812011-12-01 17:07:33 -08007076 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307077 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Vincent Guittot25f55d92013-04-23 16:59:02 +02007078
7079 if (!sd || !sd->nohz_idle)
7080 goto unlock;
7081 sd->nohz_idle = 0;
7082
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007083 atomic_inc(&sd->groups->sgc->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02007084unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08007085 rcu_read_unlock();
7086}
7087
7088void set_cpu_sd_state_idle(void)
7089{
7090 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307091 int cpu = smp_processor_id();
Suresh Siddha69e1e812011-12-01 17:07:33 -08007092
Suresh Siddha69e1e812011-12-01 17:07:33 -08007093 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307094 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Vincent Guittot25f55d92013-04-23 16:59:02 +02007095
7096 if (!sd || sd->nohz_idle)
7097 goto unlock;
7098 sd->nohz_idle = 1;
7099
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007100 atomic_dec(&sd->groups->sgc->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02007101unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08007102 rcu_read_unlock();
7103}
7104
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007105/*
Alex Shic1cc0172012-09-10 15:10:58 +08007106 * This routine will record that the cpu is going idle with tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007107 * This info will be used in performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007108 */
Alex Shic1cc0172012-09-10 15:10:58 +08007109void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007110{
Suresh Siddha71325962012-01-19 18:28:57 -08007111 /*
7112 * If this cpu is going down, then nothing needs to be done.
7113 */
7114 if (!cpu_active(cpu))
7115 return;
7116
Alex Shic1cc0172012-09-10 15:10:58 +08007117 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
7118 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007119
Mike Galbraithd987fc72011-12-05 10:01:47 +01007120 /*
7121 * If we're a completely isolated CPU, we don't play.
7122 */
7123 if (on_null_domain(cpu_rq(cpu)))
7124 return;
7125
Alex Shic1cc0172012-09-10 15:10:58 +08007126 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
7127 atomic_inc(&nohz.nr_cpus);
7128 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007129}
Suresh Siddha71325962012-01-19 18:28:57 -08007130
Paul Gortmaker0db06282013-06-19 14:53:51 -04007131static int sched_ilb_notifier(struct notifier_block *nfb,
Suresh Siddha71325962012-01-19 18:28:57 -08007132 unsigned long action, void *hcpu)
7133{
7134 switch (action & ~CPU_TASKS_FROZEN) {
7135 case CPU_DYING:
Alex Shic1cc0172012-09-10 15:10:58 +08007136 nohz_balance_exit_idle(smp_processor_id());
Suresh Siddha71325962012-01-19 18:28:57 -08007137 return NOTIFY_OK;
7138 default:
7139 return NOTIFY_DONE;
7140 }
7141}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007142#endif
7143
7144static DEFINE_SPINLOCK(balancing);
7145
Peter Zijlstra49c022e2011-04-05 10:14:25 +02007146/*
7147 * Scale the max load_balance interval with the number of CPUs in the system.
7148 * This trades load-balance latency on larger machines for less cross talk.
7149 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02007150void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02007151{
7152 max_load_balance_interval = HZ*num_online_cpus()/10;
7153}
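
/*
 * For example (HZ and cpu count assumed): with HZ = 1000 on an 8-cpu system
 * the cap works out to 1000 * 8 / 10 = 800 jiffies, i.e. at most 800 ms
 * between balance attempts for any one domain.
 */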
7154
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007155/*
7156 * It checks each scheduling domain to see if it is due to be balanced,
7157 * and initiates a balancing operation if so.
7158 *
Libinb9b08532013-04-01 19:14:01 +08007159 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007160 */
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01007161static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007162{
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007163 int continue_balancing = 1;
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01007164 int cpu = rq->cpu;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007165 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02007166 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007167 /* Earliest time when we have to do rebalance again */
7168 unsigned long next_balance = jiffies + 60*HZ;
7169 int update_next_balance = 0;
Jason Lowf48627e2013-09-13 11:26:53 -07007170 int need_serialize, need_decay = 0;
7171 u64 max_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007172
Paul Turner48a16752012-10-04 13:18:31 +02007173 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08007174
Peter Zijlstradce840a2011-04-07 14:09:50 +02007175 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007176 for_each_domain(cpu, sd) {
Jason Lowf48627e2013-09-13 11:26:53 -07007177 /*
7178 * Decay the newidle max times here because this is a regular
7179 * visit to all the domains. Decay ~1% per second.
7180 */
7181 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
7182 sd->max_newidle_lb_cost =
7183 (sd->max_newidle_lb_cost * 253) / 256;
7184 sd->next_decay_max_lb_cost = jiffies + HZ;
7185 need_decay = 1;
7186 }
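		/*
		 * The 253/256 factor above shrinks max_newidle_lb_cost by
		 * roughly 1.2% per decay, applied about once per second, so
		 * a stale cost estimate drops to roughly half its value
		 * after a minute ((253/256)^60 ~= 0.49).
		 */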
7187 max_cost += sd->max_newidle_lb_cost;
7188
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007189 if (!(sd->flags & SD_LOAD_BALANCE))
7190 continue;
7191
Jason Lowf48627e2013-09-13 11:26:53 -07007192 /*
7193 * Stop the load balance at this level. There is another
7194 * CPU in our sched group which is doing load balancing more
7195 * actively.
7196 */
7197 if (!continue_balancing) {
7198 if (need_decay)
7199 continue;
7200 break;
7201 }
7202
Jason Low52a08ef2014-05-08 17:49:22 -07007203 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007204
7205 need_serialize = sd->flags & SD_SERIALIZE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007206 if (need_serialize) {
7207 if (!spin_trylock(&balancing))
7208 goto out;
7209 }
7210
7211 if (time_after_eq(jiffies, sd->last_balance + interval)) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007212 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007213 /*
Peter Zijlstra62633222013-08-19 12:41:09 +02007214 * The LBF_DST_PINNED logic could have changed
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09007215 * env->dst_cpu, so we can't know our idle
7216 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007217 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09007218 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007219 }
7220 sd->last_balance = jiffies;
Jason Low52a08ef2014-05-08 17:49:22 -07007221 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007222 }
7223 if (need_serialize)
7224 spin_unlock(&balancing);
7225out:
7226 if (time_after(next_balance, sd->last_balance + interval)) {
7227 next_balance = sd->last_balance + interval;
7228 update_next_balance = 1;
7229 }
Jason Lowf48627e2013-09-13 11:26:53 -07007230 }
7231 if (need_decay) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007232 /*
Jason Lowf48627e2013-09-13 11:26:53 -07007233 * Ensure the rq-wide value also decays but keep it at a
7234 * reasonable floor to avoid funnies with rq->avg_idle.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007235 */
Jason Lowf48627e2013-09-13 11:26:53 -07007236 rq->max_idle_balance_cost =
7237 max((u64)sysctl_sched_migration_cost, max_cost);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007238 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02007239 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007240
7241 /*
7242 * next_balance will be updated only when there is a need.
7243	 * When the cpu is attached to a null domain, for example, it will not be
7244 * updated.
7245 */
7246 if (likely(update_next_balance))
7247 rq->next_balance = next_balance;
7248}
7249
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007250#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007251/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007252 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007253 * rebalancing for all the cpus for whom scheduler ticks are stopped.
7254 */
Daniel Lezcano208cb162014-01-06 12:34:44 +01007255static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007256{
Daniel Lezcano208cb162014-01-06 12:34:44 +01007257 int this_cpu = this_rq->cpu;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007258 struct rq *rq;
7259 int balance_cpu;
7260
Suresh Siddha1c792db2011-12-01 17:07:32 -08007261 if (idle != CPU_IDLE ||
7262 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
7263 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007264
7265 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08007266 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007267 continue;
7268
7269 /*
7270 * If this cpu gets work to do, stop the load balancing
7271 * work being done for other cpus. Next load
7272 * balancing owner will pick it up.
7273 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08007274 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007275 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007276
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02007277 rq = cpu_rq(balance_cpu);
7278
Tim Chened61bbc2014-05-20 14:39:27 -07007279 /*
7280 * If time for next balance is due,
7281 * do the balance.
7282 */
7283 if (time_after_eq(jiffies, rq->next_balance)) {
7284 raw_spin_lock_irq(&rq->lock);
7285 update_rq_clock(rq);
7286 update_idle_cpu_load(rq);
7287 raw_spin_unlock_irq(&rq->lock);
7288 rebalance_domains(rq, CPU_IDLE);
7289 }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007290
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007291 if (time_after(this_rq->next_balance, rq->next_balance))
7292 this_rq->next_balance = rq->next_balance;
7293 }
7294 nohz.next_balance = this_rq->next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08007295end:
7296 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007297}
7298
7299/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007300 * Current heuristic for kicking the idle load balancer in the presence
7301 * of an idle cpu in the system.
7302 * - This rq has more than one task.
7303 * - At any scheduler domain level, this cpu's scheduler group has multiple
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007304 *   busy cpus exceeding the group's capacity.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007305 * - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
7306 * domain span are idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007307 */
Daniel Lezcano4a725622014-01-06 12:34:39 +01007308static inline int nohz_kick_needed(struct rq *rq)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007309{
7310 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007311 struct sched_domain *sd;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007312 struct sched_group_capacity *sgc;
Daniel Lezcano4a725622014-01-06 12:34:39 +01007313 int nr_busy, cpu = rq->cpu;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007314
Daniel Lezcano4a725622014-01-06 12:34:39 +01007315 if (unlikely(rq->idle_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007316 return 0;
7317
Suresh Siddha1c792db2011-12-01 17:07:32 -08007318 /*
7319	 * We may have recently been in ticked or tickless idle mode. At the first
7320 * busy tick after returning from idle, we will update the busy stats.
7321 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08007322 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08007323 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007324
7325 /*
7326 * None are in tickless mode and hence no need for NOHZ idle load
7327 * balancing.
7328 */
7329 if (likely(!atomic_read(&nohz.nr_cpus)))
7330 return 0;
Suresh Siddha1c792db2011-12-01 17:07:32 -08007331
7332 if (time_before(now, nohz.next_balance))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007333 return 0;
7334
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007335 if (rq->nr_running >= 2)
7336 goto need_kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007337
Peter Zijlstra067491b2011-12-07 14:32:08 +01007338 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307339 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007340
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307341 if (sd) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007342 sgc = sd->groups->sgc;
7343 nr_busy = atomic_read(&sgc->nr_busy_cpus);
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307344
7345 if (nr_busy > 1)
Peter Zijlstra067491b2011-12-07 14:32:08 +01007346 goto need_kick_unlock;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007347 }
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307348
7349 sd = rcu_dereference(per_cpu(sd_asym, cpu));
7350
7351 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
7352 sched_domain_span(sd)) < cpu))
7353 goto need_kick_unlock;
7354
Peter Zijlstra067491b2011-12-07 14:32:08 +01007355 rcu_read_unlock();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007356 return 0;
Peter Zijlstra067491b2011-12-07 14:32:08 +01007357
7358need_kick_unlock:
7359 rcu_read_unlock();
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007360need_kick:
7361 return 1;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007362}
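
/*
 * Example scenario (topology assumed for illustration): if this cpu's busy
 * group has more than one busy cpu (nr_busy > 1) while other cpus in the
 * system sit tickless idle, the tick path returns 1 here and
 * nohz_balancer_kick() sends an IPI to the first cpu in nohz.idle_cpus_mask,
 * which then rebalances on behalf of all the idle cpus.
 */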
7363#else
Daniel Lezcano208cb162014-01-06 12:34:44 +01007364static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007365#endif
7366
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007367/*
7368 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007369 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007370 */
7371static void run_rebalance_domains(struct softirq_action *h)
7372{
Daniel Lezcano208cb162014-01-06 12:34:44 +01007373 struct rq *this_rq = this_rq();
Suresh Siddha6eb57e02011-10-03 15:09:01 -07007374 enum cpu_idle_type idle = this_rq->idle_balance ?
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007375 CPU_IDLE : CPU_NOT_IDLE;
7376
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01007377 rebalance_domains(this_rq, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007378
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007379 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007380 * If this cpu has a pending nohz_balance_kick, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007381 * balancing on behalf of the other idle cpus whose ticks are
7382 * stopped.
7383 */
Daniel Lezcano208cb162014-01-06 12:34:44 +01007384 nohz_idle_balance(this_rq, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007385}
7386
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007387/*
7388 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007389 */
Daniel Lezcano7caff662014-01-06 12:34:38 +01007390void trigger_load_balance(struct rq *rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007391{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007392 /* Don't need to rebalance while attached to NULL domain */
Daniel Lezcanoc7260992014-01-06 12:34:45 +01007393 if (unlikely(on_null_domain(rq)))
7394 return;
7395
7396 if (time_after_eq(jiffies, rq->next_balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007397 raise_softirq(SCHED_SOFTIRQ);
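	/*
	 * Under NO_HZ_COMMON, also check whether an idle, tickless CPU must
	 * be kicked to run the nohz idle balance on behalf of all CPUs whose
	 * ticks are stopped.
	 */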
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007398#ifdef CONFIG_NO_HZ_COMMON
Daniel Lezcanoc7260992014-01-06 12:34:45 +01007399 if (nohz_kick_needed(rq))
Daniel Lezcano0aeeeeb2014-01-06 12:34:42 +01007400 nohz_balancer_kick();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007401#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007402}
7403
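/*
 * A runqueue is coming online: rescale the granularity tunables for the new
 * online CPU count and re-enable CFS runtime (bandwidth) accounting on this rq.
 */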
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01007404static void rq_online_fair(struct rq *rq)
7405{
7406 update_sysctl();
Kirill Tkhai0e59bda2014-06-25 12:19:42 +04007407
7408 update_runtime_enabled(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01007409}
7410
7411static void rq_offline_fair(struct rq *rq)
7412{
7413 update_sysctl();
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07007414
7415 /* Ensure any throttled groups are reachable by pick_next_task */
7416 unthrottle_offline_cfs_rqs(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01007417}
7418
Dhaval Giani55e12e52008-06-24 23:39:43 +05307419#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02007420
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007421/*
7422 * scheduler tick hitting a task of our scheduling class:
7423 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01007424static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007425{
7426 struct cfs_rq *cfs_rq;
7427 struct sched_entity *se = &curr->se;
7428
7429 for_each_sched_entity(se) {
7430 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01007431 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007432 }
Ben Segall18bf2802012-10-04 12:51:20 +02007433
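	/* Drive the periodic NUMA-placement scanning, if it is enabled. */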
Dave Kleikamp10e84b92013-07-31 13:53:35 -07007434 if (numabalancing_enabled)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02007435 task_tick_numa(rq, curr);
Linus Torvalds3d59eeb2012-12-16 14:33:25 -08007436
Ben Segall18bf2802012-10-04 12:51:20 +02007437 update_rq_runnable_avg(rq, 1);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007438}
7439
7440/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007441 * called on fork with the child task as argument from the parent's context
7442 * - child not yet on the tasklist
7443 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007444 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007445static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007446{
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09007447 struct cfs_rq *cfs_rq;
7448 struct sched_entity *se = &p->se, *curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02007449 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007450 struct rq *rq = this_rq();
7451 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007452
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007453 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007454
Peter Zijlstra861d0342010-08-19 13:31:43 +02007455 update_rq_clock(rq);
7456
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09007457 cfs_rq = task_cfs_rq(current);
7458 curr = cfs_rq->curr;
7459
Daisuke Nishimura6c9a27f2013-09-10 18:16:36 +09007460 /*
7461	 * Not only the cpu but also the task_group of the parent might have
7462	 * changed after parent->se.parent and parent->se.cfs_rq were copied
7463	 * to the child. Call __set_task_cpu() so that the child's se.parent
7464	 * and se.cfs_rq point to valid ones.
7465 */
7466 rcu_read_lock();
7467 __set_task_cpu(p, this_cpu);
7468 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007469
Ting Yang7109c4422007-08-28 12:53:24 +02007470 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007471
Mike Galbraithb5d9d732009-09-08 11:12:28 +02007472 if (curr)
7473 se->vruntime = curr->vruntime;
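	/*
	 * place_entity() with initial=1 applies the usual start debit on top
	 * of the vruntime inherited from the parent above.
	 */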
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02007474 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02007475
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007476 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02007477 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02007478 * Upon rescheduling, sched_class::put_prev_task() will place
7479 * 'current' within the tree based on its new key value.
7480 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02007481 swap(curr->vruntime, se->vruntime);
Kirill Tkhai88751252014-06-29 00:03:57 +04007482 resched_curr(rq);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02007483 }
7484
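	/*
	 * Store the child's vruntime relative to min_vruntime; it may be
	 * enqueued on a different CPU, where the destination's min_vruntime
	 * is added back at enqueue time.
	 */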
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01007485 se->vruntime -= cfs_rq->min_vruntime;
7486
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007487 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007488}
7489
Steven Rostedtcb469842008-01-25 21:08:22 +01007490/*
7491 * Priority of the task has changed. Check to see if we preempt
7492 * the current task.
7493 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007494static void
7495prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01007496{
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007497 if (!task_on_rq_queued(p))
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007498 return;
7499
Steven Rostedtcb469842008-01-25 21:08:22 +01007500 /*
7501 * Reschedule if we are currently running on this runqueue and
7502 * our priority decreased, or if we are not currently running on
7503 * this runqueue and our priority is higher than the current's
7504 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007505 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01007506 if (p->prio > oldprio)
Kirill Tkhai88751252014-06-29 00:03:57 +04007507 resched_curr(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01007508 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02007509 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01007510}
7511
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007512static void switched_from_fair(struct rq *rq, struct task_struct *p)
7513{
7514 struct sched_entity *se = &p->se;
7515 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7516
7517 /*
George McCollister791c9e02014-02-18 17:56:51 -06007518 * Ensure the task's vruntime is normalized, so that when it's
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007519 * switched back to the fair class the enqueue_entity(.flags=0) will
7520 * do the right thing.
7521 *
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007522 * If it's queued, then the dequeue_entity(.flags=0) will already
7523 * have normalized the vruntime, if it's !queued, then only when
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007524 * the task is sleeping will it still have non-normalized vruntime.
7525 */
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007526 if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007527 /*
7528 * Fix up our vruntime so that the current sleep doesn't
7529 * cause 'unlimited' sleep bonus.
7530 */
7531 place_entity(cfs_rq, se, 0);
7532 se->vruntime -= cfs_rq->min_vruntime;
7533 }
Paul Turner9ee474f2012-10-04 13:18:30 +02007534
Alex Shi141965c2013-06-26 13:05:39 +08007535#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02007536 /*
7537 * Remove our load from contribution when we leave sched_fair
7538 * and ensure we don't carry in an old decay_count if we
7539 * switch back.
7540 */
Kirill Tkhai87e3c8a2013-07-21 04:32:07 +04007541 if (se->avg.decay_count) {
7542 __synchronize_entity_decay(se);
7543 subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
Paul Turner9ee474f2012-10-04 13:18:30 +02007544 }
7545#endif
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007546}
7547
Steven Rostedtcb469842008-01-25 21:08:22 +01007548/*
7549 * We switched to the sched_fair class.
7550 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007551static void switched_to_fair(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01007552{
Michael wangeb7a59b2014-02-20 11:14:53 +08007553#ifdef CONFIG_FAIR_GROUP_SCHED
Kirill Tkhaif36c0192014-08-06 12:06:01 +04007554 struct sched_entity *se = &p->se;
Michael wangeb7a59b2014-02-20 11:14:53 +08007555 /*
7556	 * Since the real depth could have changed (only the FAIR class
7557	 * maintains the depth value), reset depth properly.
7558 */
7559 se->depth = se->parent ? se->parent->depth + 1 : 0;
7560#endif
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007561 if (!task_on_rq_queued(p))
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007562 return;
7563
Steven Rostedtcb469842008-01-25 21:08:22 +01007564 /*
7565 * We were most likely switched from sched_rt, so
7566 * kick off the schedule if running, otherwise just see
7567 * if we can still preempt the current task.
7568 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007569 if (rq->curr == p)
Kirill Tkhai88751252014-06-29 00:03:57 +04007570 resched_curr(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01007571 else
Peter Zijlstra15afe092008-09-20 23:38:02 +02007572 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01007573}
7574
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007575/* Account for a task changing its policy or group.
7576 *
7577 * This routine is mostly called to set cfs_rq->curr field when a task
7578 * migrates between groups/classes.
7579 */
7580static void set_curr_task_fair(struct rq *rq)
7581{
7582 struct sched_entity *se = &rq->curr->se;
7583
Paul Turnerec12cb72011-07-21 09:43:30 -07007584 for_each_sched_entity(se) {
7585 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7586
7587 set_next_entity(cfs_rq, se);
7588 /* ensure bandwidth has been allocated on our new cfs_rq */
7589 account_cfs_rq_runtime(cfs_rq, 0);
7590 }
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007591}
7592
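/*
 * Initialize a cfs_rq: an empty rb-tree and a min_vruntime starting about
 * 2^20 ns below the u64 wrap point, so that vruntime overflow handling is
 * exercised early. On SMP the blocked-load tracking counters are reset too.
 */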
Peter Zijlstra029632f2011-10-25 10:00:11 +02007593void init_cfs_rq(struct cfs_rq *cfs_rq)
7594{
7595 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007596 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
7597#ifndef CONFIG_64BIT
7598 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
7599#endif
Alex Shi141965c2013-06-26 13:05:39 +08007600#ifdef CONFIG_SMP
Paul Turner9ee474f2012-10-04 13:18:30 +02007601 atomic64_set(&cfs_rq->decay_counter, 1);
Alex Shi25099402013-06-20 10:18:55 +08007602 atomic_long_set(&cfs_rq->removed_load, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02007603#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02007604}
7605
Peter Zijlstra810b3812008-02-29 15:21:01 -05007606#ifdef CONFIG_FAIR_GROUP_SCHED
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007607static void task_move_group_fair(struct task_struct *p, int queued)
Peter Zijlstra810b3812008-02-29 15:21:01 -05007608{
Peter Zijlstrafed14d42012-02-11 06:05:00 +01007609 struct sched_entity *se = &p->se;
Paul Turneraff3e492012-10-04 13:18:30 +02007610 struct cfs_rq *cfs_rq;
Peter Zijlstrafed14d42012-02-11 06:05:00 +01007611
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007612 /*
7613 * If the task was not on the rq at the time of this cgroup movement
7614	 * it must have been asleep; sleeping tasks keep their ->vruntime
7615 * absolute on their old rq until wakeup (needed for the fair sleeper
7616 * bonus in place_entity()).
7617 *
7618 * If it was on the rq, we've just 'preempted' it, which does convert
7619 * ->vruntime to a relative base.
7620 *
7621 * Make sure both cases convert their relative position when migrating
7622 * to another cgroup's rq. This does somewhat interfere with the
7623 * fair sleeper stuff for the first placement, but who cares.
7624 */
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09007625 /*
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007626 * When !queued, vruntime of the task has usually NOT been normalized.
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09007627 * But there are some cases where it has already been normalized:
7628 *
7629	 * - Moving a forked child that is still waiting to be woken up by
7630	 *   wake_up_new_task().
Daisuke Nishimura62af3782011-12-15 14:37:41 +09007631 * - Moving a task which has been woken up by try_to_wake_up() and
7632	 *   is waiting to actually be woken up by sched_ttwu_pending().
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09007633 *
7634 * To prevent boost or penalty in the new cfs_rq caused by delta
7635 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
7636 */
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007637 if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
7638 queued = 1;
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09007639
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007640 if (!queued)
Peter Zijlstrafed14d42012-02-11 06:05:00 +01007641 se->vruntime -= cfs_rq_of(se)->min_vruntime;
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007642 set_task_rq(p, task_cpu(p));
Peter Zijlstrafed14d42012-02-11 06:05:00 +01007643 se->depth = se->parent ? se->parent->depth + 1 : 0;
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007644 if (!queued) {
Peter Zijlstrafed14d42012-02-11 06:05:00 +01007645 cfs_rq = cfs_rq_of(se);
7646 se->vruntime += cfs_rq->min_vruntime;
Paul Turneraff3e492012-10-04 13:18:30 +02007647#ifdef CONFIG_SMP
7648 /*
7649 * migrate_task_rq_fair() will have removed our previous
7650 * contribution, but we must synchronize for ongoing future
7651 * decay.
7652 */
Peter Zijlstrafed14d42012-02-11 06:05:00 +01007653 se->avg.decay_count = atomic64_read(&cfs_rq->decay_counter);
7654 cfs_rq->blocked_load_avg += se->avg.load_avg_contrib;
Paul Turneraff3e492012-10-04 13:18:30 +02007655#endif
7656 }
Peter Zijlstra810b3812008-02-29 15:21:01 -05007657}
Peter Zijlstra029632f2011-10-25 10:00:11 +02007658
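/*
 * Free a task group's CFS state: its bandwidth controller and the per-cpu
 * cfs_rq / sched_entity arrays set up by alloc_fair_sched_group().
 */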
7659void free_fair_sched_group(struct task_group *tg)
7660{
7661 int i;
7662
7663 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
7664
7665 for_each_possible_cpu(i) {
7666 if (tg->cfs_rq)
7667 kfree(tg->cfs_rq[i]);
7668 if (tg->se)
7669 kfree(tg->se[i]);
7670 }
7671
7672 kfree(tg->cfs_rq);
7673 kfree(tg->se);
7674}
7675
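/*
 * Allocate per-cpu cfs_rq and sched_entity structures for a new task group,
 * give it the default NICE_0_LOAD share and wire every entry into the
 * parent's hierarchy. Returns 1 on success, 0 on allocation failure.
 */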
7676int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7677{
7678 struct cfs_rq *cfs_rq;
7679 struct sched_entity *se;
7680 int i;
7681
7682 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
7683 if (!tg->cfs_rq)
7684 goto err;
7685 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
7686 if (!tg->se)
7687 goto err;
7688
7689 tg->shares = NICE_0_LOAD;
7690
7691 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
7692
7693 for_each_possible_cpu(i) {
7694 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
7695 GFP_KERNEL, cpu_to_node(i));
7696 if (!cfs_rq)
7697 goto err;
7698
7699 se = kzalloc_node(sizeof(struct sched_entity),
7700 GFP_KERNEL, cpu_to_node(i));
7701 if (!se)
7702 goto err_free_rq;
7703
7704 init_cfs_rq(cfs_rq);
7705 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
7706 }
7707
7708 return 1;
7709
7710err_free_rq:
7711 kfree(cfs_rq);
7712err:
7713 return 0;
7714}
7715
7716void unregister_fair_sched_group(struct task_group *tg, int cpu)
7717{
7718 struct rq *rq = cpu_rq(cpu);
7719 unsigned long flags;
7720
7721 /*
7722 * Only empty task groups can be destroyed; so we can speculatively
7723 * check on_list without danger of it being re-added.
7724 */
7725 if (!tg->cfs_rq[cpu]->on_list)
7726 return;
7727
7728 raw_spin_lock_irqsave(&rq->lock, flags);
7729 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
7730 raw_spin_unlock_irqrestore(&rq->lock, flags);
7731}
7732
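/*
 * Wire a group's cfs_rq and its representative sched_entity into the
 * hierarchy on @cpu: the entity queues on its parent's cfs_rq (or on the
 * root cfs_rq for a top-level group) and starts out with NICE_0_LOAD.
 */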
7733void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
7734 struct sched_entity *se, int cpu,
7735 struct sched_entity *parent)
7736{
7737 struct rq *rq = cpu_rq(cpu);
7738
7739 cfs_rq->tg = tg;
7740 cfs_rq->rq = rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007741 init_cfs_rq_runtime(cfs_rq);
7742
7743 tg->cfs_rq[cpu] = cfs_rq;
7744 tg->se[cpu] = se;
7745
7746 /* se could be NULL for root_task_group */
7747 if (!se)
7748 return;
7749
Peter Zijlstrafed14d42012-02-11 06:05:00 +01007750 if (!parent) {
Peter Zijlstra029632f2011-10-25 10:00:11 +02007751 se->cfs_rq = &rq->cfs;
Peter Zijlstrafed14d42012-02-11 06:05:00 +01007752 se->depth = 0;
7753 } else {
Peter Zijlstra029632f2011-10-25 10:00:11 +02007754 se->cfs_rq = parent->my_q;
Peter Zijlstrafed14d42012-02-11 06:05:00 +01007755 se->depth = parent->depth + 1;
7756 }
Peter Zijlstra029632f2011-10-25 10:00:11 +02007757
7758 se->my_q = cfs_rq;
Paul Turner0ac9b1c2013-10-16 11:16:27 -07007759 /* guarantee group entities always have weight */
7760 update_load_set(&se->load, NICE_0_LOAD);
Peter Zijlstra029632f2011-10-25 10:00:11 +02007761 se->parent = parent;
7762}
7763
7764static DEFINE_MUTEX(shares_mutex);
7765
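/*
 * Change a task group's weight: the new shares value is clamped to
 * [MIN_SHARES, MAX_SHARES] and then propagated up each CPU's entity
 * hierarchy under the rq lock.
 */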
7766int sched_group_set_shares(struct task_group *tg, unsigned long shares)
7767{
7768 int i;
7769 unsigned long flags;
7770
7771 /*
7772 * We can't change the weight of the root cgroup.
7773 */
7774 if (!tg->se[0])
7775 return -EINVAL;
7776
7777 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
7778
7779 mutex_lock(&shares_mutex);
7780 if (tg->shares == shares)
7781 goto done;
7782
7783 tg->shares = shares;
7784 for_each_possible_cpu(i) {
7785 struct rq *rq = cpu_rq(i);
7786 struct sched_entity *se;
7787
7788 se = tg->se[i];
7789 /* Propagate contribution to hierarchy */
7790 raw_spin_lock_irqsave(&rq->lock, flags);
Frederic Weisbecker71b1da42013-04-12 01:50:59 +02007791
7792 /* Possible calls to update_curr() need rq clock */
7793 update_rq_clock(rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08007794 for_each_sched_entity(se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02007795 update_cfs_shares(group_cfs_rq(se));
7796 raw_spin_unlock_irqrestore(&rq->lock, flags);
7797 }
7798
7799done:
7800 mutex_unlock(&shares_mutex);
7801 return 0;
7802}
7803#else /* CONFIG_FAIR_GROUP_SCHED */
7804
7805void free_fair_sched_group(struct task_group *tg) { }
7806
7807int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
7808{
7809 return 1;
7810}
7811
7812void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
7813
7814#endif /* CONFIG_FAIR_GROUP_SCHED */
7815
Peter Zijlstra810b3812008-02-29 15:21:01 -05007816
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07007817static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00007818{
7819 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00007820 unsigned int rr_interval = 0;
7821
7822 /*
7823 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
7824 * idle runqueue:
7825 */
Peter Williams0d721ce2009-09-21 01:31:53 +00007826 if (rq->cfs.load.weight)
Zhu Yanhaia59f4e02013-01-08 12:56:52 +08007827 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
Peter Williams0d721ce2009-09-21 01:31:53 +00007828
7829 return rr_interval;
7830}
7831
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007832/*
7833 * All the scheduling class methods:
7834 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02007835const struct sched_class fair_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02007836 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007837 .enqueue_task = enqueue_task_fair,
7838 .dequeue_task = dequeue_task_fair,
7839 .yield_task = yield_task_fair,
Mike Galbraithd95f4122011-02-01 09:50:51 -05007840 .yield_to_task = yield_to_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007841
Ingo Molnar2e09bf52007-10-15 17:00:05 +02007842 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007843
7844 .pick_next_task = pick_next_task_fair,
7845 .put_prev_task = put_prev_task_fair,
7846
Peter Williams681f3e62007-10-24 18:23:51 +02007847#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08007848 .select_task_rq = select_task_rq_fair,
Paul Turner0a74bef2012-10-04 13:18:30 +02007849 .migrate_task_rq = migrate_task_rq_fair,
Alex Shi141965c2013-06-26 13:05:39 +08007850
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01007851 .rq_online = rq_online_fair,
7852 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01007853
7854 .task_waking = task_waking_fair,
Peter Williams681f3e62007-10-24 18:23:51 +02007855#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007856
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007857 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007858 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007859 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01007860
7861 .prio_changed = prio_changed_fair,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007862 .switched_from = switched_from_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01007863 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05007864
Peter Williams0d721ce2009-09-21 01:31:53 +00007865 .get_rr_interval = get_rr_interval_fair,
7866
Peter Zijlstra810b3812008-02-29 15:21:01 -05007867#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02007868 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05007869#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007870};
7871
7872#ifdef CONFIG_SCHED_DEBUG
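/* Walk all leaf cfs_rqs on @cpu and print them for the scheduler debug output. */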
Peter Zijlstra029632f2011-10-25 10:00:11 +02007873void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007874{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007875 struct cfs_rq *cfs_rq;
7876
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01007877 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02007878 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02007879 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01007880 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007881}
7882#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02007883
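/*
 * Boot-time setup for the fair class: register the SCHED_SOFTIRQ handler and,
 * under NO_HZ_COMMON, the nohz idle cpumask and the ilb CPU notifier.
 */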
7884__init void init_sched_fair_class(void)
7885{
7886#ifdef CONFIG_SMP
7887 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
7888
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007889#ifdef CONFIG_NO_HZ_COMMON
Diwakar Tundlam554ceca2012-03-07 14:44:26 -08007890 nohz.next_balance = jiffies;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007891 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
Suresh Siddha71325962012-01-19 18:28:57 -08007892 cpu_notifier(sched_ilb_notifier, 0);
Peter Zijlstra029632f2011-10-25 10:00:11 +02007893#endif
7894#endif /* SMP */
7895
7896}