/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default: SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
 * each time a cfs_rq requests quota.
 *
 * Note: if the slice exceeds the remaining runtime (either because the
 * runtime has been consumed or the quota was set smaller than the slice),
 * we only issue the remaining available time.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}
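
/*
 * For illustration only, not from the original source: with the default
 * SCHED_TUNABLESCALING_LOG policy and 8 or more online CPUs (cpus is
 * clamped to 8 above), the factor is 1 + ilog2(8) = 4, so update_sysctl()
 * below scales the normalized 6 ms latency and 0.75 ms minimum
 * granularity to roughly 24 ms and 3 ms. A dual-core machine would get
 * a factor of 1 + ilog2(2) = 2 instead.
 */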

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}

void sched_init_granularity(void)
{
	update_sysctl();
}

#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	int shift = WMULT_SHIFT;

	__update_inv_weight(lw);

	if (unlikely(fact >> 32)) {
		while (fact >> 32) {
			fact >>= 1;
			shift--;
		}
	}

	/* hint to use a 32x32->64 mul */
	fact = (u64)(u32)fact * lw->inv_weight;

	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}
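
/*
 * For illustration only, not from the original source; a rough worked
 * example of the fixed-point math above: with weight = 1024 (one nice-0
 * task) and lw->weight = 2048 (e.g. two nice-0 tasks on the queue),
 * __update_inv_weight() sets inv_weight = (2^32 - 1) / 2048 = 2097151.
 * fact = 1024 * 2097151 still fits in 32 bits, so shift stays 32 and the
 * result is delta_exec * fact >> 32, i.e. almost exactly delta_exec / 2,
 * which is the intended delta_exec * weight / lw->weight.
 */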


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return se->cfs_rq;

	return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * A preemption test can only be made between sibling entities that
	 * are in the same cfs_rq, i.e. that have a common parent. Walk up
	 * the hierarchy of both tasks until we find ancestors that are
	 * siblings under a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = (*se)->depth;
	pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}
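
/*
 * Editor's note, not from the original source: the comparisons above are
 * done on the difference cast to s64, so they remain correct when vruntime
 * wraps the u64 range. For example, with a->vruntime = 10 just after a
 * wrap and b->vruntime = ULLONG_MAX - 10, the difference is +21, so
 * entity_before(a, b) is false and a is treated as being 21 ns ahead of b
 * even though it is numerically smaller.
 */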

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	unsigned int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	if (unlikely(nr_running > sched_nr_latency))
		return nr_running * sysctl_sched_min_granularity;
	else
		return sysctl_sched_latency;
}
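
/*
 * For illustration only, not from the original source, using the unscaled
 * defaults (6 ms latency, 0.75 ms minimum granularity, sched_nr_latency
 * of 8): with up to 8 runnable tasks the period stays at 6 ms, while 16
 * runnable tasks stretch it to 16 * 0.75 ms = 12 ms so that no slice
 * drops below the minimum granularity.
 */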

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = __calc_delta(slice, se->load.weight, load);
	}
	return slice;
}

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}
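
/*
 * For illustration only, not from the original source: with two runnable
 * nice-0 tasks (weight 1024 each, runqueue weight 2048) and an unscaled
 * 6 ms period, each task's sched_slice() is 6 ms * 1024/2048 = 3 ms.
 * Assuming the usual nice-to-weight table where nice -5 maps to about
 * 3121, a nice -5 task sharing the queue with a nice-0 task would get
 * roughly 6 ms * 3121/4145 ~= 4.5 ms, and sched_vslice() converts such a
 * slice back into nice-0-equivalent virtual time.
 */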

#ifdef CONFIG_SMP
static int select_idle_sibling(struct task_struct *p, int cpu);
static unsigned long task_h_load(struct task_struct *p);

/*
 * We choose a half-life close to 1 scheduling period.
 * Note: The tables below are dependent on this value.
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX 47742	/* maximum possible load avg */
#define LOAD_AVG_MAX_N 345	/* number of full periods to produce LOAD_AVG_MAX */

/* Give a new sched_entity initial runnable values so its load is weighted heavily in its infancy */
void init_entity_runnable_average(struct sched_entity *se)
{
	struct sched_avg *sa = &se->avg;

	sa->last_update_time = 0;
	/*
	 * sched_avg's period_contrib should be strictly less than 1024, so
	 * we give it 1023 to make sure it is almost a period (1024us), and
	 * will definitely be updated (after enqueue).
	 */
	sa->period_contrib = 1023;
	sa->load_avg = scale_load_down(se->load.weight);
	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
	sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
	sa->util_sum = LOAD_AVG_MAX;
	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}
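
/*
 * Editor's note, not from the original source: with LOAD_AVG_PERIOD of 32
 * the per-period decay factor y satisfies y^32 = 1/2, i.e. a contribution
 * from roughly 32 ms ago counts half as much as a current one.
 * LOAD_AVG_MAX (47742) is approximately the converged geometric sum
 * 1024 * (1 + y + y^2 + ...) that an always-runnable entity reaches after
 * about LOAD_AVG_MAX_N (345) full periods; the exact constant reflects
 * the integer arithmetic used by the decay code.
 */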

static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
#else
void init_entity_runnable_average(struct sched_entity *se)
{
}
#endif

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	delta_exec = now - curr->exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	curr->exec_start = now;

	schedstat_set(curr->statistics.exec_max,
		      max(delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);

	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_min_vruntime(cfs_rq);

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
	update_curr(cfs_rq_of(&rq->curr->se));
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_clock(rq_of(cfs_rq)));
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS as non-present and empty pages are skipped
	 * by the PTE scanner and NUMA hinting faults should be trapped based
	 * on resident pages
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}
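
/*
 * For illustration only, not from the original source: with the default
 * 256MB scan size, a task with a 1GB RSS gets task_nr_scan_windows() = 4,
 * so task_scan_min() returns max(1000/10, 1000/4) = 250 ms between scan
 * windows; the 100 ms floor comes from the MAX_SCAN_WINDOW (2560 MB/sec)
 * rate limit.
 */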

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned int smin = task_scan_min(p);
	unsigned int smax;

	/* Watch for max being lower than min due to the floor in task_scan_min() */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
	return max(smin, smax);
}

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != -1);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != -1);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

struct numa_group {
	atomic_t refcount;

	spinlock_t lock; /* nr_tasks, tasks */
	int nr_tasks;
	pid_t gid;

	struct rcu_head rcu;
	nodemask_t active_nodes;
	unsigned long total_faults;
	/*
	 * Faults_cpu is used to decide whether memory should move
	 * towards the CPU. As a consequence, these stats are weighted
	 * more by CPU use than by memory faults.
	 */
	unsigned long *faults_cpu;
	unsigned long faults[0];
};

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
	return p->numa_group ? p->numa_group->gid : 0;
}

/*
 * The averaged statistics, shared & private, memory & cpu,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
	if (!p->numa_group)
		return 0;

	return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
}

/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
					int maxdist, bool task)
{
	unsigned long score = 0;
	int node;

	/*
	 * All nodes are directly connected, and the same distance
	 * from each other. No need for fancy placement algorithms.
	 */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return 0;

	/*
	 * This code is called for each node, introducing N^2 complexity,
	 * which should be ok given the number of nodes rarely exceeds 8.
	 */
	for_each_online_node(node) {
		unsigned long faults;
		int dist = node_distance(nid, node);

		/*
		 * The furthest away nodes in the system are not interesting
		 * for placement; nid was already counted.
		 */
		if (dist == sched_max_numa_distance || node == nid)
			continue;

		/*
		 * On systems with a backplane NUMA topology, compare groups
		 * of nodes, and move tasks towards the group with the most
		 * memory accesses. When comparing two nodes at distance
		 * "hoplimit", only nodes closer by than "hoplimit" are part
		 * of each group. Skip other nodes.
		 */
		if (sched_numa_topology_type == NUMA_BACKPLANE &&
					dist > maxdist)
			continue;

		/* Add up the faults from nearby nodes. */
		if (task)
			faults = task_faults(p, node);
		else
			faults = group_faults(p, node);

		/*
		 * On systems with a glueless mesh NUMA topology, there are
		 * no fixed "groups of nodes". Instead, nodes that are not
		 * directly connected bounce traffic through intermediate
		 * nodes; a numa_group can occupy any set of nodes.
		 * The further away a node is, the less the faults count.
		 * This seems to result in good task placement.
		 */
		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
			faults *= (sched_max_numa_distance - dist);
			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
		}

		score += faults;
	}

	return score;
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid,
					int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_faults)
		return 0;

	total_faults = p->total_numa_faults;

	if (!total_faults)
		return 0;

	faults = task_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, true);

	return 1000 * faults / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid,
					 int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_group)
		return 0;

	total_faults = p->numa_group->total_faults;

	if (!total_faults)
		return 0;

	faults = group_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, false);

	return 1000 * faults / total_faults;
}
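
/*
 * For illustration only, not from the original source: on a NUMA_DIRECT
 * topology score_nearby_nodes() contributes nothing, so a task that
 * recorded 300 of its 1000 NUMA hinting faults against a node gets
 * task_weight() = 1000 * 300 / 1000 = 300, i.e. the fault fraction scaled
 * to the 0..1000 range used by the placement code.
 */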

bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
				int src_nid, int dst_cpu)
{
	struct numa_group *ng = p->numa_group;
	int dst_nid = cpu_to_node(dst_cpu);
	int last_cpupid, this_cpupid;

	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);

	/*
	 * Multi-stage node selection is used in conjunction with a periodic
	 * migration fault to build a temporal task<->page relation. By using
	 * a two-stage filter we remove short/unlikely relations.
	 *
	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
	 * a task's usage of a particular page (n_p) per total usage of this
	 * page (n_t) (in a given time-span) to a probability.
	 *
	 * Our periodic faults will sample this probability and getting the
	 * same result twice in a row, given these samples are fully
	 * independent, is then given by P(n)^2, provided our sample period
	 * is sufficiently short compared to the usage pattern.
	 *
	 * This quadratic squishes small probabilities, making it less likely
	 * we act on an unlikely task<->page relation.
	 */
	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
	if (!cpupid_pid_unset(last_cpupid) &&
				cpupid_to_nid(last_cpupid) != dst_nid)
		return false;

	/* Always allow migrate on private faults */
	if (cpupid_match_pid(p, last_cpupid))
		return true;

	/* A shared fault, but p->numa_group has not been set up yet. */
	if (!ng)
		return true;

	/*
	 * Do not migrate if the destination is not a node that
	 * is actively used by this numa group.
	 */
	if (!node_isset(dst_nid, ng->active_nodes))
		return false;

	/*
	 * Source is a node that is not actively used by this
	 * numa group, while the destination is. Migrate.
	 */
	if (!node_isset(src_nid, ng->active_nodes))
		return true;

	/*
	 * Both source and destination are nodes in active
	 * use by this numa group. Maximize memory bandwidth
	 * by migrating from more heavily used groups, to less
	 * heavily used ones, spreading the load around.
	 * Use a 1/4 hysteresis to avoid spurious page movement.
	 */
	return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
}
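
/*
 * For illustration only, not from the original source, restating the
 * two-stage filter above numerically: a task responsible for a quarter of
 * the accesses to a page (P ~ 0.25) passes two consecutive samples with
 * probability ~0.06, while a task doing 90% of the accesses passes with
 * probability ~0.81, so migrations are strongly biased towards the page's
 * dominant user.
 */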

static unsigned long weighted_cpuload(const int cpu);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long capacity_of(int cpu);
static long effective_load(struct task_group *tg, int cpu, long wl, long wg);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
	unsigned long nr_running;
	unsigned long load;

	/* Total compute capacity of CPUs on a node */
	unsigned long compute_capacity;

	/* Approximate capacity in terms of runnable tasks on a node */
	unsigned long task_capacity;
	int has_free_capacity;
};

/*
 * XXX borrowed from update_sg_lb_stats
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
	int smt, cpu, cpus = 0;
	unsigned long capacity;

	memset(ns, 0, sizeof(*ns));
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		struct rq *rq = cpu_rq(cpu);

		ns->nr_running += rq->nr_running;
		ns->load += weighted_cpuload(cpu);
		ns->compute_capacity += capacity_of(cpu);

		cpus++;
	}

	/*
	 * If we raced with hotplug and there are no CPUs left in our mask
	 * the @ns structure is NULL'ed and task_numa_compare() will
	 * not find this node attractive.
	 *
	 * We'll either bail at !has_free_capacity, or we'll detect a huge
	 * imbalance and bail there.
	 */
	if (!cpus)
		return;

	/* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
	smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
	capacity = cpus / smt; /* cores */

	ns->task_capacity = min_t(unsigned, capacity,
		DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
	ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
}
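
/*
 * For illustration only, not from the original source: a node with 8 SMT
 * siblings whose capacities sum to 4 * SCHED_CAPACITY_SCALE (two hardware
 * threads per core) gives smt = DIV_ROUND_UP(8 * 1024, 4096) = 2,
 * capacity = 8/2 = 4 cores and task_capacity = min(4, 4) = 4, so
 * has_free_capacity is set while fewer than four tasks run there.
 */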

struct task_numa_env {
	struct task_struct *p;

	int src_cpu, src_nid;
	int dst_cpu, dst_nid;

	struct numa_stats src_stats, dst_stats;

	int imbalance_pct;
	int dist;

	struct task_struct *best_task;
	long best_imp;
	int best_cpu;
};

static void task_numa_assign(struct task_numa_env *env,
			     struct task_struct *p, long imp)
{
	if (env->best_task)
		put_task_struct(env->best_task);
	if (p)
		get_task_struct(p);

	env->best_task = p;
	env->best_imp = imp;
	env->best_cpu = env->dst_cpu;
}

static bool load_too_imbalanced(long src_load, long dst_load,
				struct task_numa_env *env)
{
	long imb, old_imb;
	long orig_src_load, orig_dst_load;
	long src_capacity, dst_capacity;

	/*
	 * The load is corrected for the CPU capacity available on each node.
	 *
	 *	src_load	dst_load
	 *	------------ vs ---------
	 *	src_capacity	dst_capacity
	 */
	src_capacity = env->src_stats.compute_capacity;
	dst_capacity = env->dst_stats.compute_capacity;

	/* We care about the slope of the imbalance, not the direction. */
	if (dst_load < src_load)
		swap(dst_load, src_load);

	/* Is the difference below the threshold? */
	imb = dst_load * src_capacity * 100 -
	      src_load * dst_capacity * env->imbalance_pct;
	if (imb <= 0)
		return false;

	/*
	 * The imbalance is above the allowed threshold.
	 * Compare it with the old imbalance.
	 */
	orig_src_load = env->src_stats.load;
	orig_dst_load = env->dst_stats.load;

	if (orig_dst_load < orig_src_load)
		swap(orig_dst_load, orig_src_load);

	old_imb = orig_dst_load * src_capacity * 100 -
		  orig_src_load * dst_capacity * env->imbalance_pct;

	/* Would this change make things worse? */
	return (imb > old_imb);
}
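
/*
 * For illustration only, not from the original source, assuming equal
 * node capacities and a hypothetical imbalance_pct of 112: loads of 1000
 * (src) vs 1100 (dst) after a proposed move give
 * imb = 1100*100 - 1000*112 < 0, which is tolerated; at 1000 vs 1300 the
 * imbalance exceeds the threshold and the move is rejected unless the
 * pre-move imbalance was already at least as large.
 */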
1246
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001247/*
1248 * This checks if the overall compute and NUMA accesses of the system would
1249 * be improved if the source tasks was migrated to the target dst_cpu taking
1250 * into account that it might be best if task running on the dst_cpu should
1251 * be exchanged with the source task
1252 */
Rik van Riel887c2902013-10-07 11:29:31 +01001253static void task_numa_compare(struct task_numa_env *env,
1254 long taskimp, long groupimp)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001255{
1256 struct rq *src_rq = cpu_rq(env->src_cpu);
1257 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1258 struct task_struct *cur;
Rik van Riel28a21742014-06-23 11:46:13 -04001259 long src_load, dst_load;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001260 long load;
Rik van Riel1c5d3eb2014-06-23 11:46:15 -04001261 long imp = env->p->numa_group ? groupimp : taskimp;
Rik van Riel0132c3e2014-06-23 11:46:16 -04001262 long moveimp = imp;
Rik van Riel7bd95322014-10-17 03:29:51 -04001263 int dist = env->dist;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001264
1265 rcu_read_lock();
Kirill Tkhai1effd9f2014-10-22 11:17:11 +04001266
1267 raw_spin_lock_irq(&dst_rq->lock);
1268 cur = dst_rq->curr;
1269 /*
1270 * No need to move the exiting task, and this ensures that ->curr
1271 * wasn't reaped and thus get_task_struct() in task_numa_assign()
1272 * is safe under RCU read lock.
1273 * Note that rcu_read_lock() itself can't protect from the final
1274 * put_task_struct() after the last schedule().
1275 */
1276 if ((cur->flags & PF_EXITING) || is_idle_task(cur))
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001277 cur = NULL;
Kirill Tkhai1effd9f2014-10-22 11:17:11 +04001278 raw_spin_unlock_irq(&dst_rq->lock);
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001279
1280 /*
Peter Zijlstra7af68332014-11-10 10:54:35 +01001281 * Because we have preemption enabled we can get migrated around and
1282 * end try selecting ourselves (current == env->p) as a swap candidate.
1283 */
1284 if (cur == env->p)
1285 goto unlock;
1286
1287 /*
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001288 * "imp" is the fault differential for the source task between the
1289 * source and destination node. Calculate the total differential for
1290 * the source task and potential destination task. The more negative
1291 * the value is, the more rmeote accesses that would be expected to
1292 * be incurred if the tasks were swapped.
1293 */
1294 if (cur) {
1295 /* Skip this swap candidate if cannot move to the source cpu */
1296 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1297 goto unlock;
1298
Rik van Riel887c2902013-10-07 11:29:31 +01001299 /*
1300 * If dst and source tasks are in the same NUMA group, or not
Rik van Rielca28aa532013-10-07 11:29:32 +01001301 * in any group then look only at task weights.
Rik van Riel887c2902013-10-07 11:29:31 +01001302 */
Rik van Rielca28aa532013-10-07 11:29:32 +01001303 if (cur->numa_group == env->p->numa_group) {
Rik van Riel7bd95322014-10-17 03:29:51 -04001304 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1305 task_weight(cur, env->dst_nid, dist);
Rik van Rielca28aa532013-10-07 11:29:32 +01001306 /*
1307 * Add some hysteresis to prevent swapping the
1308 * tasks within a group over tiny differences.
1309 */
1310 if (cur->numa_group)
1311 imp -= imp/16;
Rik van Riel887c2902013-10-07 11:29:31 +01001312 } else {
Rik van Rielca28aa532013-10-07 11:29:32 +01001313 /*
1314 * Compare the group weights. If a task is all by
1315 * itself (not part of a group), use the task weight
1316 * instead.
1317 */
Rik van Rielca28aa532013-10-07 11:29:32 +01001318 if (cur->numa_group)
Rik van Riel7bd95322014-10-17 03:29:51 -04001319 imp += group_weight(cur, env->src_nid, dist) -
1320 group_weight(cur, env->dst_nid, dist);
Rik van Rielca28aa532013-10-07 11:29:32 +01001321 else
Rik van Riel7bd95322014-10-17 03:29:51 -04001322 imp += task_weight(cur, env->src_nid, dist) -
1323 task_weight(cur, env->dst_nid, dist);
Rik van Riel887c2902013-10-07 11:29:31 +01001324 }
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001325 }
1326
Rik van Riel0132c3e2014-06-23 11:46:16 -04001327 if (imp <= env->best_imp && moveimp <= env->best_imp)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001328 goto unlock;
1329
1330 if (!cur) {
1331 /* Is there capacity at our destination? */
Rik van Rielb932c032014-08-04 13:23:27 -04001332 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
Nicolas Pitre1b6a7492014-05-26 18:19:35 -04001333 !env->dst_stats.has_free_capacity)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001334 goto unlock;
1335
1336 goto balance;
1337 }
1338
1339 /* Balance doesn't matter much if we're running a task per cpu */
Rik van Riel0132c3e2014-06-23 11:46:16 -04001340 if (imp > env->best_imp && src_rq->nr_running == 1 &&
1341 dst_rq->nr_running == 1)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001342 goto assign;
1343
1344 /*
1345 * In the overloaded case, try and keep the load balanced.
1346 */
1347balance:
Peter Zijlstrae720fff2014-07-11 16:01:53 +02001348 load = task_h_load(env->p);
1349 dst_load = env->dst_stats.load + load;
1350 src_load = env->src_stats.load - load;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001351
Rik van Riel0132c3e2014-06-23 11:46:16 -04001352 if (moveimp > imp && moveimp > env->best_imp) {
1353 /*
1354		 * If the improvement from just moving env->p by itself is
1355 * better than swapping tasks around, check if a move is
1356 * possible. Store a slightly smaller score than moveimp,
1357 * so an actually idle CPU will win.
1358 */
1359 if (!load_too_imbalanced(src_load, dst_load, env)) {
1360 imp = moveimp - 1;
1361 cur = NULL;
1362 goto assign;
1363 }
1364 }
1365
1366 if (imp <= env->best_imp)
1367 goto unlock;
1368
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001369 if (cur) {
Peter Zijlstrae720fff2014-07-11 16:01:53 +02001370 load = task_h_load(cur);
1371 dst_load -= load;
1372 src_load += load;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001373 }
1374
Rik van Riel28a21742014-06-23 11:46:13 -04001375 if (load_too_imbalanced(src_load, dst_load, env))
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001376 goto unlock;
1377
Rik van Rielba7e5a22014-09-04 16:35:30 -04001378 /*
1379 * One idle CPU per node is evaluated for a task numa move.
1380 * Call select_idle_sibling to maybe find a better one.
1381 */
1382 if (!cur)
1383 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
1384
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001385assign:
1386 task_numa_assign(env, cur, imp);
1387unlock:
1388 rcu_read_unlock();
1389}
1390
Rik van Riel887c2902013-10-07 11:29:31 +01001391static void task_numa_find_cpu(struct task_numa_env *env,
1392 long taskimp, long groupimp)
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001393{
1394 int cpu;
1395
1396 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1397 /* Skip this CPU if the source task cannot migrate */
1398 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1399 continue;
1400
1401 env->dst_cpu = cpu;
Rik van Riel887c2902013-10-07 11:29:31 +01001402 task_numa_compare(env, taskimp, groupimp);
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001403 }
1404}
1405
Rik van Riel6f9aad02015-05-28 09:52:49 -04001406/* Only move tasks to a NUMA node less busy than the current node. */
1407static bool numa_has_capacity(struct task_numa_env *env)
1408{
1409 struct numa_stats *src = &env->src_stats;
1410 struct numa_stats *dst = &env->dst_stats;
1411
1412 if (src->has_free_capacity && !dst->has_free_capacity)
1413 return false;
1414
1415 /*
1416 * Only consider a task move if the source has a higher load
1417 * than the destination, corrected for CPU capacity on each node.
1418 *
1419 * src->load dst->load
1420 * --------------------- vs ---------------------
1421 * src->compute_capacity dst->compute_capacity
1422 */
Srikar Dronamraju44dcb042015-06-16 17:26:00 +05301423	if (src->load * dst->compute_capacity * env->imbalance_pct >
1424			dst->load * src->compute_capacity * 100)
Rik van Riel6f9aad02015-05-28 09:52:49 -04001426 return true;
1427
1428 return false;
1429}
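/*
 * Worked example with assumed numbers: with equal compute capacity on
 * both nodes and env->imbalance_pct == 112 (the value task_numa_migrate()
 * starts from), the check above reduces to src->load * 112 > dst->load * 100.
 * A source load of 90 against a destination load of 100 still passes
 * (10080 > 10000), while a source load of 80 does not (8960 < 10000),
 * so moves are only rejected once the destination is clearly busier
 * than the source.
 */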
1430
Mel Gorman58d081b2013-10-07 11:29:10 +01001431static int task_numa_migrate(struct task_struct *p)
Mel Gormane6628d52013-10-07 11:29:02 +01001432{
Mel Gorman58d081b2013-10-07 11:29:10 +01001433 struct task_numa_env env = {
1434 .p = p,
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001435
Mel Gorman58d081b2013-10-07 11:29:10 +01001436 .src_cpu = task_cpu(p),
Ingo Molnarb32e86b2013-10-07 11:29:30 +01001437 .src_nid = task_node(p),
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001438
1439 .imbalance_pct = 112,
1440
1441 .best_task = NULL,
1442 .best_imp = 0,
1443 .best_cpu = -1
Mel Gorman58d081b2013-10-07 11:29:10 +01001444 };
1445 struct sched_domain *sd;
Rik van Riel887c2902013-10-07 11:29:31 +01001446 unsigned long taskweight, groupweight;
Rik van Riel7bd95322014-10-17 03:29:51 -04001447 int nid, ret, dist;
Rik van Riel887c2902013-10-07 11:29:31 +01001448 long taskimp, groupimp;
Mel Gormane6628d52013-10-07 11:29:02 +01001449
Mel Gorman58d081b2013-10-07 11:29:10 +01001450 /*
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001451 * Pick the lowest SD_NUMA domain, as that would have the smallest
1452 * imbalance and would be the first to start moving tasks about.
1453 *
1454 * And we want to avoid any moving of tasks about, as that would create
1455	 * random movement of tasks -- counter to the numa conditions we're trying
1456 * to satisfy here.
Mel Gorman58d081b2013-10-07 11:29:10 +01001457 */
Mel Gormane6628d52013-10-07 11:29:02 +01001458 rcu_read_lock();
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001459 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
Rik van Riel46a73e82013-11-11 19:29:25 -05001460 if (sd)
1461 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
Mel Gormane6628d52013-10-07 11:29:02 +01001462 rcu_read_unlock();
1463
Rik van Riel46a73e82013-11-11 19:29:25 -05001464 /*
1465 * Cpusets can break the scheduler domain tree into smaller
1466 * balance domains, some of which do not cross NUMA boundaries.
1467 * Tasks that are "trapped" in such domains cannot be migrated
1468 * elsewhere, so there is no point in (re)trying.
1469 */
1470 if (unlikely(!sd)) {
Wanpeng Lide1b3012013-12-12 15:23:24 +08001471 p->numa_preferred_nid = task_node(p);
Rik van Riel46a73e82013-11-11 19:29:25 -05001472 return -EINVAL;
1473 }
1474
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001475 env.dst_nid = p->numa_preferred_nid;
Rik van Riel7bd95322014-10-17 03:29:51 -04001476 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1477 taskweight = task_weight(p, env.src_nid, dist);
1478 groupweight = group_weight(p, env.src_nid, dist);
1479 update_numa_stats(&env.src_stats, env.src_nid);
1480 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1481 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001482 update_numa_stats(&env.dst_stats, env.dst_nid);
Mel Gorman58d081b2013-10-07 11:29:10 +01001483
Rik van Riela43455a2014-06-04 16:09:42 -04001484 /* Try to find a spot on the preferred nid. */
Rik van Riel6f9aad02015-05-28 09:52:49 -04001485 if (numa_has_capacity(&env))
1486 task_numa_find_cpu(&env, taskimp, groupimp);
Rik van Riele1dda8a2013-10-07 11:29:19 +01001487
Rik van Riel9de05d42014-10-09 17:27:47 -04001488 /*
1489 * Look at other nodes in these cases:
1490 * - there is no space available on the preferred_nid
1491 * - the task is part of a numa_group that is interleaved across
1492 * multiple NUMA nodes; in order to better consolidate the group,
1493 * we need to check other locations.
1494 */
1495 if (env.best_cpu == -1 || (p->numa_group &&
1496 nodes_weight(p->numa_group->active_nodes) > 1)) {
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001497 for_each_online_node(nid) {
1498 if (nid == env.src_nid || nid == p->numa_preferred_nid)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001499 continue;
1500
Rik van Riel7bd95322014-10-17 03:29:51 -04001501 dist = node_distance(env.src_nid, env.dst_nid);
Rik van Riel6c6b1192014-10-17 03:29:52 -04001502 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1503 dist != env.dist) {
1504 taskweight = task_weight(p, env.src_nid, dist);
1505 groupweight = group_weight(p, env.src_nid, dist);
1506 }
Rik van Riel7bd95322014-10-17 03:29:51 -04001507
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001508 /* Only consider nodes where both task and groups benefit */
Rik van Riel7bd95322014-10-17 03:29:51 -04001509 taskimp = task_weight(p, nid, dist) - taskweight;
1510 groupimp = group_weight(p, nid, dist) - groupweight;
Rik van Riel887c2902013-10-07 11:29:31 +01001511 if (taskimp < 0 && groupimp < 0)
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001512 continue;
1513
Rik van Riel7bd95322014-10-17 03:29:51 -04001514 env.dist = dist;
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001515 env.dst_nid = nid;
1516 update_numa_stats(&env.dst_stats, env.dst_nid);
Rik van Riel6f9aad02015-05-28 09:52:49 -04001517 if (numa_has_capacity(&env))
1518 task_numa_find_cpu(&env, taskimp, groupimp);
Mel Gorman58d081b2013-10-07 11:29:10 +01001519 }
1520 }
1521
Rik van Riel68d1b022014-04-11 13:00:29 -04001522 /*
1523 * If the task is part of a workload that spans multiple NUMA nodes,
1524 * and is migrating into one of the workload's active nodes, remember
1525 * this node as the task's preferred numa node, so the workload can
1526 * settle down.
1527 * A task that migrated to a second choice node will be better off
1528 * trying for a better one later. Do not set the preferred node here.
1529 */
Rik van Rieldb015da2014-06-23 11:41:34 -04001530 if (p->numa_group) {
1531 if (env.best_cpu == -1)
1532 nid = env.src_nid;
1533 else
1534 nid = env.dst_nid;
1535
1536 if (node_isset(nid, p->numa_group->active_nodes))
1537 sched_setnuma(p, env.dst_nid);
1538 }
1539
1540 /* No better CPU than the current one was found. */
1541 if (env.best_cpu == -1)
1542 return -EAGAIN;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001543
Rik van Riel04bb2f92013-10-07 11:29:36 +01001544 /*
1545 * Reset the scan period if the task is being rescheduled on an
1546 * alternative node to recheck if the tasks is now properly placed.
1547 */
1548 p->numa_scan_period = task_scan_min(p);
1549
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001550 if (env.best_task == NULL) {
Mel Gorman286549d2014-01-21 15:51:03 -08001551 ret = migrate_task_to(p, env.best_cpu);
1552 if (ret != 0)
1553 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001554 return ret;
1555 }
1556
1557 ret = migrate_swap(p, env.best_task);
Mel Gorman286549d2014-01-21 15:51:03 -08001558 if (ret != 0)
1559 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001560 put_task_struct(env.best_task);
1561 return ret;
Mel Gormane6628d52013-10-07 11:29:02 +01001562}
1563
Mel Gorman6b9a7462013-10-07 11:29:11 +01001564/* Attempt to migrate a task to a CPU on the preferred node. */
1565static void numa_migrate_preferred(struct task_struct *p)
1566{
Rik van Riel5085e2a2014-04-11 13:00:28 -04001567 unsigned long interval = HZ;
1568
Rik van Riel2739d3e2013-10-07 11:29:41 +01001569 /* This task has no NUMA fault statistics yet */
Iulia Manda44dba3d2014-10-31 02:13:31 +02001570 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
Rik van Riel2739d3e2013-10-07 11:29:41 +01001571 return;
1572
1573 /* Periodically retry migrating the task to the preferred node */
Rik van Riel5085e2a2014-04-11 13:00:28 -04001574 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1575 p->numa_migrate_retry = jiffies + interval;
Rik van Riel2739d3e2013-10-07 11:29:41 +01001576
Mel Gorman6b9a7462013-10-07 11:29:11 +01001577 /* Success if task is already running on preferred CPU */
Wanpeng Lide1b3012013-12-12 15:23:24 +08001578 if (task_node(p) == p->numa_preferred_nid)
Mel Gorman6b9a7462013-10-07 11:29:11 +01001579 return;
1580
Mel Gorman6b9a7462013-10-07 11:29:11 +01001581 /* Otherwise, try migrate to a CPU on the preferred node */
Rik van Riel2739d3e2013-10-07 11:29:41 +01001582 task_numa_migrate(p);
Mel Gorman6b9a7462013-10-07 11:29:11 +01001583}
1584
Rik van Riel04bb2f92013-10-07 11:29:36 +01001585/*
Rik van Riel20e07de2014-01-27 17:03:43 -05001586 * Find the nodes on which the workload is actively running. We do this by
1587 * tracking the nodes from which NUMA hinting faults are triggered. This can
1588 * be different from the set of nodes where the workload's memory is currently
1589 * located.
1590 *
1591 * The bitmask is used to make smarter decisions on when to do NUMA page
1592 * migrations. To prevent flip-flopping and excessive page migrations, nodes
1593 * are added when they cause over 6/16 of the maximum number of faults, but
1594 * only removed when they drop below 3/16.
1595 */
1596static void update_numa_active_node_mask(struct numa_group *numa_group)
1597{
1598 unsigned long faults, max_faults = 0;
1599 int nid;
1600
1601 for_each_online_node(nid) {
1602 faults = group_faults_cpu(numa_group, nid);
1603 if (faults > max_faults)
1604 max_faults = faults;
1605 }
1606
1607 for_each_online_node(nid) {
1608 faults = group_faults_cpu(numa_group, nid);
1609 if (!node_isset(nid, numa_group->active_nodes)) {
1610 if (faults > max_faults * 6 / 16)
1611 node_set(nid, numa_group->active_nodes);
1612 } else if (faults < max_faults * 3 / 16)
1613 node_clear(nid, numa_group->active_nodes);
1614 }
1615}
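/*
 * Worked example with assumed fault counts: if the busiest node shows
 * max_faults = 1600, a node outside the active mask is added once its
 * faults exceed 1600 * 6 / 16 = 600, while a node already in the mask is
 * only dropped when its faults fall below 1600 * 3 / 16 = 300; the gap
 * between 600 and 300 provides the hysteresis described above.
 */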
1616
1617/*
Rik van Riel04bb2f92013-10-07 11:29:36 +01001618 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1619 * increments. The more local the fault statistics are, the higher the scan
Rik van Riela22b4b02014-06-23 11:41:35 -04001620 * period will be for the next scan window. If local/(local+remote) ratio is
1621 * below NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS)
1622 * the scan period will decrease. Aim for 70% local accesses.
Rik van Riel04bb2f92013-10-07 11:29:36 +01001623 */
1624#define NUMA_PERIOD_SLOTS 10
Rik van Riela22b4b02014-06-23 11:41:35 -04001625#define NUMA_PERIOD_THRESHOLD 7
Rik van Riel04bb2f92013-10-07 11:29:36 +01001626
1627/*
1628 * Increase the scan period (slow down scanning) if the majority of
1629 * our memory is already on our local node, or if the majority of
1630 * the page accesses are shared with other processes.
1631 * Otherwise, decrease the scan period.
1632 */
1633static void update_task_scan_period(struct task_struct *p,
1634 unsigned long shared, unsigned long private)
1635{
1636 unsigned int period_slot;
1637 int ratio;
1638 int diff;
1639
1640 unsigned long remote = p->numa_faults_locality[0];
1641 unsigned long local = p->numa_faults_locality[1];
1642
1643 /*
1644	 * If there were no recorded hinting faults then either the task is
1645	 * completely idle or all activity is in areas that are not of interest
Mel Gorman074c2382015-03-25 15:55:42 -07001646	 * to automatic numa balancing. Related to that, if there were failed
1647	 * migrations then it implies we are migrating too quickly or the local
1648	 * node is overloaded. In either case, scan slower.
Rik van Riel04bb2f92013-10-07 11:29:36 +01001649 */
Mel Gorman074c2382015-03-25 15:55:42 -07001650 if (local + shared == 0 || p->numa_faults_locality[2]) {
Rik van Riel04bb2f92013-10-07 11:29:36 +01001651 p->numa_scan_period = min(p->numa_scan_period_max,
1652 p->numa_scan_period << 1);
1653
1654 p->mm->numa_next_scan = jiffies +
1655 msecs_to_jiffies(p->numa_scan_period);
1656
1657 return;
1658 }
1659
1660 /*
1661 * Prepare to scale scan period relative to the current period.
1662 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1663 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1664 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1665 */
1666 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1667 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1668 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1669 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1670 if (!slot)
1671 slot = 1;
1672 diff = slot * period_slot;
1673 } else {
1674 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1675
1676 /*
1677 * Scale scan rate increases based on sharing. There is an
1678 * inverse relationship between the degree of sharing and
1679 * the adjustment made to the scanning period. Broadly
1680 * speaking the intent is that there is little point
1681 * scanning faster if shared accesses dominate as it may
1682 * simply bounce migrations uselessly
1683 */
Yasuaki Ishimatsu2847c902014-10-22 16:04:35 +09001684 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
Rik van Riel04bb2f92013-10-07 11:29:36 +01001685 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1686 }
1687
1688 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1689 task_scan_min(p), task_scan_max(p));
1690 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1691}
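/*
 * Rough example with assumed numbers: for numa_scan_period = 1000 and
 * NUMA_PERIOD_SLOTS = 10, period_slot = 100. With local = 900 and
 * remote = 100, ratio = 9 >= NUMA_PERIOD_THRESHOLD, so the period grows
 * by (9 - 7) * 100 = 200 (scan slower). With local = 300 and remote = 700,
 * ratio = 3 and diff = -(7 - 3) * 100 = -400; if the faults are almost
 * entirely private, close to the full -400 is applied and the period
 * shrinks (scan faster), subject to the clamp between task_scan_min()
 * and task_scan_max().
 */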
1692
Rik van Riel7e2703e2014-01-27 17:03:45 -05001693/*
1694 * Get the fraction of time the task has been running since the last
1695 * NUMA placement cycle. The scheduler keeps similar statistics, but
1696 * decays those on a 32ms period, which is orders of magnitude off
1697 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1698 * stats only if the task is so new there are no NUMA statistics yet.
1699 */
1700static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1701{
1702 u64 runtime, delta, now;
1703 /* Use the start of this time slice to avoid calculations. */
1704 now = p->se.exec_start;
1705 runtime = p->se.sum_exec_runtime;
1706
1707 if (p->last_task_numa_placement) {
1708 delta = runtime - p->last_sum_exec_runtime;
1709 *period = now - p->last_task_numa_placement;
1710 } else {
Yuyang Du9d89c252015-07-15 08:04:37 +08001711 delta = p->se.avg.load_sum / p->se.load.weight;
1712 *period = LOAD_AVG_MAX;
Rik van Riel7e2703e2014-01-27 17:03:45 -05001713 }
1714
1715 p->last_sum_exec_runtime = runtime;
1716 p->last_task_numa_placement = now;
1717
1718 return delta;
1719}
1720
Rik van Riel54009412014-10-17 03:29:53 -04001721/*
1722 * Determine the preferred nid for a task in a numa_group. This needs to
1723 * be done in a way that produces consistent results with group_weight,
1724 * otherwise workloads might not converge.
1725 */
1726static int preferred_group_nid(struct task_struct *p, int nid)
1727{
1728 nodemask_t nodes;
1729 int dist;
1730
1731 /* Direct connections between all NUMA nodes. */
1732 if (sched_numa_topology_type == NUMA_DIRECT)
1733 return nid;
1734
1735 /*
1736 * On a system with glueless mesh NUMA topology, group_weight
1737 * scores nodes according to the number of NUMA hinting faults on
1738 * both the node itself, and on nearby nodes.
1739 */
1740 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1741 unsigned long score, max_score = 0;
1742 int node, max_node = nid;
1743
1744 dist = sched_max_numa_distance;
1745
1746 for_each_online_node(node) {
1747 score = group_weight(p, node, dist);
1748 if (score > max_score) {
1749 max_score = score;
1750 max_node = node;
1751 }
1752 }
1753 return max_node;
1754 }
1755
1756 /*
1757 * Finding the preferred nid in a system with NUMA backplane
1758 * interconnect topology is more involved. The goal is to locate
1759 * tasks from numa_groups near each other in the system, and
1760 * untangle workloads from different sides of the system. This requires
1761 * searching down the hierarchy of node groups, recursively searching
1762 * inside the highest scoring group of nodes. The nodemask tricks
1763 * keep the complexity of the search down.
1764 */
1765 nodes = node_online_map;
1766 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1767 unsigned long max_faults = 0;
Jan Beulich81907472015-01-23 08:25:38 +00001768 nodemask_t max_group = NODE_MASK_NONE;
Rik van Riel54009412014-10-17 03:29:53 -04001769 int a, b;
1770
1771 /* Are there nodes at this distance from each other? */
1772 if (!find_numa_distance(dist))
1773 continue;
1774
1775 for_each_node_mask(a, nodes) {
1776 unsigned long faults = 0;
1777 nodemask_t this_group;
1778 nodes_clear(this_group);
1779
1780 /* Sum group's NUMA faults; includes a==b case. */
1781 for_each_node_mask(b, nodes) {
1782 if (node_distance(a, b) < dist) {
1783 faults += group_faults(p, b);
1784 node_set(b, this_group);
1785 node_clear(b, nodes);
1786 }
1787 }
1788
1789 /* Remember the top group. */
1790 if (faults > max_faults) {
1791 max_faults = faults;
1792 max_group = this_group;
1793 /*
1794 * subtle: at the smallest distance there is
1795 * just one node left in each "group", the
1796 * winner is the preferred nid.
1797 */
1798 nid = a;
1799 }
1800 }
1801 /* Next round, evaluate the nodes within max_group. */
Jan Beulich890a5402015-02-09 12:30:00 +01001802 if (!max_faults)
1803 break;
Rik van Riel54009412014-10-17 03:29:53 -04001804 nodes = max_group;
1805 }
1806 return nid;
1807}
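/*
 * Put differently (schematic, not tied to any particular machine): each
 * pass of the loop above keeps, at the current distance, the set of nodes
 * with the highest combined group faults, then repeats at a smaller
 * distance inside that set, until each "group" has shrunk to a single
 * node and that node is returned as the preferred nid.
 */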
1808
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001809static void task_numa_placement(struct task_struct *p)
1810{
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001811 int seq, nid, max_nid = -1, max_group_nid = -1;
1812 unsigned long max_faults = 0, max_group_faults = 0;
Rik van Riel04bb2f92013-10-07 11:29:36 +01001813 unsigned long fault_types[2] = { 0, 0 };
Rik van Riel7e2703e2014-01-27 17:03:45 -05001814 unsigned long total_faults;
1815 u64 runtime, period;
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001816 spinlock_t *group_lock = NULL;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001817
Jason Low7e5a2c12015-04-30 17:28:14 -07001818 /*
1819 * The p->mm->numa_scan_seq field gets updated without
1820 * exclusive access. Use READ_ONCE() here to ensure
1821 * that the field is read in a single access:
1822 */
Jason Low316c1608d2015-04-28 13:00:20 -07001823 seq = READ_ONCE(p->mm->numa_scan_seq);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001824 if (p->numa_scan_seq == seq)
1825 return;
1826 p->numa_scan_seq = seq;
Mel Gorman598f0ec2013-10-07 11:28:55 +01001827 p->numa_scan_period_max = task_scan_max(p);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001828
Rik van Riel7e2703e2014-01-27 17:03:45 -05001829 total_faults = p->numa_faults_locality[0] +
1830 p->numa_faults_locality[1];
1831 runtime = numa_get_avg_runtime(p, &period);
1832
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001833 /* If the task is part of a group prevent parallel updates to group stats */
1834 if (p->numa_group) {
1835 group_lock = &p->numa_group->lock;
Mike Galbraith60e69ee2014-04-07 10:55:15 +02001836 spin_lock_irq(group_lock);
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001837 }
1838
Mel Gorman688b7582013-10-07 11:28:58 +01001839 /* Find the node with the highest number of faults */
1840 for_each_online_node(nid) {
Iulia Manda44dba3d2014-10-31 02:13:31 +02001841 /* Keep track of the offsets in numa_faults array */
1842 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001843 unsigned long faults = 0, group_faults = 0;
Iulia Manda44dba3d2014-10-31 02:13:31 +02001844 int priv;
Mel Gorman745d6142013-10-07 11:28:59 +01001845
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001846 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
Rik van Riel7e2703e2014-01-27 17:03:45 -05001847 long diff, f_diff, f_weight;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001848
Iulia Manda44dba3d2014-10-31 02:13:31 +02001849 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
1850 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
1851 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
1852 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
Mel Gorman745d6142013-10-07 11:28:59 +01001853
Mel Gormanac8e8952013-10-07 11:29:03 +01001854 /* Decay existing window, copy faults since last scan */
Iulia Manda44dba3d2014-10-31 02:13:31 +02001855 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
1856 fault_types[priv] += p->numa_faults[membuf_idx];
1857 p->numa_faults[membuf_idx] = 0;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001858
Rik van Riel7e2703e2014-01-27 17:03:45 -05001859 /*
1860 * Normalize the faults_from, so all tasks in a group
1861 * count according to CPU use, instead of by the raw
1862 * number of faults. Tasks with little runtime have
1863 * little over-all impact on throughput, and thus their
1864 * faults are less important.
1865 */
1866 f_weight = div64_u64(runtime << 16, period + 1);
Iulia Manda44dba3d2014-10-31 02:13:31 +02001867 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
Rik van Riel7e2703e2014-01-27 17:03:45 -05001868 (total_faults + 1);
Iulia Manda44dba3d2014-10-31 02:13:31 +02001869 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
1870 p->numa_faults[cpubuf_idx] = 0;
Rik van Riel50ec8a42014-01-27 17:03:42 -05001871
Iulia Manda44dba3d2014-10-31 02:13:31 +02001872 p->numa_faults[mem_idx] += diff;
1873 p->numa_faults[cpu_idx] += f_diff;
1874 faults += p->numa_faults[mem_idx];
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001875 p->total_numa_faults += diff;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001876 if (p->numa_group) {
Iulia Manda44dba3d2014-10-31 02:13:31 +02001877 /*
1878 * safe because we can only change our own group
1879 *
1880 * mem_idx represents the offset for a given
1881 * nid and priv in a specific region because it
1882 * is at the beginning of the numa_faults array.
1883 */
1884 p->numa_group->faults[mem_idx] += diff;
1885 p->numa_group->faults_cpu[mem_idx] += f_diff;
Mel Gorman989348b2013-10-07 11:29:40 +01001886 p->numa_group->total_faults += diff;
Iulia Manda44dba3d2014-10-31 02:13:31 +02001887 group_faults += p->numa_group->faults[mem_idx];
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001888 }
Mel Gormanac8e8952013-10-07 11:29:03 +01001889 }
1890
Mel Gorman688b7582013-10-07 11:28:58 +01001891 if (faults > max_faults) {
1892 max_faults = faults;
1893 max_nid = nid;
1894 }
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001895
1896 if (group_faults > max_group_faults) {
1897 max_group_faults = group_faults;
1898 max_group_nid = nid;
1899 }
1900 }
1901
Rik van Riel04bb2f92013-10-07 11:29:36 +01001902 update_task_scan_period(p, fault_types[0], fault_types[1]);
1903
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001904 if (p->numa_group) {
Rik van Riel20e07de2014-01-27 17:03:43 -05001905 update_numa_active_node_mask(p->numa_group);
Mike Galbraith60e69ee2014-04-07 10:55:15 +02001906 spin_unlock_irq(group_lock);
Rik van Riel54009412014-10-17 03:29:53 -04001907 max_nid = preferred_group_nid(p, max_group_nid);
Mel Gorman688b7582013-10-07 11:28:58 +01001908 }
1909
Rik van Rielbb97fc32014-06-04 16:33:15 -04001910 if (max_faults) {
1911 /* Set the new preferred node */
1912 if (max_nid != p->numa_preferred_nid)
1913 sched_setnuma(p, max_nid);
1914
1915 if (task_node(p) != p->numa_preferred_nid)
1916 numa_migrate_preferred(p);
Mel Gorman3a7053b2013-10-07 11:29:00 +01001917 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001918}
1919
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001920static inline int get_numa_group(struct numa_group *grp)
1921{
1922 return atomic_inc_not_zero(&grp->refcount);
1923}
1924
1925static inline void put_numa_group(struct numa_group *grp)
1926{
1927 if (atomic_dec_and_test(&grp->refcount))
1928 kfree_rcu(grp, rcu);
1929}
1930
Mel Gorman3e6a9412013-10-07 11:29:35 +01001931static void task_numa_group(struct task_struct *p, int cpupid, int flags,
1932 int *priv)
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001933{
1934 struct numa_group *grp, *my_grp;
1935 struct task_struct *tsk;
1936 bool join = false;
1937 int cpu = cpupid_to_cpu(cpupid);
1938 int i;
1939
1940 if (unlikely(!p->numa_group)) {
1941 unsigned int size = sizeof(struct numa_group) +
Rik van Riel50ec8a42014-01-27 17:03:42 -05001942 4*nr_node_ids*sizeof(unsigned long);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001943
1944 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1945 if (!grp)
1946 return;
1947
1948 atomic_set(&grp->refcount, 1);
1949 spin_lock_init(&grp->lock);
Mel Gormane29cf082013-10-07 11:29:22 +01001950 grp->gid = p->pid;
Rik van Riel50ec8a42014-01-27 17:03:42 -05001951 /* Second half of the array tracks nids where faults happen */
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001952 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
1953 nr_node_ids;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001954
Rik van Riel20e07de2014-01-27 17:03:43 -05001955 node_set(task_node(current), grp->active_nodes);
1956
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001957 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
Iulia Manda44dba3d2014-10-31 02:13:31 +02001958 grp->faults[i] = p->numa_faults[i];
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001959
Mel Gorman989348b2013-10-07 11:29:40 +01001960 grp->total_faults = p->total_numa_faults;
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001961
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001962 grp->nr_tasks++;
1963 rcu_assign_pointer(p->numa_group, grp);
1964 }
1965
1966 rcu_read_lock();
Jason Low316c1608d2015-04-28 13:00:20 -07001967 tsk = READ_ONCE(cpu_rq(cpu)->curr);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001968
1969 if (!cpupid_match_pid(tsk, cpupid))
Peter Zijlstra33547812013-10-09 10:24:48 +02001970 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001971
1972 grp = rcu_dereference(tsk->numa_group);
1973 if (!grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001974 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001975
1976 my_grp = p->numa_group;
1977 if (grp == my_grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001978 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001979
1980 /*
1981	 * Only join the other group if it's bigger; if we're the bigger group,
1982 * the other task will join us.
1983 */
1984 if (my_grp->nr_tasks > grp->nr_tasks)
Peter Zijlstra33547812013-10-09 10:24:48 +02001985 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001986
1987 /*
1988 * Tie-break on the grp address.
1989 */
1990 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02001991 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001992
Rik van Rieldabe1d92013-10-07 11:29:34 +01001993 /* Always join threads in the same process. */
1994 if (tsk->mm == current->mm)
1995 join = true;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001996
Rik van Rieldabe1d92013-10-07 11:29:34 +01001997 /* Simple filter to avoid false positives due to PID collisions */
1998 if (flags & TNF_SHARED)
1999 join = true;
2000
Mel Gorman3e6a9412013-10-07 11:29:35 +01002001 /* Update priv based on whether false sharing was detected */
2002 *priv = !join;
2003
Rik van Rieldabe1d92013-10-07 11:29:34 +01002004 if (join && !get_numa_group(grp))
Peter Zijlstra33547812013-10-09 10:24:48 +02002005 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002006
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002007 rcu_read_unlock();
2008
2009 if (!join)
2010 return;
2011
Mike Galbraith60e69ee2014-04-07 10:55:15 +02002012 BUG_ON(irqs_disabled());
2013 double_lock_irq(&my_grp->lock, &grp->lock);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002014
Rik van Rielbe1e4e72014-01-27 17:03:48 -05002015 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
Iulia Manda44dba3d2014-10-31 02:13:31 +02002016 my_grp->faults[i] -= p->numa_faults[i];
2017 grp->faults[i] += p->numa_faults[i];
Mel Gorman989348b2013-10-07 11:29:40 +01002018 }
2019 my_grp->total_faults -= p->total_numa_faults;
2020 grp->total_faults += p->total_numa_faults;
2021
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002022 my_grp->nr_tasks--;
2023 grp->nr_tasks++;
2024
2025 spin_unlock(&my_grp->lock);
Mike Galbraith60e69ee2014-04-07 10:55:15 +02002026 spin_unlock_irq(&grp->lock);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002027
2028 rcu_assign_pointer(p->numa_group, grp);
2029
2030 put_numa_group(my_grp);
Peter Zijlstra33547812013-10-09 10:24:48 +02002031 return;
2032
2033no_join:
2034 rcu_read_unlock();
2035 return;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002036}
2037
2038void task_numa_free(struct task_struct *p)
2039{
2040 struct numa_group *grp = p->numa_group;
Iulia Manda44dba3d2014-10-31 02:13:31 +02002041 void *numa_faults = p->numa_faults;
Steven Rostedte9dd6852014-05-27 17:02:04 -04002042 unsigned long flags;
2043 int i;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002044
2045 if (grp) {
Steven Rostedte9dd6852014-05-27 17:02:04 -04002046 spin_lock_irqsave(&grp->lock, flags);
Rik van Rielbe1e4e72014-01-27 17:03:48 -05002047 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
Iulia Manda44dba3d2014-10-31 02:13:31 +02002048 grp->faults[i] -= p->numa_faults[i];
Mel Gorman989348b2013-10-07 11:29:40 +01002049 grp->total_faults -= p->total_numa_faults;
2050
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002051 grp->nr_tasks--;
Steven Rostedte9dd6852014-05-27 17:02:04 -04002052 spin_unlock_irqrestore(&grp->lock, flags);
Andreea-Cristina Bernat35b123e2014-08-22 17:50:43 +03002053 RCU_INIT_POINTER(p->numa_group, NULL);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002054 put_numa_group(grp);
2055 }
2056
Iulia Manda44dba3d2014-10-31 02:13:31 +02002057 p->numa_faults = NULL;
Rik van Riel82727012013-10-07 11:29:28 +01002058 kfree(numa_faults);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002059}
2060
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002061/*
2062 * Got a PROT_NONE fault for a page on @node.
2063 */
Rik van Riel58b46da2014-01-27 17:03:47 -05002064void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002065{
2066 struct task_struct *p = current;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01002067 bool migrated = flags & TNF_MIGRATED;
Rik van Riel58b46da2014-01-27 17:03:47 -05002068 int cpu_node = task_node(current);
Rik van Riel792568e2014-04-11 13:00:27 -04002069 int local = !!(flags & TNF_FAULT_LOCAL);
Mel Gormanac8e8952013-10-07 11:29:03 +01002070 int priv;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002071
Dave Kleikamp10e84b92013-07-31 13:53:35 -07002072 if (!numabalancing_enabled)
Mel Gorman1a687c22012-11-22 11:16:36 +00002073 return;
2074
Mel Gorman9ff1d9f2013-10-07 11:29:04 +01002075 /* for example, ksmd faulting in a user's mm */
2076 if (!p->mm)
2077 return;
2078
Mel Gormanf809ca92013-10-07 11:28:57 +01002079 /* Allocate buffer to track faults on a per-node basis */
Iulia Manda44dba3d2014-10-31 02:13:31 +02002080 if (unlikely(!p->numa_faults)) {
2081 int size = sizeof(*p->numa_faults) *
Rik van Rielbe1e4e72014-01-27 17:03:48 -05002082 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
Mel Gormanf809ca92013-10-07 11:28:57 +01002083
Iulia Manda44dba3d2014-10-31 02:13:31 +02002084 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2085 if (!p->numa_faults)
Mel Gormanf809ca92013-10-07 11:28:57 +01002086 return;
Mel Gorman745d6142013-10-07 11:28:59 +01002087
Mel Gorman83e1d2c2013-10-07 11:29:27 +01002088 p->total_numa_faults = 0;
Rik van Riel04bb2f92013-10-07 11:29:36 +01002089 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
Mel Gormanf809ca92013-10-07 11:28:57 +01002090 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002091
Mel Gormanfb003b82012-11-15 09:01:14 +00002092 /*
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002093 * First accesses are treated as private, otherwise consider accesses
2094 * to be private if the accessing pid has not changed
2095 */
2096 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2097 priv = 1;
2098 } else {
2099 priv = cpupid_match_pid(p, last_cpupid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01002100 if (!priv && !(flags & TNF_NO_GROUP))
Mel Gorman3e6a9412013-10-07 11:29:35 +01002101 task_numa_group(p, last_cpupid, flags, &priv);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002102 }
2103
Rik van Riel792568e2014-04-11 13:00:27 -04002104 /*
2105 * If a workload spans multiple NUMA nodes, a shared fault that
2106 * occurs wholly within the set of nodes that the workload is
2107 * actively using should be counted as local. This allows the
2108 * scan rate to slow down when a workload has settled down.
2109 */
2110 if (!priv && !local && p->numa_group &&
2111 node_isset(cpu_node, p->numa_group->active_nodes) &&
2112 node_isset(mem_node, p->numa_group->active_nodes))
2113 local = 1;
2114
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002115 task_numa_placement(p);
Mel Gormanf809ca92013-10-07 11:28:57 +01002116
Rik van Riel2739d3e2013-10-07 11:29:41 +01002117 /*
2118	 * Retry migrating the task to its preferred node periodically,
2119	 * in case it previously failed, or the scheduler moved us.
2120 */
2121 if (time_after(jiffies, p->numa_migrate_retry))
Mel Gorman6b9a7462013-10-07 11:29:11 +01002122 numa_migrate_preferred(p);
2123
Ingo Molnarb32e86b2013-10-07 11:29:30 +01002124 if (migrated)
2125 p->numa_pages_migrated += pages;
Mel Gorman074c2382015-03-25 15:55:42 -07002126 if (flags & TNF_MIGRATE_FAIL)
2127 p->numa_faults_locality[2] += pages;
Ingo Molnarb32e86b2013-10-07 11:29:30 +01002128
Iulia Manda44dba3d2014-10-31 02:13:31 +02002129 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2130 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
Rik van Riel792568e2014-04-11 13:00:27 -04002131 p->numa_faults_locality[local] += pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002132}
2133
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002134static void reset_ptenuma_scan(struct task_struct *p)
2135{
Jason Low7e5a2c12015-04-30 17:28:14 -07002136 /*
2137 * We only did a read acquisition of the mmap sem, so
2138 * p->mm->numa_scan_seq is written to without exclusive access
2139 * and the update is not guaranteed to be atomic. That's not
2140 * much of an issue though, since this is just used for
2141 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2142 * expensive, to avoid any form of compiler optimizations:
2143 */
Jason Low316c1608d2015-04-28 13:00:20 -07002144 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002145 p->mm->numa_scan_offset = 0;
2146}
2147
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002148/*
2149 * The expensive part of numa migration is done from task_work context.
2150 * Triggered from task_tick_numa().
2151 */
2152void task_numa_work(struct callback_head *work)
2153{
2154 unsigned long migrate, next_scan, now = jiffies;
2155 struct task_struct *p = current;
2156 struct mm_struct *mm = p->mm;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002157 struct vm_area_struct *vma;
Mel Gorman9f406042012-11-14 18:34:32 +00002158 unsigned long start, end;
Mel Gorman598f0ec2013-10-07 11:28:55 +01002159 unsigned long nr_pte_updates = 0;
Mel Gorman9f406042012-11-14 18:34:32 +00002160 long pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002161
2162 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
2163
2164 work->next = work; /* protect against double add */
2165 /*
2166 * Who cares about NUMA placement when they're dying.
2167 *
2168 * NOTE: make sure not to dereference p->mm before this check,
2169 * exit_task_work() happens _after_ exit_mm() so we could be called
2170 * without p->mm even though we still had it when we enqueued this
2171 * work.
2172 */
2173 if (p->flags & PF_EXITING)
2174 return;
2175
Mel Gorman930aa172013-10-07 11:29:37 +01002176 if (!mm->numa_next_scan) {
Mel Gorman7e8d16b2013-10-07 11:28:54 +01002177 mm->numa_next_scan = now +
2178 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
Mel Gormanb8593bf2012-11-21 01:18:23 +00002179 }
2180
2181 /*
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002182 * Enforce maximal scan/migration frequency..
2183 */
2184 migrate = mm->numa_next_scan;
2185 if (time_before(now, migrate))
2186 return;
2187
Mel Gorman598f0ec2013-10-07 11:28:55 +01002188 if (p->numa_scan_period == 0) {
2189 p->numa_scan_period_max = task_scan_max(p);
2190 p->numa_scan_period = task_scan_min(p);
2191 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002192
Mel Gormanfb003b82012-11-15 09:01:14 +00002193 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002194 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2195 return;
2196
Mel Gormane14808b2012-11-19 10:59:15 +00002197 /*
Peter Zijlstra19a78d12013-10-07 11:28:51 +01002198 * Delay this task enough that another task of this mm will likely win
2199 * the next time around.
2200 */
2201 p->node_stamp += 2 * TICK_NSEC;
2202
Mel Gorman9f406042012-11-14 18:34:32 +00002203 start = mm->numa_scan_offset;
2204 pages = sysctl_numa_balancing_scan_size;
2205 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
2206 if (!pages)
2207 return;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002208
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002209 down_read(&mm->mmap_sem);
Mel Gorman9f406042012-11-14 18:34:32 +00002210 vma = find_vma(mm, start);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002211 if (!vma) {
2212 reset_ptenuma_scan(p);
Mel Gorman9f406042012-11-14 18:34:32 +00002213 start = 0;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002214 vma = mm->mmap;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002215 }
Mel Gorman9f406042012-11-14 18:34:32 +00002216 for (; vma; vma = vma->vm_next) {
Naoya Horiguchi6b79c572015-04-07 14:26:47 -07002217 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
Mel Gorman8e76d4e2015-06-10 11:15:00 -07002218 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002219 continue;
Naoya Horiguchi6b79c572015-04-07 14:26:47 -07002220 }
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002221
Mel Gorman4591ce4f2013-10-07 11:29:13 +01002222 /*
2223 * Shared library pages mapped by multiple processes are not
2224 * migrated as it is expected they are cache replicated. Avoid
2225 * hinting faults in read-only file-backed mappings or the vdso
2226 * as migrating the pages will be of marginal benefit.
2227 */
2228 if (!vma->vm_mm ||
2229 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2230 continue;
2231
Mel Gorman3c67f472013-12-18 17:08:40 -08002232 /*
2233 * Skip inaccessible VMAs to avoid any confusion between
2234 * PROT_NONE and NUMA hinting ptes
2235 */
2236 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2237 continue;
2238
Mel Gorman9f406042012-11-14 18:34:32 +00002239 do {
2240 start = max(start, vma->vm_start);
2241 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2242 end = min(end, vma->vm_end);
Mel Gorman598f0ec2013-10-07 11:28:55 +01002243 nr_pte_updates += change_prot_numa(vma, start, end);
2244
2245 /*
2246 * Scan sysctl_numa_balancing_scan_size but ensure that
2247 * at least one PTE is updated so that unused virtual
2248 * address space is quickly skipped.
2249 */
2250 if (nr_pte_updates)
2251 pages -= (end - start) >> PAGE_SHIFT;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002252
Mel Gorman9f406042012-11-14 18:34:32 +00002253 start = end;
2254 if (pages <= 0)
2255 goto out;
Rik van Riel3cf19622014-02-18 17:12:44 -05002256
2257 cond_resched();
Mel Gorman9f406042012-11-14 18:34:32 +00002258 } while (end != vma->vm_end);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002259 }
2260
Mel Gorman9f406042012-11-14 18:34:32 +00002261out:
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002262 /*
Peter Zijlstrac69307d2013-10-07 11:28:41 +01002263 * It is possible to reach the end of the VMA list but the last few
2264	 * VMAs are not guaranteed to be vma_migratable. If they are not, we
2265 * would find the !migratable VMA on the next scan but not reset the
2266 * scanner to the start so check it now.
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002267 */
2268 if (vma)
Mel Gorman9f406042012-11-14 18:34:32 +00002269 mm->numa_scan_offset = start;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002270 else
2271 reset_ptenuma_scan(p);
2272 up_read(&mm->mmap_sem);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002273}
2274
2275/*
2276 * Drive the periodic memory faults..
2277 */
2278void task_tick_numa(struct rq *rq, struct task_struct *curr)
2279{
2280 struct callback_head *work = &curr->numa_work;
2281 u64 period, now;
2282
2283 /*
2284 * We don't care about NUMA placement if we don't have memory.
2285 */
2286 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2287 return;
2288
2289 /*
2290 * Using runtime rather than walltime has the dual advantage that
2291 * we (mostly) drive the selection from busy threads and that the
2292 * task needs to have done some actual work before we bother with
2293 * NUMA placement.
2294 */
2295 now = curr->se.sum_exec_runtime;
2296 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2297
2298 if (now - curr->node_stamp > period) {
Peter Zijlstra4b96a292012-10-25 14:16:47 +02002299 if (!curr->node_stamp)
Mel Gorman598f0ec2013-10-07 11:28:55 +01002300 curr->numa_scan_period = task_scan_min(curr);
Peter Zijlstra19a78d12013-10-07 11:28:51 +01002301 curr->node_stamp += period;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002302
2303 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2304 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2305 task_work_add(curr, work, true);
2306 }
2307 }
2308}
2309#else
2310static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2311{
2312}
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002313
2314static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2315{
2316}
2317
2318static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2319{
2320}
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002321#endif /* CONFIG_NUMA_BALANCING */
2322
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002323static void
2324account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2325{
2326 update_load_add(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02002327 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02002328 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01002329#ifdef CONFIG_SMP
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002330 if (entity_is_task(se)) {
2331 struct rq *rq = rq_of(cfs_rq);
2332
2333 account_numa_enqueue(rq, task_of(se));
2334 list_add(&se->group_node, &rq->cfs_tasks);
2335 }
Peter Zijlstra367456c2012-02-20 21:49:09 +01002336#endif
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002337 cfs_rq->nr_running++;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002338}
2339
2340static void
2341account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2342{
2343 update_load_sub(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02002344 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02002345 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002346 if (entity_is_task(se)) {
2347 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
Bharata B Raob87f1722008-09-25 09:53:54 +05302348 list_del_init(&se->group_node);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002349 }
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002350 cfs_rq->nr_running--;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002351}
2352
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002353#ifdef CONFIG_FAIR_GROUP_SCHED
2354# ifdef CONFIG_SMP
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002355static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
2356{
2357 long tg_weight;
2358
2359 /*
Yuyang Du9d89c252015-07-15 08:04:37 +08002360 * Use this CPU's real-time load instead of the last load contribution
2361	 * as the updating of the contribution is delayed, and we will use
2362	 * the real-time load to calc the share. See update_tg_load_avg().
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002363 */
Alex Shibf5b9862013-06-20 10:18:54 +08002364 tg_weight = atomic_long_read(&tg->load_avg);
Yuyang Du9d89c252015-07-15 08:04:37 +08002365 tg_weight -= cfs_rq->tg_load_avg_contrib;
Yuyang Du7ea241a2015-07-15 08:04:42 +08002366 tg_weight += cfs_rq_load_avg(cfs_rq);
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002367
2368 return tg_weight;
2369}
2370
Paul Turner6d5ab292011-01-21 20:45:01 -08002371static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002372{
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002373 long tg_weight, load, shares;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002374
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002375 tg_weight = calc_tg_weight(tg, cfs_rq);
Yuyang Du7ea241a2015-07-15 08:04:42 +08002376 load = cfs_rq_load_avg(cfs_rq);
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002377
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002378 shares = (tg->shares * load);
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002379 if (tg_weight)
2380 shares /= tg_weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002381
2382 if (shares < MIN_SHARES)
2383 shares = MIN_SHARES;
2384 if (shares > tg->shares)
2385 shares = tg->shares;
2386
2387 return shares;
2388}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002389# else /* CONFIG_SMP */
Paul Turner6d5ab292011-01-21 20:45:01 -08002390static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002391{
2392 return tg->shares;
2393}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002394# endif /* CONFIG_SMP */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002395static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2396 unsigned long weight)
2397{
Paul Turner19e5eeb2010-12-15 19:10:18 -08002398 if (se->on_rq) {
2399 /* commit outstanding execution time */
2400 if (cfs_rq->curr == se)
2401 update_curr(cfs_rq);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002402 account_entity_dequeue(cfs_rq, se);
Paul Turner19e5eeb2010-12-15 19:10:18 -08002403 }
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002404
2405 update_load_set(&se->load, weight);
2406
2407 if (se->on_rq)
2408 account_entity_enqueue(cfs_rq, se);
2409}
2410
Paul Turner82958362012-10-04 13:18:31 +02002411static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2412
Paul Turner6d5ab292011-01-21 20:45:01 -08002413static void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002414{
2415 struct task_group *tg;
2416 struct sched_entity *se;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002417 long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002418
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002419 tg = cfs_rq->tg;
2420 se = tg->se[cpu_of(rq_of(cfs_rq))];
Paul Turner64660c82011-07-21 09:43:36 -07002421 if (!se || throttled_hierarchy(cfs_rq))
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002422 return;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002423#ifndef CONFIG_SMP
2424 if (likely(se->load.weight == tg->shares))
2425 return;
2426#endif
Paul Turner6d5ab292011-01-21 20:45:01 -08002427 shares = calc_cfs_shares(cfs_rq, tg);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002428
2429 reweight_entity(cfs_rq_of(se), se, shares);
2430}
2431#else /* CONFIG_FAIR_GROUP_SCHED */
Paul Turner6d5ab292011-01-21 20:45:01 -08002432static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002433{
2434}
2435#endif /* CONFIG_FAIR_GROUP_SCHED */
2436
Alex Shi141965c2013-06-26 13:05:39 +08002437#ifdef CONFIG_SMP
Paul Turner5b51f2f2012-10-04 13:18:32 +02002438/* Precomputed fixed inverse multiplies for multiplication by y^n */
2439static const u32 runnable_avg_yN_inv[] = {
2440 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2441 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2442 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2443 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2444 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2445 0x85aac367, 0x82cd8698,
2446};
2447
2448/*
2449 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
2450 * over-estimates when re-combining.
2451 */
2452static const u32 runnable_avg_yN_sum[] = {
2453 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2454 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2455 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2456};
2457
2458/*
Paul Turner9d85f212012-10-04 13:18:29 +02002459 * Approximate:
2460 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2461 */
2462static __always_inline u64 decay_load(u64 val, u64 n)
2463{
Paul Turner5b51f2f2012-10-04 13:18:32 +02002464 unsigned int local_n;
2465
2466 if (!n)
2467 return val;
2468 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2469 return 0;
2470
2471 /* after bounds checking we can collapse to 32-bit */
2472 local_n = n;
2473
2474 /*
2475 * As y^PERIOD = 1/2, we can combine
Zhihui Zhang9c58c792014-09-20 21:24:36 -04002476 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
2477 * With a look-up table which covers y^n (n<PERIOD)
Paul Turner5b51f2f2012-10-04 13:18:32 +02002478 *
2479 * To achieve constant time decay_load.
2480 */
2481 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2482 val >>= local_n / LOAD_AVG_PERIOD;
2483 local_n %= LOAD_AVG_PERIOD;
Paul Turner9d85f212012-10-04 13:18:29 +02002484 }
2485
Yuyang Du9d89c252015-07-15 08:04:37 +08002486 val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
2487 return val;
Paul Turner5b51f2f2012-10-04 13:18:32 +02002488}
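/*
 * Illustrative values: since y^LOAD_AVG_PERIOD = y^32 = 1/2,
 * decay_load(1024, 32) is roughly 512 and decay_load(1024, 64) roughly 256;
 * anything older than LOAD_AVG_PERIOD * 63 periods (about two seconds of
 * 1024us periods) is treated as fully decayed and returns 0.
 */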
2489
2490/*
2491 * For updates fully spanning n periods, the contribution to runnable
2492 * average will be: \Sum 1024*y^n
2493 *
2494 * We can compute this reasonably efficiently by combining:
2495 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
2496 */
2497static u32 __compute_runnable_contrib(u64 n)
2498{
2499 u32 contrib = 0;
2500
2501 if (likely(n <= LOAD_AVG_PERIOD))
2502 return runnable_avg_yN_sum[n];
2503 else if (unlikely(n >= LOAD_AVG_MAX_N))
2504 return LOAD_AVG_MAX;
2505
2506	/* Build \Sum 1024*y^k (1 <= k <= n) from the precomputed partial sums */
2507 do {
2508 contrib /= 2; /* y^LOAD_AVG_PERIOD = 1/2 */
2509 contrib += runnable_avg_yN_sum[LOAD_AVG_PERIOD];
2510
2511 n -= LOAD_AVG_PERIOD;
2512 } while (n > LOAD_AVG_PERIOD);
2513
2514 contrib = decay_load(contrib, n);
2515 return contrib + runnable_avg_yN_sum[n];
Paul Turner9d85f212012-10-04 13:18:29 +02002516}
2517
2518/*
2519 * We can represent the historical contribution to runnable average as the
2520 * coefficients of a geometric series. To do this we sub-divide our runnable
2521 * history into segments of approximately 1ms (1024us); label the segment that
2522 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2523 *
2524 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2525 * p0 p1 p2
2526 * (now) (~1ms ago) (~2ms ago)
2527 *
2528 * Let u_i denote the fraction of p_i that the entity was runnable.
2529 *
2530 * We then designate the fractions u_i as our co-efficients, yielding the
2531 * following representation of historical load:
2532 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2533 *
2534 * We choose y based on the width of a reasonable scheduling period, fixing:
2535 * y^32 = 0.5
2536 *
2537 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2538 * approximately half as much as the contribution to load within the last ms
2539 * (u_0).
2540 *
2541 * When a period "rolls over" and we have new u_0`, multiplying the previous
2542 * sum again by y is sufficient to update:
2543 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2544 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
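 *
 * As an informal consequence: an entity that stays runnable keeps
 * accumulating the geometric series 1024*(y^0 + y^1 + ...) of weighted
 * time, so its load_sum converges towards weight * LOAD_AVG_MAX and its
 * load_avg (load_sum / LOAD_AVG_MAX, computed further below) converges
 * towards the entity's full weight; an entity runnable only half the
 * time settles at roughly half of that.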
2545 */
Yuyang Du9d89c252015-07-15 08:04:37 +08002546static __always_inline int
2547__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
Yuyang Du13962232015-07-15 08:04:41 +08002548 unsigned long weight, int running, struct cfs_rq *cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02002549{
Paul Turner5b51f2f2012-10-04 13:18:32 +02002550 u64 delta, periods;
Yuyang Du9d89c252015-07-15 08:04:37 +08002551 u32 contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02002552 int delta_w, decayed = 0;
Morten Rasmussen0c1dc6b2015-03-04 08:46:26 +01002553 unsigned long scale_freq = arch_scale_freq_capacity(NULL, cpu);
Paul Turner9d85f212012-10-04 13:18:29 +02002554
Yuyang Du9d89c252015-07-15 08:04:37 +08002555 delta = now - sa->last_update_time;
Paul Turner9d85f212012-10-04 13:18:29 +02002556 /*
2557 * This should only happen when time goes backwards, which it
2558 * unfortunately does during sched clock init when we swap over to TSC.
2559 */
2560 if ((s64)delta < 0) {
Yuyang Du9d89c252015-07-15 08:04:37 +08002561 sa->last_update_time = now;
Paul Turner9d85f212012-10-04 13:18:29 +02002562 return 0;
2563 }
2564
2565 /*
2566 * Use 1024ns as the unit of measurement since it's a reasonable
2567 * approximation of 1us and fast to compute.
2568 */
2569 delta >>= 10;
2570 if (!delta)
2571 return 0;
Yuyang Du9d89c252015-07-15 08:04:37 +08002572 sa->last_update_time = now;
Paul Turner9d85f212012-10-04 13:18:29 +02002573
 2574	/* delta_w is the amount already accumulated in the current (incomplete) period */
Yuyang Du9d89c252015-07-15 08:04:37 +08002575 delta_w = sa->period_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02002576 if (delta + delta_w >= 1024) {
Paul Turner9d85f212012-10-04 13:18:29 +02002577 decayed = 1;
2578
Yuyang Du9d89c252015-07-15 08:04:37 +08002579		/* the carry into the next period starts from zero; the remainder of delta is accrued below */
2580 sa->period_contrib = 0;
2581
Paul Turner9d85f212012-10-04 13:18:29 +02002582 /*
2583 * Now that we know we're crossing a period boundary, figure
2584 * out how much from delta we need to complete the current
2585 * period and accrue it.
2586 */
2587 delta_w = 1024 - delta_w;
Yuyang Du13962232015-07-15 08:04:41 +08002588 if (weight) {
Yuyang Du9d89c252015-07-15 08:04:37 +08002589 sa->load_sum += weight * delta_w;
Yuyang Du13962232015-07-15 08:04:41 +08002590 if (cfs_rq)
2591 cfs_rq->runnable_load_sum += weight * delta_w;
2592 }
Vincent Guittot36ee28e2015-02-27 16:54:04 +01002593 if (running)
Yuyang Du9d89c252015-07-15 08:04:37 +08002594 sa->util_sum += delta_w * scale_freq >> SCHED_CAPACITY_SHIFT;
Paul Turner9d85f212012-10-04 13:18:29 +02002595
Paul Turner5b51f2f2012-10-04 13:18:32 +02002596 delta -= delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02002597
Paul Turner5b51f2f2012-10-04 13:18:32 +02002598 /* Figure out how many additional periods this update spans */
2599 periods = delta / 1024;
2600 delta %= 1024;
2601
Yuyang Du9d89c252015-07-15 08:04:37 +08002602 sa->load_sum = decay_load(sa->load_sum, periods + 1);
Yuyang Du13962232015-07-15 08:04:41 +08002603 if (cfs_rq) {
2604 cfs_rq->runnable_load_sum =
2605 decay_load(cfs_rq->runnable_load_sum, periods + 1);
2606 }
Yuyang Du9d89c252015-07-15 08:04:37 +08002607 sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
Paul Turner5b51f2f2012-10-04 13:18:32 +02002608
2609 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
Yuyang Du9d89c252015-07-15 08:04:37 +08002610 contrib = __compute_runnable_contrib(periods);
Yuyang Du13962232015-07-15 08:04:41 +08002611 if (weight) {
Yuyang Du9d89c252015-07-15 08:04:37 +08002612 sa->load_sum += weight * contrib;
Yuyang Du13962232015-07-15 08:04:41 +08002613 if (cfs_rq)
2614 cfs_rq->runnable_load_sum += weight * contrib;
2615 }
Vincent Guittot36ee28e2015-02-27 16:54:04 +01002616 if (running)
Yuyang Du9d89c252015-07-15 08:04:37 +08002617 sa->util_sum += contrib * scale_freq >> SCHED_CAPACITY_SHIFT;
Paul Turner9d85f212012-10-04 13:18:29 +02002618 }
2619
2620 /* Remainder of delta accrued against u_0` */
Yuyang Du13962232015-07-15 08:04:41 +08002621 if (weight) {
Yuyang Du9d89c252015-07-15 08:04:37 +08002622 sa->load_sum += weight * delta;
Yuyang Du13962232015-07-15 08:04:41 +08002623 if (cfs_rq)
2624 cfs_rq->runnable_load_sum += weight * delta;
2625 }
Vincent Guittot36ee28e2015-02-27 16:54:04 +01002626 if (running)
Yuyang Du9d89c252015-07-15 08:04:37 +08002627 sa->util_sum += delta * scale_freq >> SCHED_CAPACITY_SHIFT;
2628
2629 sa->period_contrib += delta;
2630
2631 if (decayed) {
2632 sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
Yuyang Du13962232015-07-15 08:04:41 +08002633 if (cfs_rq) {
2634 cfs_rq->runnable_load_avg =
2635 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2636 }
Yuyang Du9d89c252015-07-15 08:04:37 +08002637 sa->util_avg = (sa->util_sum << SCHED_LOAD_SHIFT) / LOAD_AVG_MAX;
2638 }
Paul Turner9d85f212012-10-04 13:18:29 +02002639
2640 return decayed;
2641}
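
/*
 * A worked example of the accrual above (hypothetical values): with
 * period_contrib = 300 and a fresh delta of 2900 (in 1024ns units), the
 * current period is completed with delta_w = 724, two further full
 * periods are crossed (so the sums are decayed by periods + 1 = 3 and
 * gain __compute_runnable_contrib(2)), and the remaining 128 is accrued
 * against the new period via period_contrib.
 */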
2642
Paul Turnerc566e8e2012-10-04 13:18:30 +02002643#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Turnerbb17f652012-10-04 13:18:31 +02002644/*
Yuyang Du9d89c252015-07-15 08:04:37 +08002645 * Updating tg's load_avg is necessary before update_cfs_shares() (where it is
 2646 * done) and before effective_load() (where it is not, because it would be too costly).
Paul Turnerbb17f652012-10-04 13:18:31 +02002647 */
Yuyang Du9d89c252015-07-15 08:04:37 +08002648static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
Paul Turnerbb17f652012-10-04 13:18:31 +02002649{
Yuyang Du9d89c252015-07-15 08:04:37 +08002650 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
Paul Turnerbb17f652012-10-04 13:18:31 +02002651
Yuyang Du9d89c252015-07-15 08:04:37 +08002652 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
2653 atomic_long_add(delta, &cfs_rq->tg->load_avg);
2654 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
Paul Turnerbb17f652012-10-04 13:18:31 +02002655 }
Paul Turner8165e142012-10-04 13:18:31 +02002656}
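
/*
 * For example (hypothetical numbers): with tg_load_avg_contrib == 6400,
 * the shared tg->load_avg is only touched once the local cfs_rq's
 * load_avg has drifted by more than 100 (1/64th), which avoids hammering
 * the global atomic on every update.
 */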
Dietmar Eggemannf5f97392014-02-26 11:19:33 +00002657
Peter Zijlstra6e831252014-02-11 16:11:48 +01002658#else /* CONFIG_FAIR_GROUP_SCHED */
Yuyang Du9d89c252015-07-15 08:04:37 +08002659static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
Peter Zijlstra6e831252014-02-11 16:11:48 +01002660#endif /* CONFIG_FAIR_GROUP_SCHED */
Paul Turnerc566e8e2012-10-04 13:18:30 +02002661
Paul Turnerf1b17282012-10-04 13:18:31 +02002662static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2663
Yuyang Du9d89c252015-07-15 08:04:37 +08002664/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_shares() */
2665static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
2666{
2667 int decayed;
2668 struct sched_avg *sa = &cfs_rq->avg;
2669
2670 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
2671 long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
2672 sa->load_avg = max_t(long, sa->load_avg - r, 0);
2673 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
2674 }
2675
2676 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
2677 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
2678 sa->util_avg = max_t(long, sa->util_avg - r, 0);
2679 sa->util_sum = max_t(s32, sa->util_sum -
2680 ((r * LOAD_AVG_MAX) >> SCHED_LOAD_SHIFT), 0);
2681 }
2682
2683 decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
Yuyang Du13962232015-07-15 08:04:41 +08002684 scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
Yuyang Du9d89c252015-07-15 08:04:37 +08002685
2686#ifndef CONFIG_64BIT
2687 smp_wmb();
2688 cfs_rq->load_last_update_time_copy = sa->last_update_time;
2689#endif
2690
2691 return decayed;
2692}
2693
2694/* Update task and its cfs_rq load average */
2695static inline void update_load_avg(struct sched_entity *se, int update_tg)
Paul Turner9d85f212012-10-04 13:18:29 +02002696{
Paul Turner2dac7542012-10-04 13:18:30 +02002697 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Morten Rasmussen0c1dc6b2015-03-04 08:46:26 +01002698 int cpu = cpu_of(rq_of(cfs_rq));
Yuyang Du9d89c252015-07-15 08:04:37 +08002699 u64 now = cfs_rq_clock_task(cfs_rq);
Paul Turner2dac7542012-10-04 13:18:30 +02002700
Paul Turnerf1b17282012-10-04 13:18:31 +02002701 /*
Yuyang Du9d89c252015-07-15 08:04:37 +08002702 * Track the task load average for carrying it to a new CPU after it is migrated,
 2703 * and track the group sched_entity load average for the task_h_load calc in migration
Paul Turnerf1b17282012-10-04 13:18:31 +02002704 */
Yuyang Du9d89c252015-07-15 08:04:37 +08002705 __update_load_avg(now, cpu, &se->avg,
Yuyang Du13962232015-07-15 08:04:41 +08002706 se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
Paul Turnerf1b17282012-10-04 13:18:31 +02002707
Yuyang Du9d89c252015-07-15 08:04:37 +08002708 if (update_cfs_rq_load_avg(now, cfs_rq) && update_tg)
2709 update_tg_load_avg(cfs_rq, 0);
2710}
Paul Turner2dac7542012-10-04 13:18:30 +02002711
Yuyang Du9d89c252015-07-15 08:04:37 +08002712/* Add the load generated by se into cfs_rq's load average */
2713static inline void
2714enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2715{
2716 struct sched_avg *sa = &se->avg;
2717 u64 now = cfs_rq_clock_task(cfs_rq);
2718 int migrated = 0, decayed;
Paul Turner9ee474f2012-10-04 13:18:30 +02002719
Yuyang Du9d89c252015-07-15 08:04:37 +08002720 if (sa->last_update_time == 0) {
2721 sa->last_update_time = now;
2722 migrated = 1;
Vincent Guittot36ee28e2015-02-27 16:54:04 +01002723 }
Yuyang Du9d89c252015-07-15 08:04:37 +08002724 else {
2725 __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
Yuyang Du13962232015-07-15 08:04:41 +08002726 se->on_rq * scale_load_down(se->load.weight),
2727 cfs_rq->curr == se, NULL);
Yuyang Du9d89c252015-07-15 08:04:37 +08002728 }
2729
2730 decayed = update_cfs_rq_load_avg(now, cfs_rq);
2731
Yuyang Du13962232015-07-15 08:04:41 +08002732 cfs_rq->runnable_load_avg += sa->load_avg;
2733 cfs_rq->runnable_load_sum += sa->load_sum;
2734
Yuyang Du9d89c252015-07-15 08:04:37 +08002735 if (migrated) {
2736 cfs_rq->avg.load_avg += sa->load_avg;
2737 cfs_rq->avg.load_sum += sa->load_sum;
2738 cfs_rq->avg.util_avg += sa->util_avg;
2739 cfs_rq->avg.util_sum += sa->util_sum;
2740 }
2741
2742 if (decayed || migrated)
2743 update_tg_load_avg(cfs_rq, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02002744}
2745
Yuyang Du13962232015-07-15 08:04:41 +08002746/* Remove the runnable load generated by se from cfs_rq's runnable load average */
2747static inline void
2748dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2749{
2750 update_load_avg(se, 1);
2751
2752 cfs_rq->runnable_load_avg =
2753 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
2754 cfs_rq->runnable_load_sum =
2755 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
2756}
2757
Paul Turner9ee474f2012-10-04 13:18:30 +02002758/*
Yuyang Du9d89c252015-07-15 08:04:37 +08002759 * The task first catches up with the cfs_rq, and then subtracts
 2760 * itself from the cfs_rq (the task must be off the queue now).
Paul Turner9ee474f2012-10-04 13:18:30 +02002761 */
Yuyang Du9d89c252015-07-15 08:04:37 +08002762void remove_entity_load_avg(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02002763{
Yuyang Du9d89c252015-07-15 08:04:37 +08002764 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2765 u64 last_update_time;
Paul Turner9ee474f2012-10-04 13:18:30 +02002766
Yuyang Du9d89c252015-07-15 08:04:37 +08002767#ifndef CONFIG_64BIT
2768 u64 last_update_time_copy;
Paul Turner9ee474f2012-10-04 13:18:30 +02002769
Yuyang Du9d89c252015-07-15 08:04:37 +08002770 do {
2771 last_update_time_copy = cfs_rq->load_last_update_time_copy;
2772 smp_rmb();
2773 last_update_time = cfs_rq->avg.last_update_time;
2774 } while (last_update_time != last_update_time_copy);
2775#else
2776 last_update_time = cfs_rq->avg.last_update_time;
2777#endif
Paul Turner9ee474f2012-10-04 13:18:30 +02002778
Yuyang Du13962232015-07-15 08:04:41 +08002779 __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
Yuyang Du9d89c252015-07-15 08:04:37 +08002780 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
2781 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
Paul Turner2dac7542012-10-04 13:18:30 +02002782}
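
/*
 * Note that the entity's contribution is only parked in removed_load_avg /
 * removed_util_avg here; update_cfs_rq_load_avg() folds it out of the
 * cfs_rq on its next update, so the (possibly remote) rq does not need to
 * be locked at this point.
 */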
Vincent Guittot642dbc32013-04-18 18:34:26 +02002783
2784/*
2785 * Update the rq's load with the elapsed running time before entering
 2786 * idle. If the last scheduled task is not a CFS task, idle_enter will
2787 * be the only way to update the runnable statistic.
2788 */
2789void idle_enter_fair(struct rq *this_rq)
2790{
Vincent Guittot642dbc32013-04-18 18:34:26 +02002791}
2792
2793/*
2794 * Update the rq's load with the elapsed idle time before a task is
 2795 * scheduled. If the newly scheduled task is not a CFS task, idle_exit will
2796 * be the only way to update the runnable statistic.
2797 */
2798void idle_exit_fair(struct rq *this_rq)
2799{
Vincent Guittot642dbc32013-04-18 18:34:26 +02002800}
2801
Yuyang Du7ea241a2015-07-15 08:04:42 +08002802static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
2803{
2804 return cfs_rq->runnable_load_avg;
2805}
2806
2807static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
2808{
2809 return cfs_rq->avg.load_avg;
2810}
2811
Peter Zijlstra6e831252014-02-11 16:11:48 +01002812static int idle_balance(struct rq *this_rq);
2813
Peter Zijlstra38033c32014-01-23 20:32:21 +01002814#else /* CONFIG_SMP */
2815
Yuyang Du9d89c252015-07-15 08:04:37 +08002816static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
2817static inline void
2818enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
Yuyang Du13962232015-07-15 08:04:41 +08002819static inline void
2820dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
Yuyang Du9d89c252015-07-15 08:04:37 +08002821static inline void remove_entity_load_avg(struct sched_entity *se) {}
Peter Zijlstra6e831252014-02-11 16:11:48 +01002822
2823static inline int idle_balance(struct rq *rq)
2824{
2825 return 0;
2826}
2827
Peter Zijlstra38033c32014-01-23 20:32:21 +01002828#endif /* CONFIG_SMP */
Paul Turner9d85f212012-10-04 13:18:29 +02002829
Ingo Molnar2396af62007-08-09 11:16:48 +02002830static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002831{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002832#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02002833 struct task_struct *tsk = NULL;
2834
2835 if (entity_is_task(se))
2836 tsk = task_of(se);
2837
Lucas De Marchi41acab82010-03-10 23:37:45 -03002838 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002839 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002840
2841 if ((s64)delta < 0)
2842 delta = 0;
2843
Lucas De Marchi41acab82010-03-10 23:37:45 -03002844 if (unlikely(delta > se->statistics.sleep_max))
2845 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002846
Peter Zijlstra8c79a042012-01-30 14:51:37 +01002847 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03002848 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01002849
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002850 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02002851 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002852 trace_sched_stat_sleep(tsk, delta);
2853 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002854 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03002855 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02002856 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002857
2858 if ((s64)delta < 0)
2859 delta = 0;
2860
Lucas De Marchi41acab82010-03-10 23:37:45 -03002861 if (unlikely(delta > se->statistics.block_max))
2862 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002863
Peter Zijlstra8c79a042012-01-30 14:51:37 +01002864 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03002865 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02002866
Peter Zijlstrae4143142009-07-23 20:13:26 +02002867 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07002868 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03002869 se->statistics.iowait_sum += delta;
2870 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02002871 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07002872 }
2873
Andrew Vaginb781a602011-11-28 12:03:35 +03002874 trace_sched_stat_blocked(tsk, delta);
2875
Peter Zijlstrae4143142009-07-23 20:13:26 +02002876 /*
2877 * Blocking time is in units of nanosecs, so shift by
2878 * 20 to get a milliseconds-range estimation of the
2879 * amount of time that the task spent sleeping:
2880 */
2881 if (unlikely(prof_on == SLEEP_PROFILING)) {
2882 profile_hits(SLEEP_PROFILING,
2883 (void *)get_wchan(tsk),
2884 delta >> 20);
2885 }
2886 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02002887 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002888 }
2889#endif
2890}
2891
Peter Zijlstraddc97292007-10-15 17:00:10 +02002892static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
2893{
2894#ifdef CONFIG_SCHED_DEBUG
2895 s64 d = se->vruntime - cfs_rq->min_vruntime;
2896
2897 if (d < 0)
2898 d = -d;
2899
2900 if (d > 3*sysctl_sched_latency)
2901 schedstat_inc(cfs_rq, nr_spread_over);
2902#endif
2903}
2904
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002905static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002906place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
2907{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02002908 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02002909
Peter Zijlstra2cb86002007-11-09 22:39:37 +01002910 /*
2911 * The 'current' period is already promised to the current tasks,
2912 * however the extra weight of the new task will slow them down a
2913 * little, place the new task so that it fits in the slot that
2914 * stays open at the end.
2915 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02002916 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02002917 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002918
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002919 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01002920 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002921 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02002922
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002923 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002924 * Halve their sleep time's effect, to allow
2925 * for a gentler effect of sleepers:
2926 */
2927 if (sched_feat(GENTLE_FAIR_SLEEPERS))
2928 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02002929
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02002930 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002931 }
2932
Mike Galbraithb5d9d732009-09-08 11:12:28 +02002933 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05302934 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002935}
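
/*
 * For example (assuming a 6ms latency target and GENTLE_FAIR_SLEEPERS for
 * illustration): a task waking from sleep is placed about 3ms of vruntime
 * before min_vruntime, so it is briefly favoured on wakeup, while the
 * final max_vruntime() ensures an entity is never moved backwards and so
 * never gains time it is not owed.
 */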
2936
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002937static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
2938
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002939static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002940enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002941{
2942 /*
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002943 * Update the normalized vruntime before updating min_vruntime
Kamalesh Babulal0fc576d2013-06-27 11:24:18 +05302944 * through calling update_curr().
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002945 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01002946 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002947 se->vruntime += cfs_rq->min_vruntime;
2948
2949 /*
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02002950 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002951 */
Ingo Molnarb7cc0892007-08-09 11:16:47 +02002952 update_curr(cfs_rq);
Yuyang Du9d89c252015-07-15 08:04:37 +08002953 enqueue_entity_load_avg(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08002954 account_entity_enqueue(cfs_rq, se);
2955 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002956
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01002957 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02002958 place_entity(cfs_rq, se, 0);
Ingo Molnar2396af62007-08-09 11:16:48 +02002959 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02002960 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002961
Ingo Molnard2417e52007-08-09 11:16:47 +02002962 update_stats_enqueue(cfs_rq, se);
Peter Zijlstraddc97292007-10-15 17:00:10 +02002963 check_spread(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02002964 if (se != cfs_rq->curr)
2965 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002966 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08002967
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002968 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08002969 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07002970 check_enqueue_throttle(cfs_rq);
2971 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02002972}
2973
Rik van Riel2c13c9192011-02-01 09:48:37 -05002974static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01002975{
Rik van Riel2c13c9192011-02-01 09:48:37 -05002976 for_each_sched_entity(se) {
2977 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstraf1044792012-02-11 06:05:00 +01002978 if (cfs_rq->last != se)
Rik van Riel2c13c9192011-02-01 09:48:37 -05002979 break;
Peter Zijlstraf1044792012-02-11 06:05:00 +01002980
2981 cfs_rq->last = NULL;
Rik van Riel2c13c9192011-02-01 09:48:37 -05002982 }
2983}
Peter Zijlstra2002c692008-11-11 11:52:33 +01002984
Rik van Riel2c13c9192011-02-01 09:48:37 -05002985static void __clear_buddies_next(struct sched_entity *se)
2986{
2987 for_each_sched_entity(se) {
2988 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstraf1044792012-02-11 06:05:00 +01002989 if (cfs_rq->next != se)
Rik van Riel2c13c9192011-02-01 09:48:37 -05002990 break;
Peter Zijlstraf1044792012-02-11 06:05:00 +01002991
2992 cfs_rq->next = NULL;
Rik van Riel2c13c9192011-02-01 09:48:37 -05002993 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01002994}
2995
Rik van Rielac53db52011-02-01 09:51:03 -05002996static void __clear_buddies_skip(struct sched_entity *se)
2997{
2998 for_each_sched_entity(se) {
2999 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstraf1044792012-02-11 06:05:00 +01003000 if (cfs_rq->skip != se)
Rik van Rielac53db52011-02-01 09:51:03 -05003001 break;
Peter Zijlstraf1044792012-02-11 06:05:00 +01003002
3003 cfs_rq->skip = NULL;
Rik van Rielac53db52011-02-01 09:51:03 -05003004 }
3005}
3006
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01003007static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3008{
Rik van Riel2c13c9192011-02-01 09:48:37 -05003009 if (cfs_rq->last == se)
3010 __clear_buddies_last(se);
3011
3012 if (cfs_rq->next == se)
3013 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05003014
3015 if (cfs_rq->skip == se)
3016 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01003017}
3018
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003019static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07003020
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003021static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003022dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003023{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02003024 /*
3025 * Update run-time statistics of the 'current'.
3026 */
3027 update_curr(cfs_rq);
Yuyang Du13962232015-07-15 08:04:41 +08003028 dequeue_entity_load_avg(cfs_rq, se);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02003029
Ingo Molnar19b6a2e2007-08-09 11:16:48 +02003030 update_stats_dequeue(cfs_rq, se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003031 if (flags & DEQUEUE_SLEEP) {
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02003032#ifdef CONFIG_SCHEDSTATS
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003033 if (entity_is_task(se)) {
3034 struct task_struct *tsk = task_of(se);
3035
3036 if (tsk->state & TASK_INTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003037 se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003038 if (tsk->state & TASK_UNINTERRUPTIBLE)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003039 se->statistics.block_start = rq_clock(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003040 }
Dmitry Adamushkodb36cc72007-10-15 17:00:06 +02003041#endif
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02003042 }
3043
Peter Zijlstra2002c692008-11-11 11:52:33 +01003044 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01003045
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003046 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003047 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003048 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003049 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003050
3051 /*
3052 * Normalize the entity after updating the min_vruntime because the
3053 * update can refer to the ->curr item and we need to reflect this
3054 * movement in our normalized position.
3055 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003056 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003057 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07003058
Paul Turnerd8b49862011-07-21 09:43:41 -07003059 /* return excess runtime on last dequeue */
3060 return_cfs_rq_runtime(cfs_rq);
3061
Peter Zijlstra1e876232011-05-17 16:21:10 -07003062 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003063 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003064}
3065
3066/*
3067 * Preempt the current task with a newly woken task if needed:
3068 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02003069static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02003070check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003071{
Peter Zijlstra11697832007-09-05 14:32:49 +02003072 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04003073 struct sched_entity *se;
3074 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02003075
Peter Zijlstra6d0f0eb2007-10-15 17:00:05 +02003076 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02003077 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01003078 if (delta_exec > ideal_runtime) {
Kirill Tkhai88751252014-06-29 00:03:57 +04003079 resched_curr(rq_of(cfs_rq));
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01003080 /*
3081 * The current task ran long enough, ensure it doesn't get
3082 * re-elected due to buddy favours.
3083 */
3084 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02003085 return;
3086 }
3087
3088 /*
3089 * Ensure that a task that missed wakeup preemption by a
3090 * narrow margin doesn't have to wait for a full slice.
3091 * This also mitigates buddy induced latencies under load.
3092 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02003093 if (delta_exec < sysctl_sched_min_granularity)
3094 return;
3095
Wang Xingchaof4cfb332011-09-16 13:35:52 -04003096 se = __pick_first_entity(cfs_rq);
3097 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02003098
Wang Xingchaof4cfb332011-09-16 13:35:52 -04003099 if (delta < 0)
3100 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01003101
Wang Xingchaof4cfb332011-09-16 13:35:52 -04003102 if (delta > ideal_runtime)
Kirill Tkhai88751252014-06-29 00:03:57 +04003103 resched_curr(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003104}
3105
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003106static void
Ingo Molnar8494f412007-08-09 11:16:48 +02003107set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003108{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003109 /* 'current' is not kept within the tree. */
3110 if (se->on_rq) {
3111 /*
3112 * Any task has to be enqueued before it get to execute on
3113 * a CPU. So account for the time it spent waiting on the
3114 * runqueue.
3115 */
3116 update_stats_wait_end(cfs_rq, se);
3117 __dequeue_entity(cfs_rq, se);
Yuyang Du9d89c252015-07-15 08:04:37 +08003118 update_load_avg(se, 1);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003119 }
3120
Ingo Molnar79303e92007-08-09 11:16:47 +02003121 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02003122 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02003123#ifdef CONFIG_SCHEDSTATS
3124 /*
3125 * Track our maximum slice length, if the CPU's load is at
 3126 * least twice that of our own weight (i.e. don't track it
3127 * when there are only lesser-weight tasks around):
3128 */
Dmitry Adamushko495eca42007-10-15 17:00:06 +02003129 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03003130 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02003131 se->sum_exec_runtime - se->prev_sum_exec_runtime);
3132 }
3133#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02003134 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003135}
3136
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02003137static int
3138wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3139
Rik van Rielac53db52011-02-01 09:51:03 -05003140/*
3141 * Pick the next process, keeping these things in mind, in this order:
3142 * 1) keep things fair between processes/task groups
3143 * 2) pick the "next" process, since someone really wants that to run
3144 * 3) pick the "last" process, for cache locality
3145 * 4) do not run the "skip" process, if something else is available
3146 */
Peter Zijlstra678d5712012-02-11 06:05:00 +01003147static struct sched_entity *
3148pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01003149{
Peter Zijlstra678d5712012-02-11 06:05:00 +01003150 struct sched_entity *left = __pick_first_entity(cfs_rq);
3151 struct sched_entity *se;
3152
3153 /*
3154 * If curr is set we have to see if its left of the leftmost entity
3155 * still in the tree, provided there was anything in the tree at all.
3156 */
3157 if (!left || (curr && entity_before(curr, left)))
3158 left = curr;
3159
3160 se = left; /* ideally we run the leftmost entity */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01003161
Rik van Rielac53db52011-02-01 09:51:03 -05003162 /*
3163 * Avoid running the skip buddy, if running something else can
3164 * be done without getting too unfair.
3165 */
3166 if (cfs_rq->skip == se) {
Peter Zijlstra678d5712012-02-11 06:05:00 +01003167 struct sched_entity *second;
3168
3169 if (se == curr) {
3170 second = __pick_first_entity(cfs_rq);
3171 } else {
3172 second = __pick_next_entity(se);
3173 if (!second || (curr && entity_before(curr, second)))
3174 second = curr;
3175 }
3176
Rik van Rielac53db52011-02-01 09:51:03 -05003177 if (second && wakeup_preempt_entity(second, left) < 1)
3178 se = second;
3179 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01003180
Mike Galbraithf685cea2009-10-23 23:09:22 +02003181 /*
3182 * Prefer last buddy, try to return the CPU to a preempted task.
3183 */
3184 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3185 se = cfs_rq->last;
3186
Rik van Rielac53db52011-02-01 09:51:03 -05003187 /*
3188 * Someone really wants this to run. If it's not unfair, run it.
3189 */
3190 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3191 se = cfs_rq->next;
3192
Mike Galbraithf685cea2009-10-23 23:09:22 +02003193 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01003194
3195 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01003196}
3197
Peter Zijlstra678d5712012-02-11 06:05:00 +01003198static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003199
Ingo Molnarab6cde22007-08-09 11:16:48 +02003200static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003201{
3202 /*
3203 * If still on the runqueue then deactivate_task()
3204 * was not called and update_curr() has to be done:
3205 */
3206 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02003207 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003208
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003209 /* throttle cfs_rqs exceeding runtime */
3210 check_cfs_rq_runtime(cfs_rq);
3211
Peter Zijlstraddc97292007-10-15 17:00:10 +02003212 check_spread(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003213 if (prev->on_rq) {
Ingo Molnar5870db52007-08-09 11:16:47 +02003214 update_stats_wait_start(cfs_rq, prev);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003215 /* Put 'current' back into the tree. */
3216 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02003217 /* in !on_rq case, update occurred at dequeue */
Yuyang Du9d89c252015-07-15 08:04:37 +08003218 update_load_avg(prev, 0);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003219 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02003220 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003221}
3222
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003223static void
3224entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003225{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003226 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003227 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003228 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003229 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003230
Paul Turner43365bd2010-12-15 19:10:17 -08003231 /*
Paul Turner9d85f212012-10-04 13:18:29 +02003232 * Ensure that runnable average is periodically updated.
3233 */
Yuyang Du9d89c252015-07-15 08:04:37 +08003234 update_load_avg(curr, 1);
Peter Zijlstrabf0bd942013-07-26 23:48:42 +02003235 update_cfs_shares(cfs_rq);
Paul Turner9d85f212012-10-04 13:18:29 +02003236
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003237#ifdef CONFIG_SCHED_HRTICK
3238 /*
3239 * queued ticks are scheduled to match the slice, so don't bother
3240 * validating it and just reschedule.
3241 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003242 if (queued) {
Kirill Tkhai88751252014-06-29 00:03:57 +04003243 resched_curr(rq_of(cfs_rq));
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003244 return;
3245 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003246 /*
3247 * don't let the period tick interfere with the hrtick preemption
3248 */
3249 if (!sched_feat(DOUBLE_TICK) &&
3250 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3251 return;
3252#endif
3253
Yong Zhang2c2efae2011-07-29 16:20:33 +08003254 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02003255 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003256}
3257
Paul Turnerab84d312011-07-21 09:43:28 -07003258
3259/**************************************************
3260 * CFS bandwidth control machinery
3261 */
3262
3263#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02003264
3265#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01003266static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003267
3268static inline bool cfs_bandwidth_used(void)
3269{
Ingo Molnarc5905af2012-02-24 08:31:31 +01003270 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003271}
3272
Ben Segall1ee14e62013-10-16 11:16:12 -07003273void cfs_bandwidth_usage_inc(void)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003274{
Ben Segall1ee14e62013-10-16 11:16:12 -07003275 static_key_slow_inc(&__cfs_bandwidth_used);
3276}
3277
3278void cfs_bandwidth_usage_dec(void)
3279{
3280 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003281}
3282#else /* HAVE_JUMP_LABEL */
3283static bool cfs_bandwidth_used(void)
3284{
3285 return true;
3286}
3287
Ben Segall1ee14e62013-10-16 11:16:12 -07003288void cfs_bandwidth_usage_inc(void) {}
3289void cfs_bandwidth_usage_dec(void) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003290#endif /* HAVE_JUMP_LABEL */
3291
Paul Turnerab84d312011-07-21 09:43:28 -07003292/*
3293 * default period for cfs group bandwidth.
3294 * default: 0.1s, units: nanoseconds
3295 */
3296static inline u64 default_cfs_period(void)
3297{
3298 return 100000000ULL;
3299}
Paul Turnerec12cb72011-07-21 09:43:30 -07003300
3301static inline u64 sched_cfs_bandwidth_slice(void)
3302{
3303 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3304}
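
/*
 * Illustrative usage via the cgroup v1 cpu controller (values are
 * hypothetical):
 *
 *	echo 100000 > cpu.cfs_period_us
 *	echo  25000 > cpu.cfs_quota_us
 *
 * caps the group at roughly a quarter of one CPU per period; runtime is
 * then handed to individual cfs_rqs in sched_cfs_bandwidth_slice() sized
 * chunks by assign_cfs_rq_runtime() below.
 */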
3305
Paul Turnera9cf55b2011-07-21 09:43:32 -07003306/*
3307 * Replenish runtime according to assigned quota and update expiration time.
3308 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3309 * additional synchronization around rq->lock.
3310 *
3311 * requires cfs_b->lock
3312 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02003313void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07003314{
3315 u64 now;
3316
3317 if (cfs_b->quota == RUNTIME_INF)
3318 return;
3319
3320 now = sched_clock_cpu(smp_processor_id());
3321 cfs_b->runtime = cfs_b->quota;
3322 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3323}
3324
Peter Zijlstra029632f2011-10-25 10:00:11 +02003325static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3326{
3327 return &tg->cfs_bandwidth;
3328}
3329
Paul Turnerf1b17282012-10-04 13:18:31 +02003330/* rq->clock_task normalized against any time this cfs_rq has spent throttled */
3331static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3332{
3333 if (unlikely(cfs_rq->throttle_count))
3334 return cfs_rq->throttled_clock_task;
3335
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003336 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02003337}
3338
Paul Turner85dac902011-07-21 09:43:33 -07003339/* returns 0 on failure to allocate runtime */
3340static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07003341{
3342 struct task_group *tg = cfs_rq->tg;
3343 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07003344 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07003345
3346 /* note: this is a positive sum as runtime_remaining <= 0 */
3347 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3348
3349 raw_spin_lock(&cfs_b->lock);
3350 if (cfs_b->quota == RUNTIME_INF)
3351 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07003352 else {
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003353 start_cfs_bandwidth(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07003354
3355 if (cfs_b->runtime > 0) {
3356 amount = min(cfs_b->runtime, min_amount);
3357 cfs_b->runtime -= amount;
3358 cfs_b->idle = 0;
3359 }
Paul Turnerec12cb72011-07-21 09:43:30 -07003360 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07003361 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07003362 raw_spin_unlock(&cfs_b->lock);
3363
3364 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003365 /*
3366 * we may have advanced our local expiration to account for allowed
3367 * spread between our sched_clock and the one on which runtime was
3368 * issued.
3369 */
3370 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3371 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07003372
3373 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003374}
3375
3376/*
3377 * Note: This depends on the synchronization provided by sched_clock and the
3378 * fact that rq->clock snapshots this value.
3379 */
3380static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3381{
3382 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07003383
3384 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003385 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07003386 return;
3387
3388 if (cfs_rq->runtime_remaining < 0)
3389 return;
3390
3391 /*
3392 * If the local deadline has passed we have to consider the
3393 * possibility that our sched_clock is 'fast' and the global deadline
3394 * has not truly expired.
3395 *
 3396 * Fortunately we can determine whether this is the case by checking
Ben Segall51f21762014-05-19 15:49:45 -07003397 * whether the global deadline has advanced. It is valid to compare
3398 * cfs_b->runtime_expires without any locks since we only care about
3399 * exact equality, so a partial write will still work.
Paul Turnera9cf55b2011-07-21 09:43:32 -07003400 */
3401
Ben Segall51f21762014-05-19 15:49:45 -07003402 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
Paul Turnera9cf55b2011-07-21 09:43:32 -07003403 /* extend local deadline, drift is bounded above by 2 ticks */
3404 cfs_rq->runtime_expires += TICK_NSEC;
3405 } else {
3406 /* global deadline is ahead, expiration has passed */
3407 cfs_rq->runtime_remaining = 0;
3408 }
Paul Turnerec12cb72011-07-21 09:43:30 -07003409}
3410
Peter Zijlstra9dbdb152013-11-18 18:27:06 +01003411static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07003412{
Paul Turnera9cf55b2011-07-21 09:43:32 -07003413 /* dock delta_exec before expiring quota (as it could span periods) */
Paul Turnerec12cb72011-07-21 09:43:30 -07003414 cfs_rq->runtime_remaining -= delta_exec;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003415 expire_cfs_rq_runtime(cfs_rq);
3416
3417 if (likely(cfs_rq->runtime_remaining > 0))
Paul Turnerec12cb72011-07-21 09:43:30 -07003418 return;
3419
Paul Turner85dac902011-07-21 09:43:33 -07003420 /*
3421 * if we're unable to extend our runtime we resched so that the active
3422 * hierarchy can be throttled
3423 */
3424 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
Kirill Tkhai88751252014-06-29 00:03:57 +04003425 resched_curr(rq_of(cfs_rq));
Paul Turnerec12cb72011-07-21 09:43:30 -07003426}
3427
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003428static __always_inline
Peter Zijlstra9dbdb152013-11-18 18:27:06 +01003429void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
Paul Turnerec12cb72011-07-21 09:43:30 -07003430{
Paul Turner56f570e2011-11-07 20:26:33 -08003431 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
Paul Turnerec12cb72011-07-21 09:43:30 -07003432 return;
3433
3434 __account_cfs_rq_runtime(cfs_rq, delta_exec);
3435}
3436
Paul Turner85dac902011-07-21 09:43:33 -07003437static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3438{
Paul Turner56f570e2011-11-07 20:26:33 -08003439 return cfs_bandwidth_used() && cfs_rq->throttled;
Paul Turner85dac902011-07-21 09:43:33 -07003440}
3441
Paul Turner64660c82011-07-21 09:43:36 -07003442/* check whether cfs_rq, or any parent, is throttled */
3443static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3444{
Paul Turner56f570e2011-11-07 20:26:33 -08003445 return cfs_bandwidth_used() && cfs_rq->throttle_count;
Paul Turner64660c82011-07-21 09:43:36 -07003446}
3447
3448/*
 3449 * Ensure that neither of the group entities corresponding to src_cpu or
 3450 * dest_cpu is a member of a throttled hierarchy when performing group
3451 * load-balance operations.
3452 */
3453static inline int throttled_lb_pair(struct task_group *tg,
3454 int src_cpu, int dest_cpu)
3455{
3456 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
3457
3458 src_cfs_rq = tg->cfs_rq[src_cpu];
3459 dest_cfs_rq = tg->cfs_rq[dest_cpu];
3460
3461 return throttled_hierarchy(src_cfs_rq) ||
3462 throttled_hierarchy(dest_cfs_rq);
3463}
3464
3465/* updated child weight may affect parent so we have to do this bottom up */
3466static int tg_unthrottle_up(struct task_group *tg, void *data)
3467{
3468 struct rq *rq = data;
3469 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3470
3471 cfs_rq->throttle_count--;
3472#ifdef CONFIG_SMP
3473 if (!cfs_rq->throttle_count) {
Paul Turnerf1b17282012-10-04 13:18:31 +02003474 /* adjust cfs_rq_clock_task() */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003475 cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
Paul Turnerf1b17282012-10-04 13:18:31 +02003476 cfs_rq->throttled_clock_task;
Paul Turner64660c82011-07-21 09:43:36 -07003477 }
3478#endif
3479
3480 return 0;
3481}
3482
3483static int tg_throttle_down(struct task_group *tg, void *data)
3484{
3485 struct rq *rq = data;
3486 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
3487
Paul Turner82958362012-10-04 13:18:31 +02003488 /* group is entering throttled state, stop time */
3489 if (!cfs_rq->throttle_count)
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003490 cfs_rq->throttled_clock_task = rq_clock_task(rq);
Paul Turner64660c82011-07-21 09:43:36 -07003491 cfs_rq->throttle_count++;
3492
3493 return 0;
3494}
3495
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003496static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07003497{
3498 struct rq *rq = rq_of(cfs_rq);
3499 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3500 struct sched_entity *se;
3501 long task_delta, dequeue = 1;
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003502 bool empty;
Paul Turner85dac902011-07-21 09:43:33 -07003503
3504 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3505
Paul Turnerf1b17282012-10-04 13:18:31 +02003506 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07003507 rcu_read_lock();
3508 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3509 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07003510
3511 task_delta = cfs_rq->h_nr_running;
3512 for_each_sched_entity(se) {
3513 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3514 /* throttled entity or throttle-on-deactivate */
3515 if (!se->on_rq)
3516 break;
3517
3518 if (dequeue)
3519 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3520 qcfs_rq->h_nr_running -= task_delta;
3521
3522 if (qcfs_rq->load.weight)
3523 dequeue = 0;
3524 }
3525
3526 if (!se)
Kirill Tkhai72465442014-05-09 03:00:14 +04003527 sub_nr_running(rq, task_delta);
Paul Turner85dac902011-07-21 09:43:33 -07003528
3529 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003530 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07003531 raw_spin_lock(&cfs_b->lock);
Cong Wangd49db342015-06-24 12:41:47 -07003532 empty = list_empty(&cfs_b->throttled_cfs_rq);
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003533
Ben Segallc06f04c2014-06-20 15:21:20 -07003534 /*
3535 * Add to the _head_ of the list, so that an already-started
3536 * distribute_cfs_runtime will not see us
3537 */
3538 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003539
3540 /*
3541 * If we're the first throttled task, make sure the bandwidth
3542 * timer is running.
3543 */
3544 if (empty)
3545 start_cfs_bandwidth(cfs_b);
3546
Paul Turner85dac902011-07-21 09:43:33 -07003547 raw_spin_unlock(&cfs_b->lock);
3548}
3549
Peter Zijlstra029632f2011-10-25 10:00:11 +02003550void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner671fd9d2011-07-21 09:43:34 -07003551{
3552 struct rq *rq = rq_of(cfs_rq);
3553 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3554 struct sched_entity *se;
3555 int enqueue = 1;
3556 long task_delta;
3557
Michael Wang22b958d2013-06-04 14:23:39 +08003558 se = cfs_rq->tg->se[cpu_of(rq)];
Paul Turner671fd9d2011-07-21 09:43:34 -07003559
3560 cfs_rq->throttled = 0;
Frederic Weisbecker1a55af22013-04-12 01:51:01 +02003561
3562 update_rq_clock(rq);
3563
Paul Turner671fd9d2011-07-21 09:43:34 -07003564 raw_spin_lock(&cfs_b->lock);
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003565 cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
Paul Turner671fd9d2011-07-21 09:43:34 -07003566 list_del_rcu(&cfs_rq->throttled_list);
3567 raw_spin_unlock(&cfs_b->lock);
3568
Paul Turner64660c82011-07-21 09:43:36 -07003569 /* update hierarchical throttle state */
3570 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
3571
Paul Turner671fd9d2011-07-21 09:43:34 -07003572 if (!cfs_rq->load.weight)
3573 return;
3574
3575 task_delta = cfs_rq->h_nr_running;
3576 for_each_sched_entity(se) {
3577 if (se->on_rq)
3578 enqueue = 0;
3579
3580 cfs_rq = cfs_rq_of(se);
3581 if (enqueue)
3582 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
3583 cfs_rq->h_nr_running += task_delta;
3584
3585 if (cfs_rq_throttled(cfs_rq))
3586 break;
3587 }
3588
3589 if (!se)
Kirill Tkhai72465442014-05-09 03:00:14 +04003590 add_nr_running(rq, task_delta);
Paul Turner671fd9d2011-07-21 09:43:34 -07003591
3592 /* determine whether we need to wake up potentially idle cpu */
3593 if (rq->curr == rq->idle && rq->cfs.nr_running)
Kirill Tkhai88751252014-06-29 00:03:57 +04003594 resched_curr(rq);
Paul Turner671fd9d2011-07-21 09:43:34 -07003595}
3596
3597static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
3598 u64 remaining, u64 expires)
3599{
3600 struct cfs_rq *cfs_rq;
Ben Segallc06f04c2014-06-20 15:21:20 -07003601 u64 runtime;
3602 u64 starting_runtime = remaining;
Paul Turner671fd9d2011-07-21 09:43:34 -07003603
3604 rcu_read_lock();
3605 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
3606 throttled_list) {
3607 struct rq *rq = rq_of(cfs_rq);
3608
3609 raw_spin_lock(&rq->lock);
3610 if (!cfs_rq_throttled(cfs_rq))
3611 goto next;
3612
3613 runtime = -cfs_rq->runtime_remaining + 1;
3614 if (runtime > remaining)
3615 runtime = remaining;
3616 remaining -= runtime;
3617
3618 cfs_rq->runtime_remaining += runtime;
3619 cfs_rq->runtime_expires = expires;
3620
3621 /* we check whether we're throttled above */
3622 if (cfs_rq->runtime_remaining > 0)
3623 unthrottle_cfs_rq(cfs_rq);
3624
3625next:
3626 raw_spin_unlock(&rq->lock);
3627
3628 if (!remaining)
3629 break;
3630 }
3631 rcu_read_unlock();
3632
Ben Segallc06f04c2014-06-20 15:21:20 -07003633 return starting_runtime - remaining;
Paul Turner671fd9d2011-07-21 09:43:34 -07003634}
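
/*
 * For instance (hypothetical numbers): with 10ms of refreshed runtime and
 * three throttled cfs_rqs each 2ms in deficit, the walk above hands each
 * of them its deficit plus 1ns and unthrottles them, returning ~6ms as
 * consumed so that ~4ms stays in the global pool.
 */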
3635
Paul Turner58088ad2011-07-21 09:43:31 -07003636/*
3637 * Responsible for refilling a task_group's bandwidth and unthrottling its
3638 * cfs_rqs as appropriate. If there has been no activity within the last
3639 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
3640 * used to track this state.
3641 */
3642static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
3643{
Paul Turner671fd9d2011-07-21 09:43:34 -07003644 u64 runtime, runtime_expires;
Ben Segall51f21762014-05-19 15:49:45 -07003645 int throttled;
Paul Turner58088ad2011-07-21 09:43:31 -07003646
Paul Turner58088ad2011-07-21 09:43:31 -07003647 /* no need to continue the timer with no bandwidth constraint */
3648 if (cfs_b->quota == RUNTIME_INF)
Ben Segall51f21762014-05-19 15:49:45 -07003649 goto out_deactivate;
Paul Turner58088ad2011-07-21 09:43:31 -07003650
Paul Turner671fd9d2011-07-21 09:43:34 -07003651 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
Nikhil Raoe8da1b12011-07-21 09:43:40 -07003652 cfs_b->nr_periods += overrun;
Paul Turner671fd9d2011-07-21 09:43:34 -07003653
Ben Segall51f21762014-05-19 15:49:45 -07003654 /*
3655 * idle depends on !throttled (for the case of a large deficit), and if
3656 * we're going inactive then everything else can be deferred
3657 */
3658 if (cfs_b->idle && !throttled)
3659 goto out_deactivate;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003660
3661 __refill_cfs_bandwidth_runtime(cfs_b);
3662
Paul Turner671fd9d2011-07-21 09:43:34 -07003663 if (!throttled) {
3664 /* mark as potentially idle for the upcoming period */
3665 cfs_b->idle = 1;
Ben Segall51f21762014-05-19 15:49:45 -07003666 return 0;
Paul Turner671fd9d2011-07-21 09:43:34 -07003667 }
Paul Turner58088ad2011-07-21 09:43:31 -07003668
Nikhil Raoe8da1b12011-07-21 09:43:40 -07003669 /* account preceding periods in which throttling occurred */
3670 cfs_b->nr_throttled += overrun;
3671
Paul Turner671fd9d2011-07-21 09:43:34 -07003672 runtime_expires = cfs_b->runtime_expires;
Paul Turner671fd9d2011-07-21 09:43:34 -07003673
3674 /*
Ben Segallc06f04c2014-06-20 15:21:20 -07003675 * This check is repeated as we are holding onto the new bandwidth while
3676 * we unthrottle. This can potentially race with an unthrottled group
3677 * trying to acquire new bandwidth from the global pool. This can result
3678 * in us over-using our runtime if it is all used during this loop, but
3679 * only by limited amounts in that extreme case.
Paul Turner671fd9d2011-07-21 09:43:34 -07003680 */
Ben Segallc06f04c2014-06-20 15:21:20 -07003681 while (throttled && cfs_b->runtime > 0) {
3682 runtime = cfs_b->runtime;
Paul Turner671fd9d2011-07-21 09:43:34 -07003683 raw_spin_unlock(&cfs_b->lock);
3684 /* we can't nest cfs_b->lock while distributing bandwidth */
3685 runtime = distribute_cfs_runtime(cfs_b, runtime,
3686 runtime_expires);
3687 raw_spin_lock(&cfs_b->lock);
3688
3689 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
Ben Segallc06f04c2014-06-20 15:21:20 -07003690
3691 cfs_b->runtime -= min(runtime, cfs_b->runtime);
Paul Turner671fd9d2011-07-21 09:43:34 -07003692 }
3693
Paul Turner671fd9d2011-07-21 09:43:34 -07003694 /*
3695 * While we are ensured activity in the period following an
3696 * unthrottle, this also covers the case in which the new bandwidth is
3697 * insufficient to cover the existing bandwidth deficit. (Forcing the
3698 * timer to remain active while there are any throttled entities.)
3699 */
3700 cfs_b->idle = 0;
Paul Turner58088ad2011-07-21 09:43:31 -07003701
Ben Segall51f21762014-05-19 15:49:45 -07003702 return 0;
3703
3704out_deactivate:
Ben Segall51f21762014-05-19 15:49:45 -07003705 return 1;
Paul Turner58088ad2011-07-21 09:43:31 -07003706}
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003707
Paul Turnerd8b49862011-07-21 09:43:41 -07003708/* a cfs_rq won't donate quota below this amount */
3709static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
3710/* minimum remaining period time to redistribute slack quota */
3711static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
3712/* how long we wait to gather additional slack before distributing */
3713static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
3714
Ben Segalldb06e782013-10-16 11:16:17 -07003715/*
3716 * Are we near the end of the current quota period?
3717 *
3718 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
Thomas Gleixner4961b6e2015-04-14 21:09:05 +00003719 * hrtimer base being cleared by hrtimer_start. In the case of
Ben Segalldb06e782013-10-16 11:16:17 -07003720 * migrate_hrtimers, base is never cleared, so we are fine.
3721 */
Paul Turnerd8b49862011-07-21 09:43:41 -07003722static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
3723{
3724 struct hrtimer *refresh_timer = &cfs_b->period_timer;
3725 u64 remaining;
3726
3727 /* if the call-back is running a quota refresh is already occurring */
3728 if (hrtimer_callback_running(refresh_timer))
3729 return 1;
3730
3731 /* is a quota refresh about to occur? */
3732 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
3733 if (remaining < min_expire)
3734 return 1;
3735
3736 return 0;
3737}
3738
3739static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
3740{
3741 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
3742
3743 /* if there's a quota refresh soon don't bother with slack */
3744 if (runtime_refresh_within(cfs_b, min_left))
3745 return;
3746
Peter Zijlstra4cfafd32015-05-14 12:23:11 +02003747 hrtimer_start(&cfs_b->slack_timer,
3748 ns_to_ktime(cfs_bandwidth_slack_period),
3749 HRTIMER_MODE_REL);
Paul Turnerd8b49862011-07-21 09:43:41 -07003750}
3751
3752/* we know any runtime found here is valid as update_curr() precedes return */
3753static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3754{
3755 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3756 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
3757
3758 if (slack_runtime <= 0)
3759 return;
3760
3761 raw_spin_lock(&cfs_b->lock);
3762 if (cfs_b->quota != RUNTIME_INF &&
3763 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
3764 cfs_b->runtime += slack_runtime;
3765
3766 /* we are under rq->lock, defer unthrottling using a timer */
3767 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
3768 !list_empty(&cfs_b->throttled_cfs_rq))
3769 start_cfs_slack_bandwidth(cfs_b);
3770 }
3771 raw_spin_unlock(&cfs_b->lock);
3772
3773 /* even if it's not valid for return we don't want to try again */
3774 cfs_rq->runtime_remaining -= slack_runtime;
3775}
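
/*
 * For example (hypothetical values): a cfs_rq left with 4ms of local
 * runtime donates 3ms back to the global pool and keeps the 1ms
 * min_cfs_rq_runtime floor; if enough slack accumulates while other
 * cfs_rqs are throttled, the slack timer above redistributes it.
 */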
3776
3777static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3778{
Paul Turner56f570e2011-11-07 20:26:33 -08003779 if (!cfs_bandwidth_used())
3780 return;
3781
Paul Turnerfccfdc62011-11-07 20:26:34 -08003782 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
Paul Turnerd8b49862011-07-21 09:43:41 -07003783 return;
3784
3785 __return_cfs_rq_runtime(cfs_rq);
3786}
3787
3788/*
3789 * This is done with a timer (instead of inline with bandwidth return) since
3790 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
3791 */
3792static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
3793{
3794 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
3795 u64 expires;
3796
3797 /* confirm we're still not at a refresh boundary */
Paul Turnerd8b49862011-07-21 09:43:41 -07003798 raw_spin_lock(&cfs_b->lock);
Ben Segalldb06e782013-10-16 11:16:17 -07003799 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
3800 raw_spin_unlock(&cfs_b->lock);
3801 return;
3802 }
3803
Ben Segallc06f04c2014-06-20 15:21:20 -07003804 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
Paul Turnerd8b49862011-07-21 09:43:41 -07003805 runtime = cfs_b->runtime;
Ben Segallc06f04c2014-06-20 15:21:20 -07003806
Paul Turnerd8b49862011-07-21 09:43:41 -07003807 expires = cfs_b->runtime_expires;
3808 raw_spin_unlock(&cfs_b->lock);
3809
3810 if (!runtime)
3811 return;
3812
3813 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
3814
3815 raw_spin_lock(&cfs_b->lock);
3816 if (expires == cfs_b->runtime_expires)
Ben Segallc06f04c2014-06-20 15:21:20 -07003817 cfs_b->runtime -= min(runtime, cfs_b->runtime);
Paul Turnerd8b49862011-07-21 09:43:41 -07003818 raw_spin_unlock(&cfs_b->lock);
3819}
3820
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003821/*
3822 * When a group wakes up we want to make sure that its quota is not already
3823 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
3824 * runtime as update_curr() throttling cannot trigger until it's on-rq.
3825 */
3826static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
3827{
Paul Turner56f570e2011-11-07 20:26:33 -08003828 if (!cfs_bandwidth_used())
3829 return;
3830
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003831 /* an active group must be handled by the update_curr()->put() path */
3832 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
3833 return;
3834
3835 /* ensure the group is not already throttled */
3836 if (cfs_rq_throttled(cfs_rq))
3837 return;
3838
3839 /* update runtime allocation */
3840 account_cfs_rq_runtime(cfs_rq, 0);
3841 if (cfs_rq->runtime_remaining <= 0)
3842 throttle_cfs_rq(cfs_rq);
3843}
3844
3845/* conditionally throttle active cfs_rq's from put_prev_entity() */
Peter Zijlstra678d5712012-02-11 06:05:00 +01003846static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003847{
Paul Turner56f570e2011-11-07 20:26:33 -08003848 if (!cfs_bandwidth_used())
Peter Zijlstra678d5712012-02-11 06:05:00 +01003849 return false;
Paul Turner56f570e2011-11-07 20:26:33 -08003850
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003851 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
Peter Zijlstra678d5712012-02-11 06:05:00 +01003852 return false;
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003853
3854 /*
3855 * it's possible for a throttled entity to be forced into a running
3856 * state (e.g. set_curr_task); in that case we're finished.
3857 */
3858 if (cfs_rq_throttled(cfs_rq))
Peter Zijlstra678d5712012-02-11 06:05:00 +01003859 return true;
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003860
3861 throttle_cfs_rq(cfs_rq);
Peter Zijlstra678d5712012-02-11 06:05:00 +01003862 return true;
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003863}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003864
Peter Zijlstra029632f2011-10-25 10:00:11 +02003865static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
3866{
3867 struct cfs_bandwidth *cfs_b =
3868 container_of(timer, struct cfs_bandwidth, slack_timer);
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003869
Peter Zijlstra029632f2011-10-25 10:00:11 +02003870 do_sched_cfs_slack_timer(cfs_b);
3871
3872 return HRTIMER_NORESTART;
3873}
3874
3875static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
3876{
3877 struct cfs_bandwidth *cfs_b =
3878 container_of(timer, struct cfs_bandwidth, period_timer);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003879 int overrun;
3880 int idle = 0;
3881
Ben Segall51f21762014-05-19 15:49:45 -07003882 raw_spin_lock(&cfs_b->lock);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003883 for (;;) {
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003884 overrun = hrtimer_forward_now(timer, cfs_b->period);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003885 if (!overrun)
3886 break;
3887
3888 idle = do_sched_cfs_period_timer(cfs_b, overrun);
3889 }
Peter Zijlstra4cfafd32015-05-14 12:23:11 +02003890 if (idle)
3891 cfs_b->period_active = 0;
Ben Segall51f21762014-05-19 15:49:45 -07003892 raw_spin_unlock(&cfs_b->lock);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003893
3894 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
3895}
3896
3897void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3898{
3899 raw_spin_lock_init(&cfs_b->lock);
3900 cfs_b->runtime = 0;
3901 cfs_b->quota = RUNTIME_INF;
3902 cfs_b->period = ns_to_ktime(default_cfs_period());
3903
3904 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
Peter Zijlstra4cfafd32015-05-14 12:23:11 +02003905 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003906 cfs_b->period_timer.function = sched_cfs_period_timer;
3907 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3908 cfs_b->slack_timer.function = sched_cfs_slack_timer;
3909}
3910
3911static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3912{
3913 cfs_rq->runtime_enabled = 0;
3914 INIT_LIST_HEAD(&cfs_rq->throttled_list);
3915}
3916
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003917void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003918{
Peter Zijlstra4cfafd32015-05-14 12:23:11 +02003919 lockdep_assert_held(&cfs_b->lock);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003920
Peter Zijlstra4cfafd32015-05-14 12:23:11 +02003921 if (!cfs_b->period_active) {
3922 cfs_b->period_active = 1;
3923 hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
3924 hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
3925 }
Peter Zijlstra029632f2011-10-25 10:00:11 +02003926}
3927
3928static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
3929{
Tetsuo Handa7f1a1692014-12-25 15:51:21 +09003930 /* init_cfs_bandwidth() was not called */
3931 if (!cfs_b->throttled_cfs_rq.next)
3932 return;
3933
Peter Zijlstra029632f2011-10-25 10:00:11 +02003934 hrtimer_cancel(&cfs_b->period_timer);
3935 hrtimer_cancel(&cfs_b->slack_timer);
3936}
3937
Kirill Tkhai0e59bda2014-06-25 12:19:42 +04003938static void __maybe_unused update_runtime_enabled(struct rq *rq)
3939{
3940 struct cfs_rq *cfs_rq;
3941
3942 for_each_leaf_cfs_rq(rq, cfs_rq) {
3943 struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
3944
3945 raw_spin_lock(&cfs_b->lock);
3946 cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
3947 raw_spin_unlock(&cfs_b->lock);
3948 }
3949}
3950
Arnd Bergmann38dc3342013-01-25 14:14:22 +00003951static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003952{
3953 struct cfs_rq *cfs_rq;
3954
3955 for_each_leaf_cfs_rq(rq, cfs_rq) {
Peter Zijlstra029632f2011-10-25 10:00:11 +02003956 if (!cfs_rq->runtime_enabled)
3957 continue;
3958
3959 /*
3960 * clock_task is not advancing so we just need to make sure
3961 * there's some valid quota amount
3962 */
Ben Segall51f21762014-05-19 15:49:45 -07003963 cfs_rq->runtime_remaining = 1;
Kirill Tkhai0e59bda2014-06-25 12:19:42 +04003964 /*
3965 * Offline rq is schedulable till cpu is completely disabled
3966 * in take_cpu_down(), so we prevent new cfs throttling here.
3967 */
3968 cfs_rq->runtime_enabled = 0;
3969
Peter Zijlstra029632f2011-10-25 10:00:11 +02003970 if (cfs_rq_throttled(cfs_rq))
3971 unthrottle_cfs_rq(cfs_rq);
3972 }
3973}
3974
3975#else /* CONFIG_CFS_BANDWIDTH */
Paul Turnerf1b17282012-10-04 13:18:31 +02003976static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3977{
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003978 return rq_clock_task(rq_of(cfs_rq));
Paul Turnerf1b17282012-10-04 13:18:31 +02003979}
3980
Peter Zijlstra9dbdb152013-11-18 18:27:06 +01003981static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
Peter Zijlstra678d5712012-02-11 06:05:00 +01003982static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003983static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003984static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turner85dac902011-07-21 09:43:33 -07003985
3986static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
3987{
3988 return 0;
3989}
Paul Turner64660c82011-07-21 09:43:36 -07003990
3991static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
3992{
3993 return 0;
3994}
3995
3996static inline int throttled_lb_pair(struct task_group *tg,
3997 int src_cpu, int dest_cpu)
3998{
3999 return 0;
4000}
Peter Zijlstra029632f2011-10-25 10:00:11 +02004001
4002void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
4003
4004#ifdef CONFIG_FAIR_GROUP_SCHED
4005static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
Paul Turnerab84d312011-07-21 09:43:28 -07004006#endif
4007
Peter Zijlstra029632f2011-10-25 10:00:11 +02004008static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
4009{
4010 return NULL;
4011}
4012static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
Kirill Tkhai0e59bda2014-06-25 12:19:42 +04004013static inline void update_runtime_enabled(struct rq *rq) {}
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07004014static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02004015
4016#endif /* CONFIG_CFS_BANDWIDTH */
4017
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004018/**************************************************
4019 * CFS operations on tasks:
4020 */
4021
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004022#ifdef CONFIG_SCHED_HRTICK
4023static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
4024{
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004025 struct sched_entity *se = &p->se;
4026 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4027
4028 WARN_ON(task_rq(p) != rq);
4029
Mike Galbraithb39e66e2011-11-22 15:20:07 +01004030 if (cfs_rq->nr_running > 1) {
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004031 u64 slice = sched_slice(cfs_rq, se);
4032 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
4033 s64 delta = slice - ran;
4034
4035 if (delta < 0) {
4036 if (rq->curr == p)
Kirill Tkhai88751252014-06-29 00:03:57 +04004037 resched_curr(rq);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004038 return;
4039 }
Peter Zijlstra31656512008-07-18 18:01:23 +02004040 hrtick_start(rq, delta);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004041 }
4042}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02004043
4044/*
4045 * called from enqueue/dequeue and updates the hrtick when the
4046 * current task is from our class and nr_running is low enough
4047 * to matter.
4048 */
4049static void hrtick_update(struct rq *rq)
4050{
4051 struct task_struct *curr = rq->curr;
4052
Mike Galbraithb39e66e2011-11-22 15:20:07 +01004053 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02004054 return;
4055
4056 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
4057 hrtick_start_fair(rq, curr);
4058}
Dhaval Giani55e12e52008-06-24 23:39:43 +05304059#else /* !CONFIG_SCHED_HRTICK */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004060static inline void
4061hrtick_start_fair(struct rq *rq, struct task_struct *p)
4062{
4063}
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02004064
4065static inline void hrtick_update(struct rq *rq)
4066{
4067}
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004068#endif
4069
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004070/*
4071 * The enqueue_task method is called before nr_running is
4072 * increased. Here we update the fair scheduling stats and
4073 * then put the task into the rbtree:
4074 */
Thomas Gleixnerea87bb72010-01-20 20:58:57 +00004075static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004076enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004077{
4078 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01004079 struct sched_entity *se = &p->se;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004080
4081 for_each_sched_entity(se) {
Peter Zijlstra62fb1852008-02-25 17:34:02 +01004082 if (se->on_rq)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004083 break;
4084 cfs_rq = cfs_rq_of(se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004085 enqueue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07004086
4087 /*
4088 * end evaluation on encountering a throttled cfs_rq
4089 *
4090 * note: in the case of encountering a throttled cfs_rq we will
4091 * post the final h_nr_running increment below.
4092 */
4093 if (cfs_rq_throttled(cfs_rq))
4094 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07004095 cfs_rq->h_nr_running++;
Paul Turner85dac902011-07-21 09:43:33 -07004096
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004097 flags = ENQUEUE_WAKEUP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004098 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004099
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004100 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08004101 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07004102 cfs_rq->h_nr_running++;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004103
Paul Turner85dac902011-07-21 09:43:33 -07004104 if (cfs_rq_throttled(cfs_rq))
4105 break;
4106
Yuyang Du9d89c252015-07-15 08:04:37 +08004107 update_load_avg(se, 1);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08004108 update_cfs_shares(cfs_rq);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004109 }
4110
Yuyang Ducd126af2015-07-15 08:04:36 +08004111 if (!se)
Kirill Tkhai72465442014-05-09 03:00:14 +04004112 add_nr_running(rq, 1);
Yuyang Ducd126af2015-07-15 08:04:36 +08004113
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02004114 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004115}
4116
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004117static void set_next_buddy(struct sched_entity *se);
4118
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004119/*
4120 * The dequeue_task method is called before nr_running is
4121 * decreased. We remove the task from the rbtree and
4122 * update the fair scheduling stats:
4123 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004124static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004125{
4126 struct cfs_rq *cfs_rq;
Peter Zijlstra62fb1852008-02-25 17:34:02 +01004127 struct sched_entity *se = &p->se;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004128 int task_sleep = flags & DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004129
4130 for_each_sched_entity(se) {
4131 cfs_rq = cfs_rq_of(se);
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004132 dequeue_entity(cfs_rq, se, flags);
Paul Turner85dac902011-07-21 09:43:33 -07004133
4134 /*
4135 * end evaluation on encountering a throttled cfs_rq
4136 *
4137 * note: in the case of encountering a throttled cfs_rq we will
4138 * post the final h_nr_running decrement below.
4139 */
4140 if (cfs_rq_throttled(cfs_rq))
4141 break;
Paul Turner953bfcd2011-07-21 09:43:27 -07004142 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004143
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004144 /* Don't dequeue parent if it has other entities besides us */
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004145 if (cfs_rq->load.weight) {
4146 /*
4147 * Bias pick_next to pick a task from this cfs_rq, as
4148 * p is sleeping when it is within its sched_slice.
4149 */
4150 if (task_sleep && parent_entity(se))
4151 set_next_buddy(parent_entity(se));
Paul Turner9598c822011-07-06 22:30:37 -07004152
4153 /* avoid re-evaluating load for this entity */
4154 se = parent_entity(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004155 break;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07004156 }
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01004157 flags |= DEQUEUE_SLEEP;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004158 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01004159
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004160 for_each_sched_entity(se) {
Lin Ming0f317142011-07-22 09:14:31 +08004161 cfs_rq = cfs_rq_of(se);
Paul Turner953bfcd2011-07-21 09:43:27 -07004162 cfs_rq->h_nr_running--;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004163
Paul Turner85dac902011-07-21 09:43:33 -07004164 if (cfs_rq_throttled(cfs_rq))
4165 break;
4166
Yuyang Du9d89c252015-07-15 08:04:37 +08004167 update_load_avg(se, 1);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08004168 update_cfs_shares(cfs_rq);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004169 }
4170
Yuyang Ducd126af2015-07-15 08:04:36 +08004171 if (!se)
Kirill Tkhai72465442014-05-09 03:00:14 +04004172 sub_nr_running(rq, 1);
Yuyang Ducd126af2015-07-15 08:04:36 +08004173
Peter Zijlstraa4c2f002008-10-17 19:27:03 +02004174 hrtick_update(rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02004175}
4176
Gregory Haskinse7693a32008-01-25 21:08:09 +01004177#ifdef CONFIG_SMP
Peter Zijlstra3289bdb2015-04-14 13:19:42 +02004178
4179/*
4180 * per rq 'load' array crap; XXX kill this.
4181 */
4182
4183/*
4184 * The exact cpuload at various idx values, calculated at every tick would be
4185 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
4186 *
4187 * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
4188 * on nth tick when cpu may be busy, then we have:
4189 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
4190 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
4191 *
4192 * decay_load_missed() below does efficient calculation of
4193 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
4194 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
4195 *
4196 * The calculation is approximated on a 128 point scale.
4197 * degrade_zero_ticks is the number of ticks after which load at any
4198 * particular idx is approximated to be zero.
4199 * degrade_factor is a precomputed table, a row for each load idx.
4200 * Each column corresponds to degradation factor for a power of two ticks,
4201 * based on 128 point scale.
4202 * Example:
4203 * row 2, col 3 (=12) says that the degradation at load idx 2 after
4204 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
4205 *
4206 * With this power of 2 load factors, we can degrade the load n times
4207 * by looking at 1 bits in n and doing as many mult/shift instead of
4208 * n mult/shifts needed by the exact degradation.
4209 */
4210#define DEGRADE_SHIFT 7
4211static const unsigned char
4212 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
4213static const unsigned char
4214 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
4215 {0, 0, 0, 0, 0, 0, 0, 0},
4216 {64, 32, 8, 0, 0, 0, 0, 0},
4217 {96, 72, 40, 12, 1, 0, 0},
4218 {112, 98, 75, 43, 15, 1, 0},
4219 {120, 112, 98, 76, 45, 16, 2} };
4220
4221/*
4222 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
4223 * would be when CPU is idle and so we just decay the old load without
4224 * adding any new load.
4225 */
4226static unsigned long
4227decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
4228{
4229 int j = 0;
4230
4231 if (!missed_updates)
4232 return load;
4233
4234 if (missed_updates >= degrade_zero_ticks[idx])
4235 return 0;
4236
4237 if (idx == 1)
4238 return load >> missed_updates;
4239
4240 while (missed_updates) {
4241 if (missed_updates % 2)
4242 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
4243
4244 missed_updates >>= 1;
4245 j++;
4246 }
4247 return load;
4248}
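A standalone sketch (plain userspace C, with the tables copied from above) of how the bit-wise walk in decay_load_missed() approximates ((2^idx - 1)/2^idx)^n on a 128-point scale, compared against the naive one-multiply-per-missed-tick loop it replaces:

#include <stdio.h>

#define DEGRADE_SHIFT 7
#define CPU_LOAD_IDX_MAX 5

static const unsigned char degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
static const unsigned char degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
	{0, 0, 0, 0, 0, 0, 0, 0},
	{64, 32, 8, 0, 0, 0, 0, 0},
	{96, 72, 40, 12, 1, 0, 0},
	{112, 98, 75, 43, 15, 1, 0},
	{120, 112, 98, 76, 45, 16, 2} };

/* table-driven variant, mirroring decay_load_missed() */
static unsigned long decay_fast(unsigned long load, unsigned long missed, int idx)
{
	int j = 0;

	if (!missed)
		return load;
	if (missed >= degrade_zero_ticks[idx])
		return 0;
	if (idx == 1)
		return load >> missed;
	while (missed) {
		if (missed % 2)
			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
		missed >>= 1;
		j++;
	}
	return load;
}

/* naive variant: apply (2^idx - 1)/2^idx once per missed tick */
static unsigned long decay_slow(unsigned long load, unsigned long missed, int idx)
{
	while (missed--)
		load = load * ((1UL << idx) - 1) / (1UL << idx);
	return load;
}

int main(void)
{
	unsigned long load = 1000;

	for (unsigned long n = 1; n <= 16; n <<= 1)
		printf("idx=2 missed=%2lu fast=%4lu slow=%4lu\n",
		       n, decay_fast(load, n, 2), decay_slow(load, n, 2));
	return 0;
}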
4249
4250/*
4251 * Update rq->cpu_load[] statistics. This function is usually called every
4252 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
4253 * every tick. We fix it up based on jiffies.
4254 */
4255static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
4256 unsigned long pending_updates)
4257{
4258 int i, scale;
4259
4260 this_rq->nr_load_updates++;
4261
4262 /* Update our load: */
4263 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
4264 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
4265 unsigned long old_load, new_load;
4266
4267 /* scale is effectively 1 << i now, and >> i divides by scale */
4268
4269 old_load = this_rq->cpu_load[i];
4270 old_load = decay_load_missed(old_load, pending_updates - 1, i);
4271 new_load = this_load;
4272 /*
4273 * Round up the averaging division if load is increasing. This
4274 * prevents us from getting stuck on 9 if the load is 10, for
4275 * example.
4276 */
4277 if (new_load > old_load)
4278 new_load += scale - 1;
4279
4280 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
4281 }
4282
4283 sched_avg_update(this_rq);
4284}
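A tiny standalone example of the round-up noted in the comment above: with old_load = 9, new_load = 10 and idx = 3, the plain weighted average never leaves 9, while adding scale - 1 when the load is rising lets it reach the target.

#include <stdio.h>

int main(void)
{
	unsigned long old_load = 9, new_load = 10;
	int i = 3, scale = 1 << i;
	unsigned long plain, rounded;

	/* plain average: (9*7 + 10) >> 3 = 73/8 = 9 -- stuck below the target */
	plain = (old_load * (scale - 1) + new_load) >> i;

	/* with the round-up when load rises: (9*7 + 10 + 7) >> 3 = 80/8 = 10 */
	if (new_load > old_load)
		new_load += scale - 1;
	rounded = (old_load * (scale - 1) + new_load) >> i;

	printf("without round-up: %lu, with round-up: %lu\n", plain, rounded);
	return 0;
}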
4285
Yuyang Du7ea241a2015-07-15 08:04:42 +08004286/* Used instead of source_load when we know the type == 0 */
4287static unsigned long weighted_cpuload(const int cpu)
4288{
4289 return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
4290}
4291
Peter Zijlstra3289bdb2015-04-14 13:19:42 +02004292#ifdef CONFIG_NO_HZ_COMMON
4293/*
4294 * There is no sane way to deal with nohz on smp when using jiffies because the
4295 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
4296 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
4297 *
4298 * Therefore we cannot use the delta approach from the regular tick since that
4299 * would seriously skew the load calculation. However we'll make do for those
4300 * updates happening while idle (nohz_idle_balance) or coming out of idle
4301 * (tick_nohz_idle_exit).
4302 *
4303 * This means we might still be one tick off for nohz periods.
4304 */
4305
4306/*
4307 * Called from nohz_idle_balance() to update the load ratings before doing the
4308 * idle balance.
4309 */
4310static void update_idle_cpu_load(struct rq *this_rq)
4311{
Jason Low316c1608d2015-04-28 13:00:20 -07004312 unsigned long curr_jiffies = READ_ONCE(jiffies);
Yuyang Du7ea241a2015-07-15 08:04:42 +08004313 unsigned long load = weighted_cpuload(cpu_of(this_rq));
Peter Zijlstra3289bdb2015-04-14 13:19:42 +02004314 unsigned long pending_updates;
4315
4316 /*
4317 * bail if there's load or we're actually up-to-date.
4318 */
4319 if (load || curr_jiffies == this_rq->last_load_update_tick)
4320 return;
4321
4322 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
4323 this_rq->last_load_update_tick = curr_jiffies;
4324
4325 __update_cpu_load(this_rq, load, pending_updates);
4326}
4327
4328/*
4329 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
4330 */
4331void update_cpu_load_nohz(void)
4332{
4333 struct rq *this_rq = this_rq();
Jason Low316c1608d2015-04-28 13:00:20 -07004334 unsigned long curr_jiffies = READ_ONCE(jiffies);
Peter Zijlstra3289bdb2015-04-14 13:19:42 +02004335 unsigned long pending_updates;
4336
4337 if (curr_jiffies == this_rq->last_load_update_tick)
4338 return;
4339
4340 raw_spin_lock(&this_rq->lock);
4341 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
4342 if (pending_updates) {
4343 this_rq->last_load_update_tick = curr_jiffies;
4344 /*
4345 * We were idle, which means a load of 0; the current load might be
4346 * !0 due to remote wakeups and the like.
4347 */
4348 __update_cpu_load(this_rq, 0, pending_updates);
4349 }
4350 raw_spin_unlock(&this_rq->lock);
4351}
4352#endif /* CONFIG_NO_HZ */
4353
4354/*
4355 * Called from scheduler_tick()
4356 */
4357void update_cpu_load_active(struct rq *this_rq)
4358{
Yuyang Du7ea241a2015-07-15 08:04:42 +08004359 unsigned long load = weighted_cpuload(cpu_of(this_rq));
Peter Zijlstra3289bdb2015-04-14 13:19:42 +02004360 /*
4361 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
4362 */
4363 this_rq->last_load_update_tick = jiffies;
4364 __update_cpu_load(this_rq, load, 1);
4365}
4366
Peter Zijlstra029632f2011-10-25 10:00:11 +02004367/*
4368 * Return a low guess at the load of a migration-source cpu weighted
4369 * according to the scheduling class and "nice" value.
4370 *
4371 * We want to under-estimate the load of migration sources, to
4372 * balance conservatively.
4373 */
4374static unsigned long source_load(int cpu, int type)
4375{
4376 struct rq *rq = cpu_rq(cpu);
4377 unsigned long total = weighted_cpuload(cpu);
4378
4379 if (type == 0 || !sched_feat(LB_BIAS))
4380 return total;
4381
4382 return min(rq->cpu_load[type-1], total);
4383}
4384
4385/*
4386 * Return a high guess at the load of a migration-target cpu weighted
4387 * according to the scheduling class and "nice" value.
4388 */
4389static unsigned long target_load(int cpu, int type)
4390{
4391 struct rq *rq = cpu_rq(cpu);
4392 unsigned long total = weighted_cpuload(cpu);
4393
4394 if (type == 0 || !sched_feat(LB_BIAS))
4395 return total;
4396
4397 return max(rq->cpu_load[type-1], total);
4398}
4399
Nicolas Pitreced549f2014-05-26 18:19:38 -04004400static unsigned long capacity_of(int cpu)
Peter Zijlstra029632f2011-10-25 10:00:11 +02004401{
Nicolas Pitreced549f2014-05-26 18:19:38 -04004402 return cpu_rq(cpu)->cpu_capacity;
Peter Zijlstra029632f2011-10-25 10:00:11 +02004403}
4404
Vincent Guittotca6d75e2015-02-27 16:54:09 +01004405static unsigned long capacity_orig_of(int cpu)
4406{
4407 return cpu_rq(cpu)->cpu_capacity_orig;
4408}
4409
Peter Zijlstra029632f2011-10-25 10:00:11 +02004410static unsigned long cpu_avg_load_per_task(int cpu)
4411{
4412 struct rq *rq = cpu_rq(cpu);
Jason Low316c1608d2015-04-28 13:00:20 -07004413 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
Yuyang Du7ea241a2015-07-15 08:04:42 +08004414 unsigned long load_avg = weighted_cpuload(cpu);
Peter Zijlstra029632f2011-10-25 10:00:11 +02004415
4416 if (nr_running)
Alex Shib92486c2013-06-20 10:18:50 +08004417 return load_avg / nr_running;
Peter Zijlstra029632f2011-10-25 10:00:11 +02004418
4419 return 0;
4420}
4421
Michael Wang62470412013-07-04 12:55:51 +08004422static void record_wakee(struct task_struct *p)
4423{
4424 /*
4425 * Rough decay (wiping) for cost saving; don't worry
4426 * about the boundary, a really active task won't care
4427 * about the loss.
4428 */
Manuel Schölling2538d962014-05-22 19:45:23 +02004429 if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
Rik van Riel096aa332014-05-16 00:13:32 -04004430 current->wakee_flips >>= 1;
Michael Wang62470412013-07-04 12:55:51 +08004431 current->wakee_flip_decay_ts = jiffies;
4432 }
4433
4434 if (current->last_wakee != p) {
4435 current->last_wakee = p;
4436 current->wakee_flips++;
4437 }
4438}
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004439
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02004440static void task_waking_fair(struct task_struct *p)
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004441{
4442 struct sched_entity *se = &p->se;
4443 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstra3fe16982011-04-05 17:23:48 +02004444 u64 min_vruntime;
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004445
Peter Zijlstra3fe16982011-04-05 17:23:48 +02004446#ifndef CONFIG_64BIT
4447 u64 min_vruntime_copy;
Peter Zijlstra74f8e4b2011-04-05 17:23:47 +02004448
Peter Zijlstra3fe16982011-04-05 17:23:48 +02004449 do {
4450 min_vruntime_copy = cfs_rq->min_vruntime_copy;
4451 smp_rmb();
4452 min_vruntime = cfs_rq->min_vruntime;
4453 } while (min_vruntime != min_vruntime_copy);
4454#else
4455 min_vruntime = cfs_rq->min_vruntime;
4456#endif
4457
4458 se->vruntime -= min_vruntime;
Michael Wang62470412013-07-04 12:55:51 +08004459 record_wakee(p);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01004460}
4461
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004462#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02004463/*
4464 * effective_load() calculates the load change as seen from the root_task_group
4465 *
4466 * Adding load to a group doesn't make a group heavier, but can cause movement
4467 * of group shares between cpus. Assuming the shares were perfectly aligned one
4468 * can calculate the shift in shares.
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004469 *
4470 * Calculate the effective load difference if @wl is added (subtracted) to @tg
4471 * on this @cpu and results in a total addition (subtraction) of @wg to the
4472 * total group weight.
4473 *
4474 * Given a runqueue weight distribution (rw_i) we can compute a shares
4475 * distribution (s_i) using:
4476 *
4477 * s_i = rw_i / \Sum rw_j (1)
4478 *
4479 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
4480 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
4481 * shares distribution (s_i):
4482 *
4483 * rw_i = { 2, 4, 1, 0 }
4484 * s_i = { 2/7, 4/7, 1/7, 0 }
4485 *
4486 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
4487 * task used to run on and the CPU the waker is running on), we need to
4488 * compute the effect of waking a task on either CPU and, in case of a sync
4489 * wakeup, compute the effect of the current task going to sleep.
4490 *
4491 * So for a change of @wl to the local @cpu with an overall group weight change
4492 * of @wl we can compute the new shares distribution (s'_i) using:
4493 *
4494 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
4495 *
4496 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
4497 * differences in waking a task to CPU 0. The additional task changes the
4498 * weight and shares distributions like:
4499 *
4500 * rw'_i = { 3, 4, 1, 0 }
4501 * s'_i = { 3/8, 4/8, 1/8, 0 }
4502 *
4503 * We can then compute the difference in effective weight by using:
4504 *
4505 * dw_i = S * (s'_i - s_i) (3)
4506 *
4507 * Where 'S' is the group weight as seen by its parent.
4508 *
4509 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
4510 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
4511 * 4/7) times the weight of the group.
Peter Zijlstraf5bfb7d2008-06-27 13:41:39 +02004512 */
Peter Zijlstra2069dd72010-11-15 15:47:00 -08004513static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004514{
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004515 struct sched_entity *se = tg->se[cpu];
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02004516
Rik van Riel9722c2d2014-01-06 11:39:12 +00004517 if (!tg->parent) /* the trivial, non-cgroup case */
Peter Zijlstraf1d239f2008-06-27 13:41:38 +02004518 return wl;
4519
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004520 for_each_sched_entity(se) {
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004521 long w, W;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004522
Paul Turner977dda72011-01-14 17:57:50 -08004523 tg = se->my_q->tg;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004524
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004525 /*
4526 * W = @wg + \Sum rw_j
4527 */
4528 W = wg + calc_tg_weight(tg, se->my_q);
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004529
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004530 /*
4531 * w = rw_i + @wl
4532 */
Yuyang Du7ea241a2015-07-15 08:04:42 +08004533 w = cfs_rq_load_avg(se->my_q) + wl;
Peter Zijlstra940959e2008-09-23 15:33:42 +02004534
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004535 /*
4536 * wl = S * s'_i; see (2)
4537 */
4538 if (W > 0 && w < W)
Yuyang Du32a8df42014-12-19 08:29:56 +08004539 wl = (w * (long)tg->shares) / W;
Paul Turner977dda72011-01-14 17:57:50 -08004540 else
4541 wl = tg->shares;
Peter Zijlstra940959e2008-09-23 15:33:42 +02004542
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004543 /*
4544 * Per the above, wl is the new se->load.weight value; since
4545 * those are clipped to [MIN_SHARES, ...) do so now. See
4546 * calc_cfs_shares().
4547 */
Paul Turner977dda72011-01-14 17:57:50 -08004548 if (wl < MIN_SHARES)
4549 wl = MIN_SHARES;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004550
4551 /*
4552 * wl = dw_i = S * (s'_i - s_i); see (3)
4553 */
Yuyang Du9d89c252015-07-15 08:04:37 +08004554 wl -= se->avg.load_avg;
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02004555
4556 /*
4557 * Recursively apply this logic to all parent groups to compute
4558 * the final effective load change on the root group. Since
4559 * only the @tg group gets extra weight, all parent groups can
4560 * only redistribute existing shares. @wl is the shift in shares
4561 * resulting from this level per the above.
4562 */
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004563 wg = 0;
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004564 }
4565
4566 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004567}
4568#else
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004569
Mel Gorman58d081b2013-10-07 11:29:10 +01004570static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004571{
Peter Zijlstra83378262008-06-27 13:41:37 +02004572 return wl;
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004573}
Peter Zijlstra4be9daa2008-06-27 13:41:30 +02004574
Peter Zijlstrabb3469a2008-06-27 13:41:27 +02004575#endif
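A standalone numeric check of the example worked through in the effective_load() comment above (equations (1)-(3)), with unit task weights and an assumed group weight S of 1024; none of this is kernel code.

#include <stdio.h>

int main(void)
{
	double rw[4] = {2, 4, 1, 0};	/* per-cpu runqueue weights from the comment */
	double S = 1024;		/* group weight as seen by its parent (assumed) */
	double wl = 1;			/* one extra unit-weight task woken on CPU 0 */
	double sum = rw[0] + rw[1] + rw[2] + rw[3];

	double s0  = rw[0] / sum;			/* s_i,  per (1) */
	double s0p = (rw[0] + wl) / (sum + wl);		/* s'_i, per (2) */
	double s1  = rw[1] / sum;
	double s1p = rw[1] / (sum + wl);

	/* dw_i = S * (s'_i - s_i), per (3) */
	printf("dw_0 = %.2f (expected  5/56 * S = %.2f)\n", S * (s0p - s0), S * 5.0 / 56.0);
	printf("dw_1 = %.2f (expected -4/56 * S = %.2f)\n", S * (s1p - s1), S * -4.0 / 56.0);
	return 0;
}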
4576
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02004577/*
4578 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
4579 * A waker of many should wake a different task than the one last awakened
4580 * at a frequency roughly N times higher than one of its wakees. In order
4581 * to determine whether we should let the load spread vs consolidating to
4582 * shared cache, we look for a minimum 'flip' frequency of llc_size in one
4583 * partner, and a factor of llc_size higher frequency in the other. With
4584 * both conditions met, we can be relatively sure that the relationship is
4585 * non-monogamous, with partner count exceeding socket size. Waker/wakee
4586 * being client/server, worker/dispatcher, interrupt source or whatever is
4587 * irrelevant; the spread criterion is that the apparent partner count exceeds socket size.
4588 */
Michael Wang62470412013-07-04 12:55:51 +08004589static int wake_wide(struct task_struct *p)
4590{
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02004591 unsigned int master = current->wakee_flips;
4592 unsigned int slave = p->wakee_flips;
Peter Zijlstra7d9ffa82013-07-04 12:56:46 +08004593 int factor = this_cpu_read(sd_llc_size);
Michael Wang62470412013-07-04 12:55:51 +08004594
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02004595 if (master < slave)
4596 swap(master, slave);
4597 if (slave < factor || master < slave * factor)
4598 return 0;
4599 return 1;
Michael Wang62470412013-07-04 12:55:51 +08004600}
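A standalone sketch of the flip-frequency test above; the factor argument stands in for this_cpu_read(sd_llc_size) and the value 8 is just an assumed LLC size.

#include <stdio.h>

/* mirrors the wake_wide() decision on made-up flip counts */
static int wide(unsigned int waker_flips, unsigned int wakee_flips, int factor)
{
	unsigned int master = waker_flips, slave = wakee_flips;

	if (master < slave) {
		unsigned int tmp = master; master = slave; slave = tmp;
	}
	if (slave < factor || master < slave * factor)
		return 0;	/* keep them close: try affine/idle-sibling placement */
	return 1;		/* spread: waker serves many wakees, skip wake_affine */
}

int main(void)
{
	printf("%d\n", wide(100, 10, 8));	/* server flipping ~10x faster, both >= llc: spread */
	printf("%d\n", wide(12, 10, 8));	/* similar flip rates: stay affine */
	printf("%d\n", wide(100, 3, 8));	/* wakee flips below llc_size: stay affine */
	return 0;
}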
4601
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004602static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004603{
Paul Turnere37b6a72011-01-21 20:44:59 -08004604 s64 this_load, load;
Vincent Guittotbd61c982014-08-26 13:06:50 +02004605 s64 this_eff_load, prev_eff_load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004606 int idx, this_cpu, prev_cpu;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004607 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02004608 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004609 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004610
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004611 idx = sd->wake_idx;
4612 this_cpu = smp_processor_id();
4613 prev_cpu = task_cpu(p);
4614 load = source_load(prev_cpu, idx);
4615 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004616
4617 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004618 * If sync wakeup then subtract the (maximum possible)
4619 * effect of the currently running task from the load
4620 * of the current CPU:
4621 */
Peter Zijlstra83378262008-06-27 13:41:37 +02004622 if (sync) {
4623 tg = task_group(current);
Yuyang Du9d89c252015-07-15 08:04:37 +08004624 weight = current->se.avg.load_avg;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004625
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004626 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02004627 load += effective_load(tg, prev_cpu, 0, -weight);
4628 }
4629
4630 tg = task_group(p);
Yuyang Du9d89c252015-07-15 08:04:37 +08004631 weight = p->se.avg.load_avg;
Peter Zijlstra83378262008-06-27 13:41:37 +02004632
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02004633 /*
4634 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004635 * due to the sync cause above having dropped this_load to 0, we'll
4636 * always have an imbalance, but there's really nothing you can do
4637 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02004638 *
4639 * Otherwise check if either cpus are near enough in load to allow this
4640 * task to be woken on this_cpu.
4641 */
Vincent Guittotbd61c982014-08-26 13:06:50 +02004642 this_eff_load = 100;
4643 this_eff_load *= capacity_of(prev_cpu);
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004644
Vincent Guittotbd61c982014-08-26 13:06:50 +02004645 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
4646 prev_eff_load *= capacity_of(this_cpu);
4647
4648 if (this_load > 0) {
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004649 this_eff_load *= this_load +
4650 effective_load(tg, this_cpu, weight, weight);
4651
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004652 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
Vincent Guittotbd61c982014-08-26 13:06:50 +02004653 }
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02004654
Vincent Guittotbd61c982014-08-26 13:06:50 +02004655 balanced = this_eff_load <= prev_eff_load;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004656
Lucas De Marchi41acab82010-03-10 23:37:45 -03004657 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02004658
Vincent Guittot05bfb652014-08-26 13:06:45 +02004659 if (!balanced)
4660 return 0;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004661
Vincent Guittot05bfb652014-08-26 13:06:45 +02004662 schedstat_inc(sd, ttwu_move_affine);
4663 schedstat_inc(p, se.statistics.nr_wakeups_affine);
4664
4665 return 1;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01004666}
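A rough standalone sketch of the balance comparison at the end of wake_affine(), ignoring the effective_load() group adjustment and assuming equal CPU capacities and the common imbalance_pct of 125; all the numbers are made up.

#include <stdio.h>

int main(void)
{
	long this_load = 900, prev_load = 1000;	/* hypothetical biased loads */
	long cap_this = 1024, cap_prev = 1024;	/* capacity_of() of each cpu */
	int imbalance_pct = 125;		/* typical sd->imbalance_pct */
	long this_eff = 100;
	long prev_eff = 100 + (imbalance_pct - 100) / 2;	/* 112: prev keeps a bonus */

	this_eff *= cap_prev;
	prev_eff *= cap_this;

	if (this_load > 0) {
		this_eff *= this_load;
		prev_eff *= prev_load;
	}

	printf("pull the wakee to the waking cpu? %s\n",
	       this_eff <= prev_eff ? "yes" : "no");
	return 0;
}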
4667
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004668/*
4669 * find_idlest_group finds and returns the least busy CPU group within the
4670 * domain.
4671 */
4672static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02004673find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004674 int this_cpu, int sd_flag)
Gregory Haskinse7693a32008-01-25 21:08:09 +01004675{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07004676 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004677 unsigned long min_load = ULONG_MAX, this_load = 0;
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004678 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004679 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004680
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004681 if (sd_flag & SD_BALANCE_WAKE)
4682 load_idx = sd->wake_idx;
4683
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004684 do {
4685 unsigned long load, avg_load;
4686 int local_group;
4687 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004688
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004689 /* Skip over this group if it has no CPUs allowed */
4690 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004691 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004692 continue;
4693
4694 local_group = cpumask_test_cpu(this_cpu,
4695 sched_group_cpus(group));
4696
4697 /* Tally up the load of all CPUs in the group */
4698 avg_load = 0;
4699
4700 for_each_cpu(i, sched_group_cpus(group)) {
4701 /* Bias balancing toward cpus of our domain */
4702 if (local_group)
4703 load = source_load(i, load_idx);
4704 else
4705 load = target_load(i, load_idx);
4706
4707 avg_load += load;
4708 }
4709
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04004710 /* Adjust by relative CPU capacity of the group */
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04004711 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004712
4713 if (local_group) {
4714 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004715 } else if (avg_load < min_load) {
4716 min_load = avg_load;
4717 idlest = group;
4718 }
4719 } while (group = group->next, group != sd->groups);
4720
4721 if (!idlest || 100*this_load < imbalance*min_load)
4722 return NULL;
4723 return idlest;
4724}
4725
4726/*
4727 * find_idlest_cpu - find the idlest cpu among the cpus in group.
4728 */
4729static int
4730find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
4731{
4732 unsigned long load, min_load = ULONG_MAX;
Nicolas Pitre83a0a962014-09-04 11:32:10 -04004733 unsigned int min_exit_latency = UINT_MAX;
4734 u64 latest_idle_timestamp = 0;
4735 int least_loaded_cpu = this_cpu;
4736 int shallowest_idle_cpu = -1;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004737 int i;
4738
4739 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02004740 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Nicolas Pitre83a0a962014-09-04 11:32:10 -04004741 if (idle_cpu(i)) {
4742 struct rq *rq = cpu_rq(i);
4743 struct cpuidle_state *idle = idle_get_state(rq);
4744 if (idle && idle->exit_latency < min_exit_latency) {
4745 /*
4746 * We give priority to a CPU whose idle state
4747 * has the smallest exit latency irrespective
4748 * of any idle timestamp.
4749 */
4750 min_exit_latency = idle->exit_latency;
4751 latest_idle_timestamp = rq->idle_stamp;
4752 shallowest_idle_cpu = i;
4753 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
4754 rq->idle_stamp > latest_idle_timestamp) {
4755 /*
4756 * If equal or no active idle state, then
4757 * the most recently idled CPU might have
4758 * a warmer cache.
4759 */
4760 latest_idle_timestamp = rq->idle_stamp;
4761 shallowest_idle_cpu = i;
4762 }
Yao Dongdong9f967422014-10-28 04:08:06 +00004763 } else if (shallowest_idle_cpu == -1) {
Nicolas Pitre83a0a962014-09-04 11:32:10 -04004764 load = weighted_cpuload(i);
4765 if (load < min_load || (load == min_load && i == this_cpu)) {
4766 min_load = load;
4767 least_loaded_cpu = i;
4768 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01004769 }
4770 }
4771
Nicolas Pitre83a0a962014-09-04 11:32:10 -04004772 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004773}
Gregory Haskinse7693a32008-01-25 21:08:09 +01004774
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004775/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004776 * Try and locate an idle CPU in the sched_domain.
4777 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004778static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004779{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004780 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07004781 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004782 int i = task_cpu(p);
4783
4784 if (idle_cpu(target))
4785 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004786
4787 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004788 * If the previous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004789 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004790 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
4791 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004792
4793 /*
Linus Torvalds37407ea2012-09-16 12:29:43 -07004794 * Otherwise, iterate the domains and find an eligible idle cpu.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004795 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01004796 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08004797 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07004798 sg = sd->groups;
4799 do {
4800 if (!cpumask_intersects(sched_group_cpus(sg),
4801 tsk_cpus_allowed(p)))
4802 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02004803
Linus Torvalds37407ea2012-09-16 12:29:43 -07004804 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01004805 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07004806 goto next;
4807 }
4808
4809 target = cpumask_first_and(sched_group_cpus(sg),
4810 tsk_cpus_allowed(p));
4811 goto done;
4812next:
4813 sg = sg->next;
4814 } while (sg != sd->groups);
4815 }
4816done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004817 return target;
4818}
Vincent Guittot8bb5b002015-03-04 08:48:47 +01004819/*
4820 * get_cpu_usage returns the amount of capacity of a CPU that is used by CFS
4821 * tasks. The unit of the return value must be the one of capacity so we can
4822 * compare the usage with the capacity of the CPU that is available for CFS
4823 * task (ie cpu_capacity).
Yuyang Du9d89c252015-07-15 08:04:37 +08004824 * cfs.avg.util_avg is the sum of running time of runnable tasks on a
Vincent Guittot8bb5b002015-03-04 08:48:47 +01004825 * CPU. It represents the amount of utilization of a CPU in the range
4826 * [0..SCHED_LOAD_SCALE]. The usage of a CPU can't be higher than the full
4827 * capacity of the CPU because it's about the running time on this CPU.
Yuyang Du9d89c252015-07-15 08:04:37 +08004828 * Nevertheless, cfs.avg.util_avg can be higher than SCHED_LOAD_SCALE
4829 * because of unfortunate rounding in util_avg or just
Vincent Guittot8bb5b002015-03-04 08:48:47 +01004830 * after migrating tasks until the average stabilizes with the new running
4831 * time. So we need to check that the usage stays into the range
4832 * [0..cpu_capacity_orig] and cap if necessary.
4833 * Without capping the usage, a group could be seen as overloaded (CPU0 usage
4834 * at 121% + CPU1 usage at 80%) whereas CPU1 has 20% of available capacity
4835 */
4836static int get_cpu_usage(int cpu)
4837{
Yuyang Du9d89c252015-07-15 08:04:37 +08004838 unsigned long usage = cpu_rq(cpu)->cfs.avg.util_avg;
Vincent Guittot8bb5b002015-03-04 08:48:47 +01004839 unsigned long capacity = capacity_orig_of(cpu);
4840
4841 if (usage >= SCHED_LOAD_SCALE)
4842 return capacity;
4843
4844 return (usage * capacity) >> SCHED_LOAD_SHIFT;
4845}
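A standalone sketch of the capping above, assuming SCHED_LOAD_SCALE is 1024 (SCHED_LOAD_SHIFT of 10); the helper name and values are illustrative only.

#include <stdio.h>

#define SCHED_LOAD_SHIFT 10
#define SCHED_LOAD_SCALE (1UL << SCHED_LOAD_SHIFT)

static unsigned long usage_of(unsigned long util_avg, unsigned long capacity_orig)
{
	if (util_avg >= SCHED_LOAD_SCALE)	/* transiently over 100%: clamp to full capacity */
		return capacity_orig;
	return (util_avg * capacity_orig) >> SCHED_LOAD_SHIFT;
}

int main(void)
{
	printf("%lu\n", usage_of(512, 800));	/* 50% busy on an 800-capacity cpu -> 400 */
	printf("%lu\n", usage_of(1200, 800));	/* overshoot after migration -> capped at 800 */
	return 0;
}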
Peter Zijlstraa50bde52009-11-12 15:55:28 +01004846
4847/*
Morten Rasmussende91b9c2014-02-18 14:14:24 +00004848 * select_task_rq_fair: Select target runqueue for the waking task in domains
4849 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
4850 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004851 *
Morten Rasmussende91b9c2014-02-18 14:14:24 +00004852 * Balances load by selecting the idlest cpu in the idlest group, or under
4853 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004854 *
Morten Rasmussende91b9c2014-02-18 14:14:24 +00004855 * Returns the target cpu number.
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004856 *
4857 * preempt must be disabled.
4858 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01004859static int
Peter Zijlstraac66f542013-10-07 11:29:16 +01004860select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004861{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02004862 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004863 int cpu = smp_processor_id();
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02004864 int new_cpu = prev_cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004865 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02004866 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004867
Kirill Tkhaia8edd072014-09-12 17:41:16 +04004868 if (sd_flag & SD_BALANCE_WAKE)
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02004869 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
Gregory Haskinse7693a32008-01-25 21:08:09 +01004870
Peter Zijlstradce840a2011-04-07 14:09:50 +02004871 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004872 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f42882009-12-16 18:04:34 +01004873 if (!(tmp->flags & SD_LOAD_BALANCE))
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02004874 break;
Peter Zijlstrae4f42882009-12-16 18:04:34 +01004875
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004876 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004877 * If both cpu and prev_cpu are part of this domain,
4878 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01004879 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07004880 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
4881 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
4882 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08004883 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004884 }
4885
Alex Shif03542a2012-07-26 08:55:34 +08004886 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02004887 sd = tmp;
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02004888 else if (!want_affine)
4889 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004890 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004891
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02004892 if (affine_sd) {
4893 sd = NULL; /* Prefer wake_affine over balance flags */
4894 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
4895 new_cpu = cpu;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01004896 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02004897
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02004898 if (!sd) {
4899 if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
4900 new_cpu = select_idle_sibling(p, new_cpu);
4901
4902 } else while (sd) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004903 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004904 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004905
Peter Zijlstra0763a662009-09-14 19:37:39 +02004906 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004907 sd = sd->child;
4908 continue;
4909 }
4910
Vincent Guittotc44f2a02013-10-18 13:52:21 +02004911 group = find_idlest_group(sd, p, cpu, sd_flag);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004912 if (!group) {
4913 sd = sd->child;
4914 continue;
4915 }
4916
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02004917 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004918 if (new_cpu == -1 || new_cpu == cpu) {
4919 /* Now try balancing at a lower domain level of cpu */
4920 sd = sd->child;
4921 continue;
4922 }
4923
4924 /* Now try balancing at a lower domain level of new_cpu */
4925 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004926 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004927 sd = NULL;
4928 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02004929 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004930 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02004931 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02004932 sd = tmp;
4933 }
4934 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01004935 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02004936 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01004937
Peter Zijlstrac88d5912009-09-10 13:50:02 +02004938 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01004939}
Paul Turner0a74bef2012-10-04 13:18:30 +02004940
4941/*
4942 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
4943 * cfs_rq_of(p) references at time of call are still valid and identify the
4944 * previous cpu. However, the caller only guarantees p->pi_lock is held; no
4945 * other assumptions, including the state of rq->lock, should be made.
4946 */
Yuyang Du9d89c252015-07-15 08:04:37 +08004947static void migrate_task_rq_fair(struct task_struct *p, int next_cpu)
Paul Turner0a74bef2012-10-04 13:18:30 +02004948{
Paul Turneraff3e492012-10-04 13:18:30 +02004949 /*
Yuyang Du9d89c252015-07-15 08:04:37 +08004950 * We are supposed to update the task to "current" time, so that it is up to
4951 * date and ready to go to the new CPU/cfs_rq. But we have difficulty getting
4952 * what the current time is, so simply throw away the out-of-date time. This
4953 * leaves the wakee task less decayed; giving the wakee more load does not
4954 * sound bad.
Paul Turneraff3e492012-10-04 13:18:30 +02004955 */
Yuyang Du9d89c252015-07-15 08:04:37 +08004956 remove_entity_load_avg(&p->se);
4957
4958 /* Tell new CPU we are migrated */
4959 p->se.avg.last_update_time = 0;
Ben Segall3944a922014-05-15 15:59:20 -07004960
4961 /* We have migrated, no longer consider this task hot */
Yuyang Du9d89c252015-07-15 08:04:37 +08004962 p->se.exec_start = 0;
Paul Turner0a74bef2012-10-04 13:18:30 +02004963}
Yuyang Du12695572015-07-15 08:04:40 +08004964
4965static void task_dead_fair(struct task_struct *p)
4966{
4967 remove_entity_load_avg(&p->se);
4968}
Gregory Haskinse7693a32008-01-25 21:08:09 +01004969#endif /* CONFIG_SMP */
4970
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004971static unsigned long
4972wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004973{
4974 unsigned long gran = sysctl_sched_wakeup_granularity;
4975
4976 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01004977 * Since it's curr that is running now, convert the gran from real-time
4978 * to virtual-time in its units.
Mike Galbraith13814d42010-03-11 17:17:04 +01004979 *
4980 * By using 'se' instead of 'curr' we penalize light tasks, so
4981 * they get preempted easier. That is, if 'se' < 'curr' then
4982 * the resulting gran will be larger, therefore penalizing the
4983 * lighter, if otoh 'se' > 'curr' then the resulting gran will
4984 * be smaller, again penalizing the lighter task.
4985 *
4986 * This is especially important for buddies when the leftmost
4987 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004988 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08004989 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02004990}
4991
4992/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02004993 * Should 'se' preempt 'curr'.
4994 *
4995 * |s1
4996 * |s2
4997 * |s3
4998 * g
4999 * |<--->|c
5000 *
5001 * w(c, s1) = -1
5002 * w(c, s2) = 0
5003 * w(c, s3) = 1
5004 *
5005 */
5006static int
5007wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
5008{
5009 s64 gran, vdiff = curr->vruntime - se->vruntime;
5010
5011 if (vdiff <= 0)
5012 return -1;
5013
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01005014 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02005015 if (vdiff > gran)
5016 return 1;
5017
5018 return 0;
5019}
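A standalone sketch of the three-way decision above, with the granularity already converted to the wakee's virtual time; the numbers are arbitrary.

#include <stdio.h>

/* mirrors wakeup_preempt_entity() on plain integers */
static int preempt_decision(long long curr_vruntime, long long se_vruntime, long long gran)
{
	long long vdiff = curr_vruntime - se_vruntime;

	if (vdiff <= 0)
		return -1;	/* wakee is not ahead: no preemption (s1 in the diagram) */
	if (vdiff > gran)
		return 1;	/* wakee leads by more than gran: preempt (s3) */
	return 0;		/* within the granularity band: leave curr alone (s2) */
}

int main(void)
{
	long long gran = 1000000;	/* hypothetical 1 ms of virtual time */

	printf("%d\n", preempt_decision(5000000, 6000000, gran));	/* -1 */
	printf("%d\n", preempt_decision(5000000, 4500000, gran));	/*  0 */
	printf("%d\n", preempt_decision(5000000, 3500000, gran));	/*  1 */
	return 0;
}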
5020
Peter Zijlstra02479092008-11-04 21:25:10 +01005021static void set_last_buddy(struct sched_entity *se)
5022{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07005023 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5024 return;
5025
5026 for_each_sched_entity(se)
5027 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01005028}
5029
5030static void set_next_buddy(struct sched_entity *se)
5031{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07005032 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5033 return;
5034
5035 for_each_sched_entity(se)
5036 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01005037}
5038
Rik van Rielac53db52011-02-01 09:51:03 -05005039static void set_skip_buddy(struct sched_entity *se)
5040{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07005041 for_each_sched_entity(se)
5042 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05005043}
5044
Peter Zijlstra464b7522008-10-24 11:06:15 +02005045/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005046 * Preempt the current task with a newly woken task if needed:
5047 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02005048static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005049{
5050 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02005051 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01005052 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02005053 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07005054 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01005055
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01005056 if (unlikely(se == pse))
5057 return;
5058
Paul Turner5238cdd2011-07-21 09:43:37 -07005059 /*
Kirill Tkhai163122b2014-08-20 13:48:29 +04005060 * This is possible from callers such as attach_tasks(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07005061	 * unconditionally check_preempt_curr() after an enqueue (which may have
5062	 * led to a throttle). This both saves work and prevents false
5063 * next-buddy nomination below.
5064 */
5065 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
5066 return;
5067
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07005068 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02005069 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07005070 next_buddy_marked = 1;
5071 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02005072
Bharata B Raoaec0a512008-08-28 14:42:49 +05305073 /*
5074 * We can come here with TIF_NEED_RESCHED already set from new task
5075 * wake up path.
Paul Turner5238cdd2011-07-21 09:43:37 -07005076 *
5077 * Note: this also catches the edge-case of curr being in a throttled
5078 * group (e.g. via set_curr_task), since update_curr() (in the
5079 * enqueue of curr) will have resulted in resched being set. This
5080 * prevents us from potentially nominating it as a false LAST_BUDDY
5081 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05305082 */
5083 if (test_tsk_need_resched(curr))
5084 return;
5085
Darren Harta2f5c9a2011-02-22 13:04:33 -08005086 /* Idle tasks are by definition preempted by non-idle tasks. */
5087 if (unlikely(curr->policy == SCHED_IDLE) &&
5088 likely(p->policy != SCHED_IDLE))
5089 goto preempt;
5090
Ingo Molnar91c234b2007-10-15 17:00:18 +02005091 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08005092 * Batch and idle tasks do not preempt non-idle tasks (their preemption
5093 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02005094 */
Ingo Molnar8ed92e52012-10-14 14:28:50 +02005095 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02005096 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005097
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01005098 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07005099 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01005100 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07005101 if (wakeup_preempt_entity(se, pse) == 1) {
5102 /*
5103 * Bias pick_next to pick the sched entity that is
5104 * triggering this preemption.
5105 */
5106 if (!next_buddy_marked)
5107 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01005108 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07005109 }
Jupyung Leea65ac742009-11-17 18:51:40 +09005110
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01005111 return;
5112
5113preempt:
Kirill Tkhai88751252014-06-29 00:03:57 +04005114 resched_curr(rq);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01005115 /*
5116 * Only set the backward buddy when the current task is still
5117 * on the rq. This can happen when a wakeup gets interleaved
5118 * with schedule on the ->pre_schedule() or idle_balance()
5119	 * point, either of which can drop the rq lock.
5120 *
5121 * Also, during early boot the idle thread is in the fair class,
5122	 * for obvious reasons it's a bad idea to schedule back to it.
5123 */
5124 if (unlikely(!se->on_rq || curr == rq->idle))
5125 return;
5126
5127 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
5128 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005129}
5130
Peter Zijlstra606dba22012-02-11 06:05:00 +01005131static struct task_struct *
5132pick_next_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005133{
5134 struct cfs_rq *cfs_rq = &rq->cfs;
5135 struct sched_entity *se;
Peter Zijlstra678d5712012-02-11 06:05:00 +01005136 struct task_struct *p;
Peter Zijlstra37e117c2014-02-14 12:25:08 +01005137 int new_tasks;
Peter Zijlstra678d5712012-02-11 06:05:00 +01005138
Peter Zijlstra6e831252014-02-11 16:11:48 +01005139again:
Peter Zijlstra678d5712012-02-11 06:05:00 +01005140#ifdef CONFIG_FAIR_GROUP_SCHED
5141 if (!cfs_rq->nr_running)
Peter Zijlstra38033c32014-01-23 20:32:21 +01005142 goto idle;
Peter Zijlstra678d5712012-02-11 06:05:00 +01005143
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +01005144 if (prev->sched_class != &fair_sched_class)
Peter Zijlstra678d5712012-02-11 06:05:00 +01005145 goto simple;
5146
5147 /*
5148 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
5149 * likely that a next task is from the same cgroup as the current.
5150 *
5151 * Therefore attempt to avoid putting and setting the entire cgroup
5152 * hierarchy, only change the part that actually changes.
5153 */
5154
5155 do {
5156 struct sched_entity *curr = cfs_rq->curr;
5157
5158 /*
5159 * Since we got here without doing put_prev_entity() we also
5160 * have to consider cfs_rq->curr. If it is still a runnable
5161 * entity, update_curr() will update its vruntime, otherwise
5162 * forget we've ever seen it.
5163 */
Ben Segall54d27362015-04-06 15:28:10 -07005164 if (curr) {
5165 if (curr->on_rq)
5166 update_curr(cfs_rq);
5167 else
5168 curr = NULL;
Peter Zijlstra678d5712012-02-11 06:05:00 +01005169
Ben Segall54d27362015-04-06 15:28:10 -07005170 /*
5171 * This call to check_cfs_rq_runtime() will do the
5172 * throttle and dequeue its entity in the parent(s).
5173 * Therefore the 'simple' nr_running test will indeed
5174 * be correct.
5175 */
5176 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5177 goto simple;
5178 }
Peter Zijlstra678d5712012-02-11 06:05:00 +01005179
5180 se = pick_next_entity(cfs_rq, curr);
5181 cfs_rq = group_cfs_rq(se);
5182 } while (cfs_rq);
5183
5184 p = task_of(se);
5185
5186 /*
5187 * Since we haven't yet done put_prev_entity and if the selected task
5188 * is a different task than we started out with, try and touch the
5189 * least amount of cfs_rqs.
5190 */
5191 if (prev != p) {
5192 struct sched_entity *pse = &prev->se;
5193
5194 while (!(cfs_rq = is_same_group(se, pse))) {
5195 int se_depth = se->depth;
5196 int pse_depth = pse->depth;
5197
5198 if (se_depth <= pse_depth) {
5199 put_prev_entity(cfs_rq_of(pse), pse);
5200 pse = parent_entity(pse);
5201 }
5202 if (se_depth >= pse_depth) {
5203 set_next_entity(cfs_rq_of(se), se);
5204 se = parent_entity(se);
5205 }
5206 }
5207
5208 put_prev_entity(cfs_rq, pse);
5209 set_next_entity(cfs_rq, se);
5210 }
5211
5212 if (hrtick_enabled(rq))
5213 hrtick_start_fair(rq, p);
5214
5215 return p;
5216simple:
5217 cfs_rq = &rq->cfs;
5218#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005219
Tim Blechmann36ace272009-11-24 11:55:45 +01005220 if (!cfs_rq->nr_running)
Peter Zijlstra38033c32014-01-23 20:32:21 +01005221 goto idle;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005222
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +01005223 put_prev_task(rq, prev);
Peter Zijlstra606dba22012-02-11 06:05:00 +01005224
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005225 do {
Peter Zijlstra678d5712012-02-11 06:05:00 +01005226 se = pick_next_entity(cfs_rq, NULL);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01005227 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005228 cfs_rq = group_cfs_rq(se);
5229 } while (cfs_rq);
5230
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005231 p = task_of(se);
Peter Zijlstra678d5712012-02-11 06:05:00 +01005232
Mike Galbraithb39e66e2011-11-22 15:20:07 +01005233 if (hrtick_enabled(rq))
5234 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005235
5236 return p;
Peter Zijlstra38033c32014-01-23 20:32:21 +01005237
5238idle:
Peter Zijlstracbce1a62015-06-11 14:46:54 +02005239 /*
5240 * This is OK, because current is on_cpu, which avoids it being picked
5241 * for load-balance and preemption/IRQs are still disabled avoiding
5242 * further scheduler activity on it and we're being very careful to
5243 * re-start the picking loop.
5244 */
5245 lockdep_unpin_lock(&rq->lock);
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04005246 new_tasks = idle_balance(rq);
Peter Zijlstracbce1a62015-06-11 14:46:54 +02005247 lockdep_pin_lock(&rq->lock);
Peter Zijlstra37e117c2014-02-14 12:25:08 +01005248 /*
5249 * Because idle_balance() releases (and re-acquires) rq->lock, it is
5250 * possible for any higher priority task to appear. In that case we
5251 * must re-start the pick_next_entity() loop.
5252 */
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04005253 if (new_tasks < 0)
Peter Zijlstra37e117c2014-02-14 12:25:08 +01005254 return RETRY_TASK;
5255
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04005256 if (new_tasks > 0)
Peter Zijlstra38033c32014-01-23 20:32:21 +01005257 goto again;
Peter Zijlstra38033c32014-01-23 20:32:21 +01005258
5259 return NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005260}
5261
5262/*
5263 * Account for a descheduled task:
5264 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02005265static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005266{
5267 struct sched_entity *se = &prev->se;
5268 struct cfs_rq *cfs_rq;
5269
5270 for_each_sched_entity(se) {
5271 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02005272 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005273 }
5274}
5275
Rik van Rielac53db52011-02-01 09:51:03 -05005276/*
5277 * sched_yield() is very simple
5278 *
5279 * The magic of dealing with the ->skip buddy is in pick_next_entity.
5280 */
5281static void yield_task_fair(struct rq *rq)
5282{
5283 struct task_struct *curr = rq->curr;
5284 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5285 struct sched_entity *se = &curr->se;
5286
5287 /*
5288 * Are we the only task in the tree?
5289 */
5290 if (unlikely(rq->nr_running == 1))
5291 return;
5292
5293 clear_buddies(cfs_rq, se);
5294
5295 if (curr->policy != SCHED_BATCH) {
5296 update_rq_clock(rq);
5297 /*
5298 * Update run-time statistics of the 'current'.
5299 */
5300 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01005301 /*
5302 * Tell update_rq_clock() that we've just updated,
5303 * so we don't do microscopic update in schedule()
5304 * and double the fastpath cost.
5305 */
Peter Zijlstra9edfbfe2015-01-05 11:18:11 +01005306 rq_clock_skip_update(rq, true);
Rik van Rielac53db52011-02-01 09:51:03 -05005307 }
5308
5309 set_skip_buddy(se);
5310}
5311
Mike Galbraithd95f4122011-02-01 09:50:51 -05005312static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
5313{
5314 struct sched_entity *se = &p->se;
5315
Paul Turner5238cdd2011-07-21 09:43:37 -07005316 /* throttled hierarchies are not runnable */
5317 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05005318 return false;
5319
5320 /* Tell the scheduler that we'd really like pse to run next. */
5321 set_next_buddy(se);
5322
Mike Galbraithd95f4122011-02-01 09:50:51 -05005323 yield_task_fair(rq);
5324
5325 return true;
5326}
5327
Peter Williams681f3e62007-10-24 18:23:51 +02005328#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005329/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02005330 * Fair scheduling class load-balancing methods.
5331 *
5332 * BASICS
5333 *
5334 * The purpose of load-balancing is to achieve the same basic fairness the
5335 * per-cpu scheduler provides, namely provide a proportional amount of compute
5336 * time to each task. This is expressed in the following equation:
5337 *
5338 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
5339 *
5340 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
5341 * W_i,0 is defined as:
5342 *
5343 * W_i,0 = \Sum_j w_i,j (2)
5344 *
5345 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
5346 * is derived from the nice value as per prio_to_weight[].
5347 *
5348 * The weight average is an exponential decay average of the instantaneous
5349 * weight:
5350 *
5351 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
5352 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04005353 * C_i is the compute capacity of cpu i, typically it is the
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02005354 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
5355 * can also include other factors [XXX].
5356 *
5357 * To achieve this balance we define a measure of imbalance which follows
5358 * directly from (1):
5359 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04005360 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02005361 *
5362 * We then move tasks around to minimize the imbalance. In the continuous
5363 * function space it is obvious this converges, in the discrete case we get
5364 * a few fun cases generally called infeasible weight scenarios.
5365 *
5366 * [XXX expand on:
5367 * - infeasible weights;
5368 * - local vs global optima in the discrete case. ]
5369 *
5370 *
5371 * SCHED DOMAINS
5372 *
5373 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
5374 * for all i,j solution, we create a tree of cpus that follows the hardware
5375 * topology where each level pairs two lower groups (or better). This results
5376 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
5377 * tree to only the first of the previous level and we decrease the frequency
5378 * of load-balance at each level inv. proportional to the number of cpus in
5379 * the groups.
5380 *
5381 * This yields:
5382 *
5383 * log_2 n 1 n
5384 * \Sum { --- * --- * 2^i } = O(n) (5)
5385 * i = 0 2^i 2^i
5386 * `- size of each group
5387 * | | `- number of cpus doing load-balance
5388 * | `- freq
5389 * `- sum over all levels
5390 *
5391 * Coupled with a limit on how many tasks we can migrate every balance pass,
5392 * this makes (5) the runtime complexity of the balancer.
5393 *
5394 * An important property here is that each CPU is still (indirectly) connected
5395 * to every other cpu in at most O(log n) steps:
5396 *
5397 * The adjacency matrix of the resulting graph is given by:
5398 *
5399 * log_2 n
5400 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
5401 * k = 0
5402 *
5403 * And you'll find that:
5404 *
5405 * A^(log_2 n)_i,j != 0 for all i,j (7)
5406 *
5407 * Showing there's indeed a path between every cpu in at most O(log n) steps.
5408 * The task movement gives a factor of O(m), giving a convergence complexity
5409 * of:
5410 *
5411 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
5412 *
5413 *
5414 * WORK CONSERVING
5415 *
5416 * In order to avoid CPUs going idle while there's still work to do, new idle
5417 * balancing is more aggressive and has the newly idle cpu iterate up the domain
5418 * tree itself instead of relying on other CPUs to bring it work.
5419 *
5420 * This adds some complexity to both (5) and (8) but it reduces the total idle
5421 * time.
5422 *
5423 * [XXX more?]
5424 *
5425 *
5426 * CGROUPS
5427 *
5428 * Cgroups make a horror show out of (2), instead of a simple sum we get:
5429 *
5430 * s_k,i
5431 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
5432 * S_k
5433 *
5434 * Where
5435 *
5436 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
5437 *
5438 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
5439 *
5440 * The big problem is S_k, it's a global sum needed to compute a local (W_i)
5441 * property.
5442 *
5443 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
5444 * rewrite all of this once again.]
5445 */
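
/*
 * Illustrative sketch only (not part of this file): a one-step update of
 * the weight average from equation (3) above, using a made-up per-cpu
 * structure.  With n = 2 the decay factor is 3/4, so three quarters of the
 * old average is kept and one quarter of the instantaneous weight from (2)
 * is blended in.
 */
struct lb_weight_example {
	unsigned long weight_avg;	/* W_i,n				*/
	unsigned long weight_now;	/* W_i,0 = \Sum_j w_i,j, per (2)	*/
};

static inline void lb_weight_example_update(struct lb_weight_example *w,
					     unsigned int n)
{
	unsigned long scale = 1UL << n;

	/* W'_i,n = (2^n - 1)/2^n * W_i,n + 1/2^n * W_i,0  -- equation (3) */
	w->weight_avg = ((scale - 1) * w->weight_avg + w->weight_now) / scale;
}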
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005446
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09005447static unsigned long __read_mostly max_load_balance_interval = HZ/10;
5448
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005449enum fbq_type { regular, remote, all };
5450
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005451#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01005452#define LBF_NEED_BREAK 0x02
Peter Zijlstra62633222013-08-19 12:41:09 +02005453#define LBF_DST_PINNED 0x04
5454#define LBF_SOME_PINNED 0x08
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005455
5456struct lb_env {
5457 struct sched_domain *sd;
5458
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005459 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05305460 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005461
5462 int dst_cpu;
5463 struct rq *dst_rq;
5464
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305465 struct cpumask *dst_grpmask;
5466 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005467 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005468 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08005469 /* The set of CPUs under consideration for load-balancing */
5470 struct cpumask *cpus;
5471
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005472 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01005473
5474 unsigned int loop;
5475 unsigned int loop_break;
5476 unsigned int loop_max;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005477
5478 enum fbq_type fbq_type;
Kirill Tkhai163122b2014-08-20 13:48:29 +04005479 struct list_head tasks;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005480};
5481
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005482/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02005483 * Is this task likely cache-hot:
5484 */
Hillf Danton5d5e2b12014-06-10 10:58:43 +02005485static int task_hot(struct task_struct *p, struct lb_env *env)
Peter Zijlstra029632f2011-10-25 10:00:11 +02005486{
5487 s64 delta;
5488
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005489 lockdep_assert_held(&env->src_rq->lock);
5490
Peter Zijlstra029632f2011-10-25 10:00:11 +02005491 if (p->sched_class != &fair_sched_class)
5492 return 0;
5493
5494 if (unlikely(p->policy == SCHED_IDLE))
5495 return 0;
5496
5497 /*
5498 * Buddy candidates are cache hot:
5499 */
Hillf Danton5d5e2b12014-06-10 10:58:43 +02005500 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
Peter Zijlstra029632f2011-10-25 10:00:11 +02005501 (&p->se == cfs_rq_of(&p->se)->next ||
5502 &p->se == cfs_rq_of(&p->se)->last))
5503 return 1;
5504
5505 if (sysctl_sched_migration_cost == -1)
5506 return 1;
5507 if (sysctl_sched_migration_cost == 0)
5508 return 0;
5509
Hillf Danton5d5e2b12014-06-10 10:58:43 +02005510 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
Peter Zijlstra029632f2011-10-25 10:00:11 +02005511
5512 return delta < (s64)sysctl_sched_migration_cost;
5513}
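/*
 * Reading the above with illustrative numbers: with the default
 * sysctl_sched_migration_cost of 500000ns (0.5ms), a task that last ran on
 * the source rq less than ~0.5ms ago is considered cache hot; setting the
 * sysctl to -1 treats every task as hot, and 0 treats none as hot.
 */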
5514
Mel Gorman3a7053b2013-10-07 11:29:00 +01005515#ifdef CONFIG_NUMA_BALANCING
Rik van Rielc1ceac62015-05-14 22:59:36 -04005516/*
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305517 * Returns 1, if task migration degrades locality
5518 * Returns 0, if task migration improves locality i.e migration preferred.
5519 * Returns -1, if task migration is not affected by locality.
Rik van Rielc1ceac62015-05-14 22:59:36 -04005520 */
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305521static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
Mel Gorman3a7053b2013-10-07 11:29:00 +01005522{
Rik van Rielb1ad0652014-05-15 13:03:06 -04005523 struct numa_group *numa_group = rcu_dereference(p->numa_group);
Rik van Rielc1ceac62015-05-14 22:59:36 -04005524 unsigned long src_faults, dst_faults;
Mel Gorman3a7053b2013-10-07 11:29:00 +01005525 int src_nid, dst_nid;
5526
Iulia Manda44dba3d2014-10-31 02:13:31 +02005527 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305528 return -1;
5529
5530 if (!sched_feat(NUMA))
5531 return -1;
Mel Gorman7a0f3082013-10-07 11:29:01 +01005532
5533 src_nid = cpu_to_node(env->src_cpu);
5534 dst_nid = cpu_to_node(env->dst_cpu);
5535
Mel Gorman83e1d2c2013-10-07 11:29:27 +01005536 if (src_nid == dst_nid)
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305537 return -1;
Mel Gorman7a0f3082013-10-07 11:29:01 +01005538
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305539 /* Migrating away from the preferred node is always bad. */
5540 if (src_nid == p->numa_preferred_nid) {
5541 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
5542 return 1;
5543 else
5544 return -1;
5545 }
Mel Gorman83e1d2c2013-10-07 11:29:27 +01005546
Rik van Rielc1ceac62015-05-14 22:59:36 -04005547 /* Encourage migration to the preferred node. */
5548 if (dst_nid == p->numa_preferred_nid)
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305549 return 0;
Rik van Rielc1ceac62015-05-14 22:59:36 -04005550
5551 if (numa_group) {
5552 src_faults = group_faults(p, src_nid);
5553 dst_faults = group_faults(p, dst_nid);
5554 } else {
5555 src_faults = task_faults(p, src_nid);
5556 dst_faults = task_faults(p, dst_nid);
5557 }
5558
5559 return dst_faults < src_faults;
Mel Gorman7a0f3082013-10-07 11:29:01 +01005560}
5561
Mel Gorman3a7053b2013-10-07 11:29:00 +01005562#else
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305563static inline int migrate_degrades_locality(struct task_struct *p,
Mel Gorman3a7053b2013-10-07 11:29:00 +01005564 struct lb_env *env)
5565{
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305566 return -1;
Mel Gorman7a0f3082013-10-07 11:29:01 +01005567}
Mel Gorman3a7053b2013-10-07 11:29:00 +01005568#endif
5569
Peter Zijlstra029632f2011-10-25 10:00:11 +02005570/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005571 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
5572 */
5573static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005574int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005575{
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305576 int tsk_cache_hot;
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005577
5578 lockdep_assert_held(&env->src_rq->lock);
5579
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005580 /*
5581 * We do not migrate tasks that are:
Joonsoo Kimd3198082013-04-23 17:27:40 +09005582 * 1) throttled_lb_pair, or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005583 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09005584 * 3) running (obviously), or
5585 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005586 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09005587 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
5588 return 0;
5589
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005590 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005591 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305592
Lucas De Marchi41acab82010-03-10 23:37:45 -03005593 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305594
Peter Zijlstra62633222013-08-19 12:41:09 +02005595 env->flags |= LBF_SOME_PINNED;
5596
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305597 /*
5598 * Remember if this task can be migrated to any other cpu in
5599 * our sched_group. We may want to revisit it if we couldn't
5600 * meet load balance goals by pulling other tasks on src_cpu.
5601 *
5602 * Also avoid computing new_dst_cpu if we have already computed
5603		 * one in the current iteration.
5604 */
Peter Zijlstra62633222013-08-19 12:41:09 +02005605 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305606 return 0;
5607
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005608		/* Prevent re-selecting dst_cpu via env's cpus */
5609 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
5610 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
Peter Zijlstra62633222013-08-19 12:41:09 +02005611 env->flags |= LBF_DST_PINNED;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005612 env->new_dst_cpu = cpu;
5613 break;
5614 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305615 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09005616
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005617 return 0;
5618 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305619
5620	/* Record that we found at least one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005621 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005622
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005623 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03005624 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005625 return 0;
5626 }
5627
5628 /*
5629 * Aggressive migration if:
Mel Gorman3a7053b2013-10-07 11:29:00 +01005630 * 1) destination numa is preferred
5631 * 2) task is cache cold, or
5632 * 3) too many balance attempts have failed.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005633 */
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305634 tsk_cache_hot = migrate_degrades_locality(p, env);
5635 if (tsk_cache_hot == -1)
5636 tsk_cache_hot = task_hot(p, env);
Mel Gorman3a7053b2013-10-07 11:29:00 +01005637
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305638 if (tsk_cache_hot <= 0 ||
Kirill Tkhai7a96c232014-09-22 22:36:12 +04005639 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05305640 if (tsk_cache_hot == 1) {
Mel Gorman3a7053b2013-10-07 11:29:00 +01005641 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
5642 schedstat_inc(p, se.statistics.nr_forced_migrations);
5643 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005644 return 1;
5645 }
5646
Zhang Hang4e2dcb72013-04-10 14:04:55 +08005647 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
5648 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005649}
5650
Peter Zijlstra897c3952009-12-17 17:45:42 +01005651/*
Kirill Tkhai163122b2014-08-20 13:48:29 +04005652 * detach_task() -- detach the task for the migration specified in env
Peter Zijlstra897c3952009-12-17 17:45:42 +01005653 */
Kirill Tkhai163122b2014-08-20 13:48:29 +04005654static void detach_task(struct task_struct *p, struct lb_env *env)
5655{
5656 lockdep_assert_held(&env->src_rq->lock);
5657
5658 deactivate_task(env->src_rq, p, 0);
5659 p->on_rq = TASK_ON_RQ_MIGRATING;
5660 set_task_cpu(p, env->dst_cpu);
5661}
5662
5663/*
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005664 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
Peter Zijlstra897c3952009-12-17 17:45:42 +01005665 * part of active balancing operations within "domain".
Peter Zijlstra897c3952009-12-17 17:45:42 +01005666 *
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005667 * Returns a task if successful and NULL otherwise.
Peter Zijlstra897c3952009-12-17 17:45:42 +01005668 */
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005669static struct task_struct *detach_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01005670{
5671 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01005672
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005673 lockdep_assert_held(&env->src_rq->lock);
5674
Peter Zijlstra367456c2012-02-20 21:49:09 +01005675 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01005676 if (!can_migrate_task(p, env))
5677 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01005678
Kirill Tkhai163122b2014-08-20 13:48:29 +04005679 detach_task(p, env);
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005680
Peter Zijlstra367456c2012-02-20 21:49:09 +01005681 /*
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005682 * Right now, this is only the second place where
Kirill Tkhai163122b2014-08-20 13:48:29 +04005683 * lb_gained[env->idle] is updated (other is detach_tasks)
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005684 * so we can safely collect stats here rather than
Kirill Tkhai163122b2014-08-20 13:48:29 +04005685 * inside detach_tasks().
Peter Zijlstra367456c2012-02-20 21:49:09 +01005686 */
5687 schedstat_inc(env->sd, lb_gained[env->idle]);
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005688 return p;
Peter Zijlstra897c3952009-12-17 17:45:42 +01005689 }
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005690 return NULL;
Peter Zijlstra897c3952009-12-17 17:45:42 +01005691}
5692
Peter Zijlstraeb953082012-04-17 13:38:40 +02005693static const unsigned int sched_nr_migrate_break = 32;
5694
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005695/*
Kirill Tkhai163122b2014-08-20 13:48:29 +04005696 * detach_tasks() -- tries to detach up to imbalance weighted load from
5697 * busiest_rq, as part of a balancing operation within domain "sd".
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005698 *
Kirill Tkhai163122b2014-08-20 13:48:29 +04005699 * Returns number of detached tasks if successful and 0 otherwise.
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005700 */
Kirill Tkhai163122b2014-08-20 13:48:29 +04005701static int detach_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005702{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005703 struct list_head *tasks = &env->src_rq->cfs_tasks;
5704 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01005705 unsigned long load;
Kirill Tkhai163122b2014-08-20 13:48:29 +04005706 int detached = 0;
5707
5708 lockdep_assert_held(&env->src_rq->lock);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005709
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005710 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005711 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005712
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005713 while (!list_empty(tasks)) {
Yuyang Du985d3a42015-07-06 06:11:51 +08005714 /*
5715 * We don't want to steal all, otherwise we may be treated likewise,
5716 * which could at worst lead to a livelock crash.
5717 */
5718 if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
5719 break;
5720
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005721 p = list_first_entry(tasks, struct task_struct, se.group_node);
5722
Peter Zijlstra367456c2012-02-20 21:49:09 +01005723 env->loop++;
5724 /* We've more or less seen every task there is, call it quits */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005725 if (env->loop > env->loop_max)
Peter Zijlstra367456c2012-02-20 21:49:09 +01005726 break;
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005727
5728 /* take a breather every nr_migrate tasks */
Peter Zijlstra367456c2012-02-20 21:49:09 +01005729 if (env->loop > env->loop_break) {
Peter Zijlstraeb953082012-04-17 13:38:40 +02005730 env->loop_break += sched_nr_migrate_break;
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01005731 env->flags |= LBF_NEED_BREAK;
Peter Zijlstraee00e662009-12-17 17:25:20 +01005732 break;
Peter Zijlstraa195f002011-09-22 15:30:18 +02005733 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005734
Joonsoo Kimd3198082013-04-23 17:27:40 +09005735 if (!can_migrate_task(p, env))
Peter Zijlstra367456c2012-02-20 21:49:09 +01005736 goto next;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005737
Peter Zijlstra367456c2012-02-20 21:49:09 +01005738 load = task_h_load(p);
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005739
Peter Zijlstraeb953082012-04-17 13:38:40 +02005740 if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
Peter Zijlstra367456c2012-02-20 21:49:09 +01005741 goto next;
5742
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005743 if ((load / 2) > env->imbalance)
Peter Zijlstra367456c2012-02-20 21:49:09 +01005744 goto next;
5745
Kirill Tkhai163122b2014-08-20 13:48:29 +04005746 detach_task(p, env);
5747 list_add(&p->se.group_node, &env->tasks);
5748
5749 detached++;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005750 env->imbalance -= load;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005751
5752#ifdef CONFIG_PREEMPT
Peter Zijlstraee00e662009-12-17 17:25:20 +01005753 /*
5754 * NEWIDLE balancing is a source of latency, so preemptible
Kirill Tkhai163122b2014-08-20 13:48:29 +04005755 * kernels will stop after the first task is detached to minimize
Peter Zijlstraee00e662009-12-17 17:25:20 +01005756 * the critical section.
5757 */
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005758 if (env->idle == CPU_NEWLY_IDLE)
Peter Zijlstraee00e662009-12-17 17:25:20 +01005759 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005760#endif
5761
Peter Zijlstraee00e662009-12-17 17:25:20 +01005762 /*
5763 * We only want to steal up to the prescribed amount of
5764 * weighted load.
5765 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005766 if (env->imbalance <= 0)
Peter Zijlstraee00e662009-12-17 17:25:20 +01005767 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005768
Peter Zijlstra367456c2012-02-20 21:49:09 +01005769 continue;
5770next:
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005771 list_move_tail(&p->se.group_node, tasks);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005772 }
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01005773
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005774 /*
Kirill Tkhai163122b2014-08-20 13:48:29 +04005775 * Right now, this is one of only two places we collect this stat
5776 * so we can safely collect detach_one_task() stats here rather
5777 * than inside detach_one_task().
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005778 */
Kirill Tkhai163122b2014-08-20 13:48:29 +04005779 schedstat_add(env->sd, lb_gained[env->idle], detached);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005780
Kirill Tkhai163122b2014-08-20 13:48:29 +04005781 return detached;
5782}
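/*
 * Worked example (illustrative numbers only): with env->imbalance = 300 the
 * loop above skips a candidate whose task_h_load() is 700, because
 * 700/2 > 300 and pulling it would overshoot the imbalance; a task with an
 * h_load of 200 is detached instead, leaving env->imbalance = 100 for
 * further candidates.
 */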
5783
5784/*
5785 * attach_task() -- attach the task detached by detach_task() to its new rq.
5786 */
5787static void attach_task(struct rq *rq, struct task_struct *p)
5788{
5789 lockdep_assert_held(&rq->lock);
5790
5791 BUG_ON(task_rq(p) != rq);
5792 p->on_rq = TASK_ON_RQ_QUEUED;
5793 activate_task(rq, p, 0);
5794 check_preempt_curr(rq, p, 0);
5795}
5796
5797/*
5798 * attach_one_task() -- attaches the task returned from detach_one_task() to
5799 * its new rq.
5800 */
5801static void attach_one_task(struct rq *rq, struct task_struct *p)
5802{
5803 raw_spin_lock(&rq->lock);
5804 attach_task(rq, p);
5805 raw_spin_unlock(&rq->lock);
5806}
5807
5808/*
5809 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
5810 * new rq.
5811 */
5812static void attach_tasks(struct lb_env *env)
5813{
5814 struct list_head *tasks = &env->tasks;
5815 struct task_struct *p;
5816
5817 raw_spin_lock(&env->dst_rq->lock);
5818
5819 while (!list_empty(tasks)) {
5820 p = list_first_entry(tasks, struct task_struct, se.group_node);
5821 list_del_init(&p->se.group_node);
5822
5823 attach_task(env->dst_rq, p);
5824 }
5825
5826 raw_spin_unlock(&env->dst_rq->lock);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005827}
5828
Peter Zijlstra230059de2009-12-17 17:47:12 +01005829#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Turner48a16752012-10-04 13:18:31 +02005830static void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005831{
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005832 struct rq *rq = cpu_rq(cpu);
Paul Turner48a16752012-10-04 13:18:31 +02005833 struct cfs_rq *cfs_rq;
5834 unsigned long flags;
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005835
Paul Turner48a16752012-10-04 13:18:31 +02005836 raw_spin_lock_irqsave(&rq->lock, flags);
5837 update_rq_clock(rq);
Yuyang Du9d89c252015-07-15 08:04:37 +08005838
Peter Zijlstra9763b672011-07-13 13:09:25 +02005839 /*
5840 * Iterates the task_group tree in a bottom up fashion, see
5841 * list_add_leaf_cfs_rq() for details.
5842 */
Paul Turner64660c82011-07-21 09:43:36 -07005843 for_each_leaf_cfs_rq(rq, cfs_rq) {
Yuyang Du9d89c252015-07-15 08:04:37 +08005844 /* throttled entities do not contribute to load */
5845 if (throttled_hierarchy(cfs_rq))
5846 continue;
Paul Turner48a16752012-10-04 13:18:31 +02005847
Yuyang Du9d89c252015-07-15 08:04:37 +08005848 if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
5849 update_tg_load_avg(cfs_rq, 0);
5850 }
Paul Turner48a16752012-10-04 13:18:31 +02005851 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005852}
5853
Peter Zijlstra9763b672011-07-13 13:09:25 +02005854/*
Vladimir Davydov68520792013-07-15 17:49:19 +04005855 * Compute the hierarchical load factor for cfs_rq and all its ascendants.
Peter Zijlstra9763b672011-07-13 13:09:25 +02005856 * This needs to be done in a top-down fashion because the load of a child
5857 * group is a fraction of its parent's load.
5858 */
Vladimir Davydov68520792013-07-15 17:49:19 +04005859static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
Peter Zijlstra9763b672011-07-13 13:09:25 +02005860{
Vladimir Davydov68520792013-07-15 17:49:19 +04005861 struct rq *rq = rq_of(cfs_rq);
5862 struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005863 unsigned long now = jiffies;
Vladimir Davydov68520792013-07-15 17:49:19 +04005864 unsigned long load;
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005865
Vladimir Davydov68520792013-07-15 17:49:19 +04005866 if (cfs_rq->last_h_load_update == now)
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005867 return;
5868
Vladimir Davydov68520792013-07-15 17:49:19 +04005869 cfs_rq->h_load_next = NULL;
5870 for_each_sched_entity(se) {
5871 cfs_rq = cfs_rq_of(se);
5872 cfs_rq->h_load_next = se;
5873 if (cfs_rq->last_h_load_update == now)
5874 break;
5875 }
Peter Zijlstraa35b6462012-08-08 21:46:40 +02005876
Vladimir Davydov68520792013-07-15 17:49:19 +04005877 if (!se) {
Yuyang Du7ea241a2015-07-15 08:04:42 +08005878 cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
Vladimir Davydov68520792013-07-15 17:49:19 +04005879 cfs_rq->last_h_load_update = now;
5880 }
5881
5882 while ((se = cfs_rq->h_load_next) != NULL) {
5883 load = cfs_rq->h_load;
Yuyang Du7ea241a2015-07-15 08:04:42 +08005884 load = div64_ul(load * se->avg.load_avg,
5885 cfs_rq_load_avg(cfs_rq) + 1);
Vladimir Davydov68520792013-07-15 17:49:19 +04005886 cfs_rq = group_cfs_rq(se);
5887 cfs_rq->h_load = load;
5888 cfs_rq->last_h_load_update = now;
5889 }
Peter Zijlstra9763b672011-07-13 13:09:25 +02005890}
5891
Peter Zijlstra367456c2012-02-20 21:49:09 +01005892static unsigned long task_h_load(struct task_struct *p)
Peter Zijlstra230059de2009-12-17 17:47:12 +01005893{
Peter Zijlstra367456c2012-02-20 21:49:09 +01005894 struct cfs_rq *cfs_rq = task_cfs_rq(p);
Peter Zijlstra230059de2009-12-17 17:47:12 +01005895
Vladimir Davydov68520792013-07-15 17:49:19 +04005896 update_cfs_rq_h_load(cfs_rq);
Yuyang Du9d89c252015-07-15 08:04:37 +08005897 return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
Yuyang Du7ea241a2015-07-15 08:04:42 +08005898 cfs_rq_load_avg(cfs_rq) + 1);
Peter Zijlstra230059de2009-12-17 17:47:12 +01005899}
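/*
 * Worked example (illustrative numbers only): a cgroup's cfs_rq with an
 * h_load of 512 and a load average of 2048, containing a task whose
 * se.avg.load_avg is 1024, yields task_h_load(p) ~= 1024 * 512 / (2048 + 1)
 * ~= 255, i.e. roughly half of the group's hierarchical load of 512.
 */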
5900#else
Paul Turner48a16752012-10-04 13:18:31 +02005901static inline void update_blocked_averages(int cpu)
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005902{
Vincent Guittot6c1d47c2015-07-15 08:04:38 +08005903 struct rq *rq = cpu_rq(cpu);
5904 struct cfs_rq *cfs_rq = &rq->cfs;
5905 unsigned long flags;
5906
5907 raw_spin_lock_irqsave(&rq->lock, flags);
5908 update_rq_clock(rq);
5909 update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
5910 raw_spin_unlock_irqrestore(&rq->lock, flags);
Peter Zijlstra9e3081c2010-11-15 15:47:02 -08005911}
5912
Peter Zijlstra367456c2012-02-20 21:49:09 +01005913static unsigned long task_h_load(struct task_struct *p)
5914{
Yuyang Du9d89c252015-07-15 08:04:37 +08005915 return p->se.avg.load_avg;
Peter Zijlstra230059de2009-12-17 17:47:12 +01005916}
5917#endif
5918
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005919/********** Helpers for find_busiest_group ************************/
Rik van Rielcaeb1782014-07-28 14:16:28 -04005920
5921enum group_type {
5922 group_other = 0,
5923 group_imbalanced,
5924 group_overloaded,
5925};
5926
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005927/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005928 * sg_lb_stats - stats of a sched_group required for load_balancing
5929 */
5930struct sg_lb_stats {
5931	unsigned long avg_load; /* Avg load across the CPUs of the group */
5932 unsigned long group_load; /* Total load over the CPUs of the group */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005933 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005934 unsigned long load_per_task;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005935 unsigned long group_capacity;
Vincent Guittot8bb5b002015-03-04 08:48:47 +01005936 unsigned long group_usage; /* Total usage of the group */
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005937 unsigned int sum_nr_running; /* Nr tasks running in the group */
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005938 unsigned int idle_cpus;
5939 unsigned int group_weight;
Rik van Rielcaeb1782014-07-28 14:16:28 -04005940 enum group_type group_type;
Vincent Guittotea678212015-02-27 16:54:11 +01005941 int group_no_capacity;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005942#ifdef CONFIG_NUMA_BALANCING
5943 unsigned int nr_numa_running;
5944 unsigned int nr_preferred_running;
5945#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005946};
5947
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005948/*
5949 * sd_lb_stats - Structure to store the statistics of a sched_domain
5950 * during load balancing.
5951 */
5952struct sd_lb_stats {
5953 struct sched_group *busiest; /* Busiest group in this sd */
5954 struct sched_group *local; /* Local group in this sd */
5955 unsigned long total_load; /* Total load of all groups in sd */
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005956 unsigned long total_capacity; /* Total capacity of all groups in sd */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005957 unsigned long avg_load; /* Average load across all groups in sd */
5958
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005959 struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005960 struct sg_lb_stats local_stat; /* Statistics of the local group */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09005961};
5962
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005963static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
5964{
5965 /*
5966 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
5967 * local_stat because update_sg_lb_stats() does a full clear/assignment.
5968 * We must however clear busiest_stat::avg_load because
5969 * update_sd_pick_busiest() reads this before assignment.
5970 */
5971 *sds = (struct sd_lb_stats){
5972 .busiest = NULL,
5973 .local = NULL,
5974 .total_load = 0UL,
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005975 .total_capacity = 0UL,
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005976 .busiest_stat = {
5977 .avg_load = 0UL,
Rik van Rielcaeb1782014-07-28 14:16:28 -04005978 .sum_nr_running = 0,
5979 .group_type = group_other,
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02005980 },
5981 };
5982}
5983
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005984/**
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005985 * get_sd_load_idx - Obtain the load index for a given sched domain.
5986 * @sd: The sched_domain whose load_idx is to be obtained.
Kamalesh Babulaled1b7732013-10-13 23:06:15 +05305987 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
Yacine Belkadie69f6182013-07-12 20:45:47 +02005988 *
5989 * Return: The load index.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005990 */
5991static inline int get_sd_load_idx(struct sched_domain *sd,
5992 enum cpu_idle_type idle)
5993{
5994 int load_idx;
5995
5996 switch (idle) {
5997 case CPU_NOT_IDLE:
5998 load_idx = sd->busy_idx;
5999 break;
6000
6001 case CPU_NEWLY_IDLE:
6002 load_idx = sd->newidle_idx;
6003 break;
6004 default:
6005 load_idx = sd->idle_idx;
6006 break;
6007 }
6008
6009 return load_idx;
6010}
6011
Vincent Guittot26bc3c52014-08-26 13:06:47 +02006012static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006013{
Vincent Guittot26bc3c52014-08-26 13:06:47 +02006014 if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))
6015 return sd->smt_gain / sd->span_weight;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006016
Vincent Guittot26bc3c52014-08-26 13:06:47 +02006017 return SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006018}
6019
Vincent Guittot26bc3c52014-08-26 13:06:47 +02006020unsigned long __weak arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006021{
Vincent Guittot26bc3c52014-08-26 13:06:47 +02006022 return default_scale_cpu_capacity(sd, cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006023}
6024
Nicolas Pitreced549f2014-05-26 18:19:38 -04006025static unsigned long scale_rt_capacity(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006026{
6027 struct rq *rq = cpu_rq(cpu);
Vincent Guittotb5b48602015-02-27 16:54:08 +01006028 u64 total, used, age_stamp, avg;
Peter Zijlstracadefd32014-02-27 10:40:35 +01006029 s64 delta;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006030
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02006031 /*
6032 * Since we're reading these variables without serialization make sure
6033 * we read them once before doing sanity checks on them.
6034 */
Jason Low316c1608d2015-04-28 13:00:20 -07006035 age_stamp = READ_ONCE(rq->age_stamp);
6036 avg = READ_ONCE(rq->rt_avg);
Peter Zijlstracebde6d2015-01-05 11:18:10 +01006037 delta = __rq_clock_broken(rq) - age_stamp;
Venkatesh Pallipadiaa483802010-10-04 17:03:22 -07006038
Peter Zijlstracadefd32014-02-27 10:40:35 +01006039 if (unlikely(delta < 0))
6040 delta = 0;
6041
6042 total = sched_avg_period() + delta;
Peter Zijlstrab654f7d2012-05-22 14:04:28 +02006043
Vincent Guittotb5b48602015-02-27 16:54:08 +01006044 used = div_u64(avg, total);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006045
Vincent Guittotb5b48602015-02-27 16:54:08 +01006046 if (likely(used < SCHED_CAPACITY_SCALE))
6047 return SCHED_CAPACITY_SCALE - used;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006048
Vincent Guittotb5b48602015-02-27 16:54:08 +01006049 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006050}
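/*
 * Worked example (illustrative numbers only): if RT/IRQ activity accounted
 * for about a quarter of the averaging window, 'used' comes out near 256
 * capacity units and the function returns roughly
 * SCHED_CAPACITY_SCALE - 256 = 768, i.e. about 75% of the cpu remains
 * available for CFS tasks.
 */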
6051
Nicolas Pitreced549f2014-05-26 18:19:38 -04006052static void update_cpu_capacity(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006053{
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006054 unsigned long capacity = SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006055 struct sched_group *sdg = sd->groups;
6056
Vincent Guittot26bc3c52014-08-26 13:06:47 +02006057 if (sched_feat(ARCH_CAPACITY))
6058 capacity *= arch_scale_cpu_capacity(sd, cpu);
6059 else
6060 capacity *= default_scale_cpu_capacity(sd, cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006061
Vincent Guittot26bc3c52014-08-26 13:06:47 +02006062 capacity >>= SCHED_CAPACITY_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006063
Vincent Guittotca6d75e2015-02-27 16:54:09 +01006064 cpu_rq(cpu)->cpu_capacity_orig = capacity;
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006065
Nicolas Pitreced549f2014-05-26 18:19:38 -04006066 capacity *= scale_rt_capacity(cpu);
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006067 capacity >>= SCHED_CAPACITY_SHIFT;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006068
Nicolas Pitreced549f2014-05-26 18:19:38 -04006069 if (!capacity)
6070 capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006071
Nicolas Pitreced549f2014-05-26 18:19:38 -04006072 cpu_rq(cpu)->cpu_capacity = capacity;
6073 sdg->sgc->capacity = capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006074}
6075
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006076void update_group_capacity(struct sched_domain *sd, int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006077{
6078 struct sched_domain *child = sd->child;
6079 struct sched_group *group, *sdg = sd->groups;
Vincent Guittotdc7ff762015-03-03 11:35:03 +01006080 unsigned long capacity;
Vincent Guittot4ec44122011-12-12 20:21:08 +01006081 unsigned long interval;
6082
6083 interval = msecs_to_jiffies(sd->balance_interval);
6084 interval = clamp(interval, 1UL, max_load_balance_interval);
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006085 sdg->sgc->next_update = jiffies + interval;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006086
6087 if (!child) {
Nicolas Pitreced549f2014-05-26 18:19:38 -04006088 update_cpu_capacity(sd, cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006089 return;
6090 }
6091
Vincent Guittotdc7ff762015-03-03 11:35:03 +01006092 capacity = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006093
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02006094 if (child->flags & SD_OVERLAP) {
6095 /*
6096 * SD_OVERLAP domains cannot assume that child groups
6097 * span the current group.
6098 */
6099
Peter Zijlstra863bffc2013-08-28 11:44:39 +02006100 for_each_cpu(cpu, sched_group_cpus(sdg)) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006101 struct sched_group_capacity *sgc;
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05306102 struct rq *rq = cpu_rq(cpu);
Peter Zijlstra863bffc2013-08-28 11:44:39 +02006103
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05306104 /*
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006105 * build_sched_domains() -> init_sched_groups_capacity()
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05306106 * gets here before we've attached the domains to the
6107 * runqueues.
6108 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04006109 * Use capacity_of(), which is set irrespective of domains
6110 * in update_cpu_capacity().
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05306111 *
Vincent Guittotdc7ff762015-03-03 11:35:03 +01006112			 * This prevents capacity from being 0 and
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05306113 * causing divide-by-zero issues on boot.
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05306114 */
6115 if (unlikely(!rq->sd)) {
Nicolas Pitreced549f2014-05-26 18:19:38 -04006116 capacity += capacity_of(cpu);
Srikar Dronamraju9abf24d2013-11-12 22:11:26 +05306117 continue;
6118 }
6119
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006120 sgc = rq->sd->groups->sgc;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006121 capacity += sgc->capacity;
Peter Zijlstra863bffc2013-08-28 11:44:39 +02006122 }
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02006123 } else {
6124 /*
6125 * !SD_OVERLAP domains can assume that child groups
6126 * span the current group.
6127 */
6128
6129 group = child->groups;
6130 do {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006131 capacity += group->sgc->capacity;
Peter Zijlstra74a5ce22012-05-23 18:00:43 +02006132 group = group->next;
6133 } while (group != child->groups);
6134 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006135
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006136 sdg->sgc->capacity = capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006137}
6138
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006139/*
Vincent Guittotea678212015-02-27 16:54:11 +01006140 * Check whether the capacity of the rq has been noticeably reduced by side
6141 * activity. The imbalance_pct is used for the threshold.
6142 * Return true if the capacity is reduced.
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006143 */
6144static inline int
Vincent Guittotea678212015-02-27 16:54:11 +01006145check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006146{
Vincent Guittotea678212015-02-27 16:54:11 +01006147 return ((rq->cpu_capacity * sd->imbalance_pct) <
6148 (rq->cpu_capacity_orig * 100));
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006149}
6150
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006151/*
6152 * Group imbalance indicates (and tries to solve) the problem where balancing
6153 * groups is inadequate due to tsk_cpus_allowed() constraints.
6154 *
6155 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
6156 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
6157 * Something like:
6158 *
6159 * { 0 1 2 3 } { 4 5 6 7 }
6160 * * * * *
6161 *
6162 * If we were to balance group-wise we'd place two tasks in the first group and
6163 * two tasks in the second group. Clearly this is undesired as it will overload
6164 * cpu 3 and leave one of the cpus in the second group unused.
6165 *
6166 * The current solution to this issue is detecting the skew in the first group
Peter Zijlstra62633222013-08-19 12:41:09 +02006167 * by noticing the lower domain failed to reach balance and had difficulty
6168 * moving tasks due to affinity constraints.
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006169 *
6170 * When this is so detected; this group becomes a candidate for busiest; see
Kamalesh Babulaled1b7732013-10-13 23:06:15 +05306171 * update_sd_pick_busiest(). And calculate_imbalance() and
Peter Zijlstra62633222013-08-19 12:41:09 +02006172 * find_busiest_group() avoid some of the usual balance conditions to allow it
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006173 * to create an effective group imbalance.
6174 *
6175 * This is a somewhat tricky proposition since the next run might not find the
6176 * group imbalance and decide the groups need to be balanced again. A most
6177 * subtle and fragile situation.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006178 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006179
Peter Zijlstra62633222013-08-19 12:41:09 +02006180static inline int sg_imbalanced(struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006181{
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006182 return group->sgc->imbalance;
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006183}
6184
Peter Zijlstrab37d9312013-08-28 11:50:34 +02006185/*
Vincent Guittotea678212015-02-27 16:54:11 +01006186 * group_has_capacity returns true if the group has spare capacity that could
6187 * be used by some tasks.
6188 * We consider that a group has spare capacity if the number of tasks is
6189 * smaller than the number of CPUs or if the usage is lower than the available
6190 * capacity for CFS tasks.
6191 * For the latter, we use a threshold to stabilize the state, to take into
6192 * account the variance of the tasks' load and to return true only if the
6193 * available capacity is meaningful for the load balancer.
6194 * As an example, an available capacity of 1% can appear but it doesn't bring
6195 * any benefit to the load balancer.
Peter Zijlstrab37d9312013-08-28 11:50:34 +02006196 */
Vincent Guittotea678212015-02-27 16:54:11 +01006197static inline bool
6198group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
Peter Zijlstrab37d9312013-08-28 11:50:34 +02006199{
Vincent Guittotea678212015-02-27 16:54:11 +01006200 if (sgs->sum_nr_running < sgs->group_weight)
6201 return true;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02006202
Vincent Guittotea678212015-02-27 16:54:11 +01006203 if ((sgs->group_capacity * 100) >
6204 (sgs->group_usage * env->sd->imbalance_pct))
6205 return true;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02006206
Vincent Guittotea678212015-02-27 16:54:11 +01006207 return false;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02006208}
6209
Vincent Guittotea678212015-02-27 16:54:11 +01006210/*
6211 * group_is_overloaded returns true if the group has more tasks than it can
6212 * handle.
6213 * group_is_overloaded is not equal to !group_has_capacity because a group
6214 * with exactly the right number of tasks has no more spare capacity but is not
6215 * overloaded, so both group_has_capacity and group_is_overloaded return
6216 * false.
6217 */
6218static inline bool
6219group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
Rik van Rielcaeb1782014-07-28 14:16:28 -04006220{
Vincent Guittotea678212015-02-27 16:54:11 +01006221 if (sgs->sum_nr_running <= sgs->group_weight)
6222 return false;
6223
6224 if ((sgs->group_capacity * 100) <
6225 (sgs->group_usage * env->sd->imbalance_pct))
6226 return true;
6227
6228 return false;
6229}
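/*
 * Worked example (illustrative, assuming sd->imbalance_pct == 125): a group
 * with group_capacity = 1024 and group_usage = 850 fails the
 * group_has_capacity() test (1024 * 100 = 102400 is not greater than
 * 850 * 125 = 106250) and passes the group_is_overloaded() test
 * (102400 < 106250), provided it is also running more tasks than it has
 * CPUs.
 */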
6230
6231static enum group_type group_classify(struct lb_env *env,
6232 struct sched_group *group,
6233 struct sg_lb_stats *sgs)
6234{
6235 if (sgs->group_no_capacity)
Rik van Rielcaeb1782014-07-28 14:16:28 -04006236 return group_overloaded;
6237
6238 if (sg_imbalanced(group))
6239 return group_imbalanced;
6240
6241 return group_other;
6242}
6243
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006244/**
6245 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
6246 * @env: The load balancing environment.
6247 * @group: sched_group whose statistics are to be updated.
6248 * @load_idx: Load index of sched_domain of this_cpu for load calc.
6249 * @local_group: Does group contain this_cpu.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006250 * @sgs: variable to hold the statistics for this group.
Masanari Iidacd3bd4e2014-07-28 12:38:06 +09006251 * @overload: Indicate more than one runnable task for any CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006252 */
6253static inline void update_sg_lb_stats(struct lb_env *env,
6254 struct sched_group *group, int load_idx,
Tim Chen4486edd2014-06-23 12:16:49 -07006255 int local_group, struct sg_lb_stats *sgs,
6256 bool *overload)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006257{
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006258 unsigned long load;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006259 int i;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006260
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006261 memset(sgs, 0, sizeof(*sgs));
6262
Michael Wangb94031302012-07-12 16:10:13 +08006263 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006264 struct rq *rq = cpu_rq(i);
6265
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006266 /* Bias balancing toward cpus of our domain */
Peter Zijlstra62633222013-08-19 12:41:09 +02006267 if (local_group)
Peter Zijlstra04f733b2012-05-11 00:12:02 +02006268 load = target_load(i, load_idx);
Peter Zijlstra62633222013-08-19 12:41:09 +02006269 else
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006270 load = source_load(i, load_idx);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006271
6272 sgs->group_load += load;
Vincent Guittot8bb5b002015-03-04 08:48:47 +01006273 sgs->group_usage += get_cpu_usage(i);
Vincent Guittot65fdac02014-08-26 13:06:46 +02006274 sgs->sum_nr_running += rq->cfs.h_nr_running;
Tim Chen4486edd2014-06-23 12:16:49 -07006275
6276 if (rq->nr_running > 1)
6277 *overload = true;
6278
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006279#ifdef CONFIG_NUMA_BALANCING
6280 sgs->nr_numa_running += rq->nr_numa_running;
6281 sgs->nr_preferred_running += rq->nr_preferred_running;
6282#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006283 sgs->sum_weighted_load += weighted_cpuload(i);
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07006284 if (idle_cpu(i))
6285 sgs->idle_cpus++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006286 }
6287
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006288 /* Adjust by relative CPU capacity of the group */
6289 sgs->group_capacity = group->sgc->capacity;
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006290 sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006291
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006292 if (sgs->sum_nr_running)
Peter Zijlstra38d0f772013-08-15 19:47:56 +02006293 sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006294
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07006295 sgs->group_weight = group->group_weight;
Peter Zijlstrab37d9312013-08-28 11:50:34 +02006296
Vincent Guittotea678212015-02-27 16:54:11 +01006297 sgs->group_no_capacity = group_is_overloaded(env, sgs);
6298 sgs->group_type = group_classify(env, group, sgs);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006299}
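/*
 * Illustrative sketch, not part of the original file: avg_load as
 * computed above is the group load normalized by group capacity, so
 * groups with different numbers of CPUs (or reduced capacity) compare
 * directly. With hypothetical numbers, group_load = 2048 spread over
 * a 2-CPU group with group_capacity = 2048 gives
 *	avg_load = 2048 * SCHED_CAPACITY_SCALE / 2048 = 1024
 * i.e. the group runs at exactly one "full CPU" of load per unit of
 * capacity.
 */
#if 0	/* example only */
static unsigned long example_avg_load(unsigned long group_load,
				      unsigned long group_capacity)
{
	/* same scaling as update_sg_lb_stats() */
	return group_load * SCHED_CAPACITY_SCALE / group_capacity;
}
#endif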
6300
6301/**
Michael Neuling532cb4c2010-06-08 14:57:02 +10006302 * update_sd_pick_busiest - return 1 on busiest group
Randy Dunlapcd968912012-06-08 13:18:33 -07006303 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10006304 * @sds: sched_domain statistics
6305 * @sg: sched_group candidate to be checked for being the busiest
Michael Neulingb6b12292010-06-10 12:06:21 +10006306 * @sgs: sched_group statistics
Michael Neuling532cb4c2010-06-08 14:57:02 +10006307 *
6308 * Determine if @sg is a busier group than the previously selected
6309 * busiest group.
Yacine Belkadie69f6182013-07-12 20:45:47 +02006310 *
6311 * Return: %true if @sg is a busier group than the previously selected
6312 * busiest group. %false otherwise.
Michael Neuling532cb4c2010-06-08 14:57:02 +10006313 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006314static bool update_sd_pick_busiest(struct lb_env *env,
Michael Neuling532cb4c2010-06-08 14:57:02 +10006315 struct sd_lb_stats *sds,
6316 struct sched_group *sg,
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006317 struct sg_lb_stats *sgs)
Michael Neuling532cb4c2010-06-08 14:57:02 +10006318{
Rik van Rielcaeb1782014-07-28 14:16:28 -04006319 struct sg_lb_stats *busiest = &sds->busiest_stat;
Michael Neuling532cb4c2010-06-08 14:57:02 +10006320
Rik van Rielcaeb1782014-07-28 14:16:28 -04006321 if (sgs->group_type > busiest->group_type)
Michael Neuling532cb4c2010-06-08 14:57:02 +10006322 return true;
6323
Rik van Rielcaeb1782014-07-28 14:16:28 -04006324 if (sgs->group_type < busiest->group_type)
6325 return false;
6326
6327 if (sgs->avg_load <= busiest->avg_load)
6328 return false;
6329
6330 /* This is the busiest node in its class. */
6331 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10006332 return true;
6333
6334 /*
6335 * ASYM_PACKING needs to move all the work to the lowest
6336 * numbered CPUs in the group, therefore mark all groups
6337 * higher than ourself as busy.
6338 */
Rik van Rielcaeb1782014-07-28 14:16:28 -04006339 if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10006340 if (!sds->busiest)
6341 return true;
6342
6343 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
6344 return true;
6345 }
6346
6347 return false;
6348}
6349
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006350#ifdef CONFIG_NUMA_BALANCING
6351static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6352{
6353 if (sgs->sum_nr_running > sgs->nr_numa_running)
6354 return regular;
6355 if (sgs->sum_nr_running > sgs->nr_preferred_running)
6356 return remote;
6357 return all;
6358}
6359
6360static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6361{
6362 if (rq->nr_running > rq->nr_numa_running)
6363 return regular;
6364 if (rq->nr_running > rq->nr_preferred_running)
6365 return remote;
6366 return all;
6367}
6368#else
6369static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
6370{
6371 return all;
6372}
6373
6374static inline enum fbq_type fbq_classify_rq(struct rq *rq)
6375{
6376 return regular;
6377}
6378#endif /* CONFIG_NUMA_BALANCING */
6379
Michael Neuling532cb4c2010-06-08 14:57:02 +10006380/**
Hui Kang461819a2011-10-11 23:00:59 -04006381 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07006382 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006383 * @sds: variable to hold the statistics for this sched_domain.
6384 */
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006385static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006386{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006387 struct sched_domain *child = env->sd->child;
6388 struct sched_group *sg = env->sd->groups;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006389 struct sg_lb_stats tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006390 int load_idx, prefer_sibling = 0;
Tim Chen4486edd2014-06-23 12:16:49 -07006391 bool overload = false;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006392
6393 if (child && child->flags & SD_PREFER_SIBLING)
6394 prefer_sibling = 1;
6395
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006396 load_idx = get_sd_load_idx(env->sd, env->idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006397
6398 do {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006399 struct sg_lb_stats *sgs = &tmp_sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006400 int local_group;
6401
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006402 local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006403 if (local_group) {
6404 sds->local = sg;
6405 sgs = &sds->local_stat;
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006406
6407 if (env->idle != CPU_NEWLY_IDLE ||
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006408 time_after_eq(jiffies, sg->sgc->next_update))
6409 update_group_capacity(env->sd, env->dst_cpu);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006410 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006411
Tim Chen4486edd2014-06-23 12:16:49 -07006412 update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
6413 &overload);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006414
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006415 if (local_group)
6416 goto next_group;
6417
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006418 /*
6419 * In case the child domain prefers tasks to go to siblings
Vincent Guittotea678212015-02-27 16:54:11 +01006420 * first, lower the sg capacity so that we'll try
Nikhil Rao75dd3212010-10-15 13:12:30 -07006421 * and move all the excess tasks away. We lower the capacity
6422 * of a group only if the local group has the capacity to fit
Vincent Guittotea678212015-02-27 16:54:11 +01006423 * these excess tasks. The extra check prevents the case where
6424 * you always pull from the heaviest group when it is already
6425 * under-utilized (possible when a large-weight task outweighs
6426 * the other tasks on the system).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006427 */
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006428 if (prefer_sibling && sds->local &&
Vincent Guittotea678212015-02-27 16:54:11 +01006429 group_has_capacity(env, &sds->local_stat) &&
6430 (sgs->sum_nr_running > 1)) {
6431 sgs->group_no_capacity = 1;
6432 sgs->group_type = group_overloaded;
Wanpeng Licb0b9f22014-11-05 07:44:50 +08006433 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006434
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006435 if (update_sd_pick_busiest(env, sds, sg, sgs)) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10006436 sds->busiest = sg;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006437 sds->busiest_stat = *sgs;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006438 }
6439
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006440next_group:
6441 /* Now, start updating sd_lb_stats */
6442 sds->total_load += sgs->group_load;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006443 sds->total_capacity += sgs->group_capacity;
Peter Zijlstrab72ff132013-08-28 10:32:32 +02006444
Michael Neuling532cb4c2010-06-08 14:57:02 +10006445 sg = sg->next;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006446 } while (sg != env->sd->groups);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006447
6448 if (env->sd->flags & SD_NUMA)
6449 env->fbq_type = fbq_classify_group(&sds->busiest_stat);
Tim Chen4486edd2014-06-23 12:16:49 -07006450
6451 if (!env->sd->parent) {
6452 /* update overload indicator if we are at root domain */
6453 if (env->dst_rq->rd->overload != overload)
6454 env->dst_rq->rd->overload = overload;
6455 }
6456
Michael Neuling532cb4c2010-06-08 14:57:02 +10006457}
6458
Michael Neuling532cb4c2010-06-08 14:57:02 +10006459/**
6460 * check_asym_packing - Check to see if the group is packed into the
6461 * sched domain.
6462 *
6463 * This is primarily intended to be used at the sibling level. Some
6464 * cores like POWER7 prefer to use lower numbered SMT threads. In the
6465 * case of POWER7, it can move to lower SMT modes only when higher
6466 * threads are idle. When in lower SMT modes, the threads will
6467 * perform better since they share fewer core resources. Hence when we
6468 * have idle threads, we want them to be the higher ones.
6469 *
6470 * This packing function is run on idle threads. It checks to see if
6471 * the busiest CPU in this domain (core in the P7 case) has a higher
6472 * CPU number than the packing function is being run on. Here we are
6473 * assuming a lower CPU number is equivalent to a lower SMT thread
6474 * number.
6475 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02006476 * Return: 1 when packing is required and a task should be moved to
Michael Neulingb6b12292010-06-10 12:06:21 +10006477 * this CPU. The amount of the imbalance is returned in env->imbalance.
6478 *
Randy Dunlapcd968912012-06-08 13:18:33 -07006479 * @env: The load balancing environment.
Michael Neuling532cb4c2010-06-08 14:57:02 +10006480 * @sds: Statistics of the sched_domain which is to be packed
Michael Neuling532cb4c2010-06-08 14:57:02 +10006481 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006482static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
Michael Neuling532cb4c2010-06-08 14:57:02 +10006483{
6484 int busiest_cpu;
6485
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006486 if (!(env->sd->flags & SD_ASYM_PACKING))
Michael Neuling532cb4c2010-06-08 14:57:02 +10006487 return 0;
6488
6489 if (!sds->busiest)
6490 return 0;
6491
6492 busiest_cpu = group_first_cpu(sds->busiest);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006493 if (env->dst_cpu > busiest_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10006494 return 0;
6495
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006496 env->imbalance = DIV_ROUND_CLOSEST(
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006497 sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006498 SCHED_CAPACITY_SCALE);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006499
Michael Neuling532cb4c2010-06-08 14:57:02 +10006500 return 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006501}
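/*
 * Illustrative example, not part of the original file: when
 * check_asym_packing() decides to pack, the imbalance set above is the
 * busiest group's whole load converted back to absolute units. With
 * hypothetical numbers avg_load = 512 and group_capacity = 1024:
 *	imbalance = DIV_ROUND_CLOSEST(512 * 1024, 1024) = 512
 * i.e. "pull half a CPU's worth of load" toward the lower-numbered CPU.
 */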
6502
6503/**
6504 * fix_small_imbalance - Calculate the minor imbalance that exists
6505 * amongst the groups of a sched_domain, during
6506 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07006507 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006508 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006509 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006510static inline
6511void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006512{
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006513 unsigned long tmp, capa_now = 0, capa_move = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006514 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006515 unsigned long scaled_busy_load_per_task;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006516 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006517
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006518 local = &sds->local_stat;
6519 busiest = &sds->busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006520
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006521 if (!local->sum_nr_running)
6522 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
6523 else if (busiest->load_per_task > local->load_per_task)
6524 imbn = 1;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006525
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006526 scaled_busy_load_per_task =
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006527 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006528 busiest->group_capacity;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006529
Vladimir Davydov3029ede2013-09-15 17:49:14 +04006530 if (busiest->avg_load + scaled_busy_load_per_task >=
6531 local->avg_load + (scaled_busy_load_per_task * imbn)) {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006532 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006533 return;
6534 }
6535
6536 /*
6537 * OK, we don't have enough imbalance to justify moving tasks,
Nicolas Pitreced549f2014-05-26 18:19:38 -04006538 * however we may be able to increase total CPU capacity used by
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006539 * moving them.
6540 */
6541
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006542 capa_now += busiest->group_capacity *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006543 min(busiest->load_per_task, busiest->avg_load);
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006544 capa_now += local->group_capacity *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006545 min(local->load_per_task, local->avg_load);
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006546 capa_now /= SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006547
6548 /* Amount of load we'd subtract */
Vincent Guittota2cd4262014-03-11 17:26:06 +01006549 if (busiest->avg_load > scaled_busy_load_per_task) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006550 capa_move += busiest->group_capacity *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006551 min(busiest->load_per_task,
Vincent Guittota2cd4262014-03-11 17:26:06 +01006552 busiest->avg_load - scaled_busy_load_per_task);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006553 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006554
6555 /* Amount of load we'd add */
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006556 if (busiest->avg_load * busiest->group_capacity <
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006557 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006558 tmp = (busiest->avg_load * busiest->group_capacity) /
6559 local->group_capacity;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006560 } else {
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006561 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006562 local->group_capacity;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006563 }
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006564 capa_move += local->group_capacity *
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02006565 min(local->load_per_task, local->avg_load + tmp);
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006566 capa_move /= SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006567
6568 /* Move if we gain throughput */
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006569 if (capa_move > capa_now)
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006570 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006571}
6572
6573/**
6574 * calculate_imbalance - Calculate the amount of imbalance present within the
6575 * groups of a given sched_domain during load balance.
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006576 * @env: load balance environment
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006577 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006578 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006579static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006580{
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006581 unsigned long max_pull, load_above_capacity = ~0UL;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006582 struct sg_lb_stats *local, *busiest;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006583
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006584 local = &sds->local_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006585 busiest = &sds->busiest_stat;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006586
Rik van Rielcaeb1782014-07-28 14:16:28 -04006587 if (busiest->group_type == group_imbalanced) {
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006588 /*
6589 * In the group_imb case we cannot rely on group-wide averages
6590 * to ensure cpu-load equilibrium, look at wider averages. XXX
6591 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006592 busiest->load_per_task =
6593 min(busiest->load_per_task, sds->avg_load);
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006594 }
6595
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006596 /*
6597 * In the presence of smp nice balancing, certain scenarios can have
6598 * max load less than avg load (as we skip the groups at or below
Nicolas Pitreced549f2014-05-26 18:19:38 -04006599 * its cpu_capacity, while calculating max_load).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006600 */
Vladimir Davydovb1885552013-09-15 17:49:13 +04006601 if (busiest->avg_load <= sds->avg_load ||
6602 local->avg_load >= sds->avg_load) {
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006603 env->imbalance = 0;
6604 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006605 }
6606
Peter Zijlstra9a5d9ba2014-07-29 17:15:11 +02006607 /*
6608 * If there aren't any idle cpus, avoid creating some.
6609 */
6610 if (busiest->group_type == group_overloaded &&
6611 local->group_type == group_overloaded) {
Vincent Guittotea678212015-02-27 16:54:11 +01006612 load_above_capacity = busiest->sum_nr_running *
6613 SCHED_LOAD_SCALE;
6614 if (load_above_capacity > busiest->group_capacity)
6615 load_above_capacity -= busiest->group_capacity;
6616 else
6617 load_above_capacity = ~0UL;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006618 }
6619
6620 /*
6621 * We're trying to get all the cpus to the average_load, so we don't
6622 * want to push ourselves above the average load, nor do we wish to
6623 * reduce the max loaded cpu below the average load. At the same time,
6624 * we also don't want to reduce the group load below the group capacity
6625 * (so that we can implement power-savings policies etc). Thus we look
6626 * for the minimum possible imbalance.
Suresh Siddhadd5feea2010-02-23 16:13:52 -08006627 */
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006628 max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006629
6630 /* How much load to actually move to equalise the imbalance */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006631 env->imbalance = min(
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04006632 max_pull * busiest->group_capacity,
6633 (sds->avg_load - local->avg_load) * local->group_capacity
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006634 ) / SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006635
6636 /*
6637 * if *imbalance is less than the average load per runnable task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03006638 * there is no guarantee that any tasks will be moved, so we consider
 6639 * bumping its value to force at least one task to be
 6640 * moved.
6641 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006642 if (env->imbalance < busiest->load_per_task)
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006643 return fix_small_imbalance(env, sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006644}
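/*
 * Illustrative example, not part of the original file: the imbalance
 * computed above is the smaller of what the busiest group carries above
 * the domain average and what the local group can absorb below it,
 * scaled back from the capacity domain. With hypothetical numbers
 * sds->avg_load = 600, busiest->avg_load = 900, local->avg_load = 400
 * and both group capacities at 1024 (load_above_capacity left at ~0UL):
 *	max_pull  = min(900 - 600, ~0UL) = 300
 *	imbalance = min(300 * 1024, (600 - 400) * 1024) / 1024 = 200
 * so at most 200 units of weighted load are marked for pulling.
 */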
Nikhil Raofab47622010-10-15 13:12:29 -07006645
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006646/******* find_busiest_group() helpers end here *********************/
6647
6648/**
6649 * find_busiest_group - Returns the busiest group within the sched_domain
6650 * if there is an imbalance. If there isn't an imbalance, and
6651 * the user has opted for power-savings, it returns a group whose
6652 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
6653 * such a group exists.
6654 *
6655 * Also calculates the amount of weighted load which should be moved
6656 * to restore balance.
6657 *
Randy Dunlapcd968912012-06-08 13:18:33 -07006658 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006659 *
Yacine Belkadie69f6182013-07-12 20:45:47 +02006660 * Return: - The busiest group if imbalance exists.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006661 * - If no imbalance and user has opted for power-savings balance,
6662 * return the least loaded group whose CPUs can be
6663 * put to idle by rebalancing its tasks onto our group.
6664 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006665static struct sched_group *find_busiest_group(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006666{
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006667 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006668 struct sd_lb_stats sds;
6669
Peter Zijlstra147c5fc2013-08-19 15:22:57 +02006670 init_sd_lb_stats(&sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006671
6672 /*
6673 * Compute the various statistics relevant for load balancing at
6674 * this level.
6675 */
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006676 update_sd_lb_stats(env, &sds);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006677 local = &sds.local_stat;
6678 busiest = &sds.busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006679
Vincent Guittotea678212015-02-27 16:54:11 +01006680 /* ASYM feature bypasses nice load balance check */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006681 if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
6682 check_asym_packing(env, &sds))
Michael Neuling532cb4c2010-06-08 14:57:02 +10006683 return sds.busiest;
6684
Peter Zijlstracc57aa82011-02-21 18:55:32 +01006685 /* There is no busy sibling group to pull tasks from */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006686 if (!sds.busiest || busiest->sum_nr_running == 0)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006687 goto out_balanced;
6688
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04006689 sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
6690 / sds.total_capacity;
Ken Chenb0432d82011-04-07 17:23:22 -07006691
Peter Zijlstra866ab432011-02-21 18:56:47 +01006692 /*
6693 * If the busiest group is imbalanced the below checks don't
Peter Zijlstra30ce5da2013-08-15 20:29:29 +02006694 * work because they assume all things are equal, which typically
Peter Zijlstra866ab432011-02-21 18:56:47 +01006695 * isn't true due to cpus_allowed constraints and the like.
6696 */
Rik van Rielcaeb1782014-07-28 14:16:28 -04006697 if (busiest->group_type == group_imbalanced)
Peter Zijlstra866ab432011-02-21 18:56:47 +01006698 goto force_balance;
6699
Peter Zijlstracc57aa82011-02-21 18:55:32 +01006700 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
Vincent Guittotea678212015-02-27 16:54:11 +01006701 if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
6702 busiest->group_no_capacity)
Nikhil Raofab47622010-10-15 13:12:29 -07006703 goto force_balance;
6704
Peter Zijlstracc57aa82011-02-21 18:55:32 +01006705 /*
Zhihui Zhang9c58c792014-09-20 21:24:36 -04006706 * If the local group is busier than the selected busiest group
Peter Zijlstracc57aa82011-02-21 18:55:32 +01006707 * don't try and pull any tasks.
6708 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006709 if (local->avg_load >= busiest->avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006710 goto out_balanced;
6711
Peter Zijlstracc57aa82011-02-21 18:55:32 +01006712 /*
6713 * Don't pull any tasks if this group is already above the domain
6714 * average load.
6715 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006716 if (local->avg_load >= sds.avg_load)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006717 goto out_balanced;
6718
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006719 if (env->idle == CPU_IDLE) {
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07006720 /*
Vincent Guittot43f4d662014-10-01 15:38:55 +02006721 * This cpu is idle. If the busiest group is not overloaded
6722 * and there is no imbalance between this and busiest group
6723 * wrt idle cpus, it is balanced. The imbalance becomes
6724 * significant if the diff is greater than 1, otherwise we
6725 * might end up just moving the imbalance to another group.
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07006726 */
Vincent Guittot43f4d662014-10-01 15:38:55 +02006727 if ((busiest->group_type != group_overloaded) &&
6728 (local->idle_cpus <= (busiest->idle_cpus + 1)))
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07006729 goto out_balanced;
Peter Zijlstrac186faf2011-02-21 18:52:53 +01006730 } else {
6731 /*
6732 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
6733 * imbalance_pct to be conservative.
6734 */
Joonsoo Kim56cf5152013-08-06 17:36:43 +09006735 if (100 * busiest->avg_load <=
6736 env->sd->imbalance_pct * local->avg_load)
Peter Zijlstrac186faf2011-02-21 18:52:53 +01006737 goto out_balanced;
Suresh Siddhaaae6d3d2010-09-17 15:02:32 -07006738 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006739
Nikhil Raofab47622010-10-15 13:12:29 -07006740force_balance:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006741 /* Looks like there is an imbalance. Compute it */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006742 calculate_imbalance(env, &sds);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006743 return sds.busiest;
6744
6745out_balanced:
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006746 env->imbalance = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006747 return NULL;
6748}
6749
6750/*
6751 * find_busiest_queue - find the busiest runqueue among the cpus in group.
6752 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006753static struct rq *find_busiest_queue(struct lb_env *env,
Michael Wangb94031302012-07-12 16:10:13 +08006754 struct sched_group *group)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006755{
6756 struct rq *busiest = NULL, *rq;
Nicolas Pitreced549f2014-05-26 18:19:38 -04006757 unsigned long busiest_load = 0, busiest_capacity = 1;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006758 int i;
6759
Peter Zijlstra6906a402013-08-19 15:20:21 +02006760 for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
Vincent Guittotea678212015-02-27 16:54:11 +01006761 unsigned long capacity, wl;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006762 enum fbq_type rt;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006763
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006764 rq = cpu_rq(i);
6765 rt = fbq_classify_rq(rq);
6766
6767 /*
6768 * We classify groups/runqueues into three groups:
6769 * - regular: there are !numa tasks
6770 * - remote: there are numa tasks that run on the 'wrong' node
6771 * - all: there is no distinction
6772 *
6773 * In order to avoid migrating ideally placed numa tasks,
6774 * ignore those when there's better options.
6775 *
6776 * If we ignore the actual busiest queue to migrate another
6777 * task, the next balance pass can still reduce the busiest
6778 * queue by moving tasks around inside the node.
6779 *
6780 * If we cannot move enough load due to this classification
6781 * the next pass will adjust the group classification and
6782 * allow migration of more tasks.
6783 *
6784 * Both cases only affect the total convergence complexity.
6785 */
6786 if (rt > env->fbq_type)
6787 continue;
6788
Nicolas Pitreced549f2014-05-26 18:19:38 -04006789 capacity = capacity_of(i);
Srivatsa Vaddagiri9d5efe02010-06-08 14:57:02 +10006790
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006791 wl = weighted_cpuload(i);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006792
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006793 /*
6794 * When comparing with imbalance, use weighted_cpuload()
Nicolas Pitreced549f2014-05-26 18:19:38 -04006795 * which is not scaled with the cpu capacity.
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006796 */
Vincent Guittotea678212015-02-27 16:54:11 +01006797
6798 if (rq->nr_running == 1 && wl > env->imbalance &&
6799 !check_cpu_capacity(rq, env->sd))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006800 continue;
6801
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006802 /*
6803 * For the load comparisons with the other cpu's, consider
Nicolas Pitreced549f2014-05-26 18:19:38 -04006804 * the weighted_cpuload() scaled with the cpu capacity, so
6805 * that the load can be moved away from the cpu that is
6806 * potentially running at a lower capacity.
Joonsoo Kim95a79b82013-08-06 17:36:41 +09006807 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04006808 * Thus we're looking for max(wl_i / capacity_i), crosswise
Joonsoo Kim95a79b82013-08-06 17:36:41 +09006809 * multiplication to rid ourselves of the division works out
Nicolas Pitreced549f2014-05-26 18:19:38 -04006810 * to: wl_i * capacity_j > wl_j * capacity_i; where j is
6811 * our previous maximum.
Thomas Gleixner6e40f5b2010-02-16 16:48:56 +01006812 */
Nicolas Pitreced549f2014-05-26 18:19:38 -04006813 if (wl * busiest_capacity > busiest_load * capacity) {
Joonsoo Kim95a79b82013-08-06 17:36:41 +09006814 busiest_load = wl;
Nicolas Pitreced549f2014-05-26 18:19:38 -04006815 busiest_capacity = capacity;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006816 busiest = rq;
6817 }
6818 }
6819
6820 return busiest;
6821}
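/*
 * Illustrative sketch, not part of the original file: the crosswise
 * multiplication above picks max(wl_i / capacity_i) without dividing.
 * With hypothetical numbers, a runqueue with wl = 600 on a CPU of
 * capacity 512 (ratio ~1.17) is busier than one with wl = 900 on a
 * full-capacity CPU of 1024 (ratio ~0.88), because
 *	600 * 1024 = 614400  >  900 * 512 = 460800
 */
#if 0	/* example only */
static bool example_is_busier(unsigned long wl_new, unsigned long cap_new,
			      unsigned long wl_cur, unsigned long cap_cur)
{
	/* same test as the loop in find_busiest_queue() */
	return wl_new * cap_cur > wl_cur * cap_new;
}
#endif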
6822
6823/*
6824 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
6825 * so long as it is large enough.
6826 */
6827#define MAX_PINNED_INTERVAL 512
6828
6829/* Working cpumask for load_balance and load_balance_newidle. */
Joonsoo Kime6252c32013-04-23 17:27:41 +09006830DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006831
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006832static int need_active_balance(struct lb_env *env)
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01006833{
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006834 struct sched_domain *sd = env->sd;
6835
6836 if (env->idle == CPU_NEWLY_IDLE) {
Michael Neuling532cb4c2010-06-08 14:57:02 +10006837
6838 /*
6839 * ASYM_PACKING needs to force migrate tasks from busy but
6840 * higher numbered CPUs in order to pack all tasks in the
6841 * lowest numbered CPUs.
6842 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006843 if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
Michael Neuling532cb4c2010-06-08 14:57:02 +10006844 return 1;
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01006845 }
6846
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01006847 /*
6848 * The dst_cpu is idle and the src_cpu CPU has only 1 CFS task.
6849 * It's worth migrating the task if the src_cpu's capacity is reduced
6850 * because of other sched_class or IRQs if more capacity stays
6851 * available on dst_cpu.
6852 */
6853 if ((env->idle != CPU_NOT_IDLE) &&
6854 (env->src_rq->cfs.h_nr_running == 1)) {
6855 if ((check_cpu_capacity(env->src_rq, sd)) &&
6856 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
6857 return 1;
6858 }
6859
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01006860 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
6861}
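/*
 * Illustrative example, not part of the original file: the capacity
 * check in need_active_balance() compares src and dst through
 * imbalance_pct. With hypothetical numbers, a src_cpu whose capacity
 * has been reduced to 640 by RT/IRQ pressure, a dst_cpu at 1024 and
 * imbalance_pct = 125:
 *	640 * 125 = 80000  <  1024 * 100 = 102400
 * so migrating the lone CFS task to the idle dst_cpu is worthwhile.
 */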
6862
Tejun Heo969c7922010-05-06 18:49:21 +02006863static int active_load_balance_cpu_stop(void *data);
6864
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006865static int should_we_balance(struct lb_env *env)
6866{
6867 struct sched_group *sg = env->sd->groups;
6868 struct cpumask *sg_cpus, *sg_mask;
6869 int cpu, balance_cpu = -1;
6870
6871 /*
6872 * In the newly idle case, we will allow all the cpus
6873 * to do the newly idle load balance.
6874 */
6875 if (env->idle == CPU_NEWLY_IDLE)
6876 return 1;
6877
6878 sg_cpus = sched_group_cpus(sg);
6879 sg_mask = sched_group_mask(sg);
6880 /* Try to find first idle cpu */
6881 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
6882 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
6883 continue;
6884
6885 balance_cpu = cpu;
6886 break;
6887 }
6888
6889 if (balance_cpu == -1)
6890 balance_cpu = group_balance_cpu(sg);
6891
6892 /*
6893 * First idle cpu or the first cpu (busiest) in this sched group
6894 * is eligible for doing load balancing at this and above domains.
6895 */
Joonsoo Kimb0cff9d2013-09-10 15:54:49 +09006896 return balance_cpu == env->dst_cpu;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006897}
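/*
 * Illustrative example, not part of the original file: suppose a
 * sched_group spans CPUs 4-7, CPU 4 is busy and runs the periodic
 * balance, and CPU 6 is the first idle CPU in the group. Then
 * balance_cpu = 6, should_we_balance() returns false on CPU 4, and
 * only CPU 6 (or, with no idle CPU, group_balance_cpu()) actually
 * walks this domain, so the group is not balanced redundantly.
 */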
6898
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006899/*
6900 * Check this_cpu to ensure it is balanced within domain. Attempt to move
6901 * tasks if there is an imbalance.
6902 */
6903static int load_balance(int this_cpu, struct rq *this_rq,
6904 struct sched_domain *sd, enum cpu_idle_type idle,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006905 int *continue_balancing)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006906{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306907 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra62633222013-08-19 12:41:09 +02006908 struct sched_domain *sd_parent = sd->parent;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006909 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006910 struct rq *busiest;
6911 unsigned long flags;
Christoph Lameter4ba29682014-08-26 19:12:21 -05006912 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006913
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006914 struct lb_env env = {
6915 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01006916 .dst_cpu = this_cpu,
6917 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306918 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006919 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02006920 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08006921 .cpus = cpus,
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01006922 .fbq_type = all,
Kirill Tkhai163122b2014-08-20 13:48:29 +04006923 .tasks = LIST_HEAD_INIT(env.tasks),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006924 };
6925
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006926 /*
6927 * For NEWLY_IDLE load_balancing, we don't need to consider
6928 * other cpus in our group
6929 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006930 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006931 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09006932
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006933 cpumask_copy(cpus, cpu_active_mask);
6934
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006935 schedstat_inc(sd, lb_count[idle]);
6936
6937redo:
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006938 if (!should_we_balance(&env)) {
6939 *continue_balancing = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006940 goto out_balanced;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006941 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006942
Joonsoo Kim23f0d202013-08-06 17:36:42 +09006943 group = find_busiest_group(&env);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006944 if (!group) {
6945 schedstat_inc(sd, lb_nobusyg[idle]);
6946 goto out_balanced;
6947 }
6948
Michael Wangb94031302012-07-12 16:10:13 +08006949 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006950 if (!busiest) {
6951 schedstat_inc(sd, lb_nobusyq[idle]);
6952 goto out_balanced;
6953 }
6954
Michael Wang78feefc2012-08-06 16:41:59 +08006955 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006956
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006957 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006958
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01006959 env.src_cpu = busiest->cpu;
6960 env.src_rq = busiest;
6961
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006962 ld_moved = 0;
6963 if (busiest->nr_running > 1) {
6964 /*
6965 * Attempt to move tasks. If find_busiest_group has found
6966 * an imbalance but busiest->nr_running <= 1, the group is
6967 * still unbalanced. ld_moved simply stays zero, so it is
6968 * correctly treated as an imbalance.
6969 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006970 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02006971 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006972
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01006973more_balance:
Kirill Tkhai163122b2014-08-20 13:48:29 +04006974 raw_spin_lock_irqsave(&busiest->lock, flags);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306975
6976 /*
6977 * cur_ld_moved - load moved in current iteration
6978 * ld_moved - cumulative load moved across iterations
6979 */
Kirill Tkhai163122b2014-08-20 13:48:29 +04006980 cur_ld_moved = detach_tasks(&env);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006981
6982 /*
Kirill Tkhai163122b2014-08-20 13:48:29 +04006983 * We've detached some tasks from busiest_rq. Every
6984 * task is marked "TASK_ON_RQ_MIGRATING", so we can safely
6985 * unlock busiest->lock, and we are able to be sure
6986 * that nobody can manipulate the tasks in parallel.
6987 * See task_rq_lock() family for the details.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006988 */
Kirill Tkhai163122b2014-08-20 13:48:29 +04006989
6990 raw_spin_unlock(&busiest->lock);
6991
6992 if (cur_ld_moved) {
6993 attach_tasks(&env);
6994 ld_moved += cur_ld_moved;
6995 }
6996
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006997 local_irq_restore(flags);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306998
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09006999 if (env.flags & LBF_NEED_BREAK) {
7000 env.flags &= ~LBF_NEED_BREAK;
7001 goto more_balance;
7002 }
7003
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307004 /*
7005 * Revisit (affine) tasks on src_cpu that couldn't be moved to
7006 * us and move them to an alternate dst_cpu in our sched_group
7007 * where they can run. The upper limit on how many times we
7008 * iterate on the same src_cpu is dependent on the number of cpus in our
7009 * sched_group.
7010 *
7011 * This changes load balance semantics a bit on who can move
7012 * load to a given_cpu. In addition to the given_cpu itself
7013 * (or a ilb_cpu acting on its behalf where given_cpu is
7014 * nohz-idle), we now have balance_cpu in a position to move
7015 * load to given_cpu. In rare situations, this may cause
7016 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
7017 * _independently_ and at _same_ time to move some load to
7018 * given_cpu) causing excess load to be moved to given_cpu.
7019 * This however should not happen so much in practice and
7020 * moreover subsequent load balance cycles should correct the
7021 * excess load moved.
7022 */
Peter Zijlstra62633222013-08-19 12:41:09 +02007023 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307024
Vladimir Davydov7aff2e32013-09-15 21:30:13 +04007025 /* Prevent to re-select dst_cpu via env's cpus */
7026 cpumask_clear_cpu(env.dst_cpu, env.cpus);
7027
Michael Wang78feefc2012-08-06 16:41:59 +08007028 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307029 env.dst_cpu = env.new_dst_cpu;
Peter Zijlstra62633222013-08-19 12:41:09 +02007030 env.flags &= ~LBF_DST_PINNED;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307031 env.loop = 0;
7032 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09007033
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307034 /*
7035 * Go back to "more_balance" rather than "redo" since we
7036 * need to continue with same src_cpu.
7037 */
7038 goto more_balance;
7039 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007040
Peter Zijlstra62633222013-08-19 12:41:09 +02007041 /*
7042 * We failed to reach balance because of affinity.
7043 */
7044 if (sd_parent) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007045 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
Peter Zijlstra62633222013-08-19 12:41:09 +02007046
Vincent Guittotafdeee02014-08-26 13:06:44 +02007047 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
Peter Zijlstra62633222013-08-19 12:41:09 +02007048 *group_imbalance = 1;
Peter Zijlstra62633222013-08-19 12:41:09 +02007049 }
7050
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007051 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007052 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007053 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05307054 if (!cpumask_empty(cpus)) {
7055 env.loop = 0;
7056 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007057 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05307058 }
Vincent Guittotafdeee02014-08-26 13:06:44 +02007059 goto out_all_pinned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007060 }
7061 }
7062
7063 if (!ld_moved) {
7064 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07007065 /*
7066 * Increment the failure counter only on periodic balance.
7067 * We do not want newidle balance, which can be very
7068 * frequent, pollute the failure counter causing
7069 * excessive cache_hot migrations and active balances.
7070 */
7071 if (idle != CPU_NEWLY_IDLE)
7072 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007073
Peter Zijlstrabd939f42012-05-02 14:20:37 +02007074 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007075 raw_spin_lock_irqsave(&busiest->lock, flags);
7076
Tejun Heo969c7922010-05-06 18:49:21 +02007077 /* don't kick the active_load_balance_cpu_stop,
7078 * if the curr task on busiest cpu can't be
7079 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007080 */
7081 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02007082 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007083 raw_spin_unlock_irqrestore(&busiest->lock,
7084 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007085 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007086 goto out_one_pinned;
7087 }
7088
Tejun Heo969c7922010-05-06 18:49:21 +02007089 /*
7090 * ->active_balance synchronizes accesses to
7091 * ->active_balance_work. Once set, it's cleared
7092 * only after active load balance is finished.
7093 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007094 if (!busiest->active_balance) {
7095 busiest->active_balance = 1;
7096 busiest->push_cpu = this_cpu;
7097 active_balance = 1;
7098 }
7099 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02007100
Peter Zijlstrabd939f42012-05-02 14:20:37 +02007101 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02007102 stop_one_cpu_nowait(cpu_of(busiest),
7103 active_load_balance_cpu_stop, busiest,
7104 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02007105 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007106
7107 /*
7108 * We've kicked active balancing, reset the failure
7109 * counter.
7110 */
7111 sd->nr_balance_failed = sd->cache_nice_tries+1;
7112 }
7113 } else
7114 sd->nr_balance_failed = 0;
7115
7116 if (likely(!active_balance)) {
7117 /* We were unbalanced, so reset the balancing interval */
7118 sd->balance_interval = sd->min_interval;
7119 } else {
7120 /*
7121 * If we've begun active balancing, start to back off. This
7122 * case may not be covered by the all_pinned logic if there
7123 * is only 1 task on the busy runqueue (because we don't call
Kirill Tkhai163122b2014-08-20 13:48:29 +04007124 * detach_tasks).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007125 */
7126 if (sd->balance_interval < sd->max_interval)
7127 sd->balance_interval *= 2;
7128 }
7129
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007130 goto out;
7131
7132out_balanced:
Vincent Guittotafdeee02014-08-26 13:06:44 +02007133 /*
7134 * We reach balance although we may have faced some affinity
7135 * constraints. Clear the imbalance flag if it was set.
7136 */
7137 if (sd_parent) {
7138 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7139
7140 if (*group_imbalance)
7141 *group_imbalance = 0;
7142 }
7143
7144out_all_pinned:
7145 /*
7146 * We reach balance because all tasks are pinned at this level so
7147 * we can't migrate them. Let the imbalance flag set so parent level
7148 * can try to migrate them.
7149 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007150 schedstat_inc(sd, lb_balanced[idle]);
7151
7152 sd->nr_balance_failed = 0;
7153
7154out_one_pinned:
7155 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007156 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02007157 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007158 (sd->balance_interval < sd->max_interval))
7159 sd->balance_interval *= 2;
7160
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08007161 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007162out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007163 return ld_moved;
7164}
7165
Jason Low52a08ef2014-05-08 17:49:22 -07007166static inline unsigned long
7167get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
7168{
7169 unsigned long interval = sd->balance_interval;
7170
7171 if (cpu_busy)
7172 interval *= sd->busy_factor;
7173
7174 /* scale ms to jiffies */
7175 interval = msecs_to_jiffies(interval);
7176 interval = clamp(interval, 1UL, max_load_balance_interval);
7177
7178 return interval;
7179}
7180
7181static inline void
7182update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
7183{
7184 unsigned long interval, next;
7185
7186 interval = get_sd_balance_interval(sd, cpu_busy);
7187 next = sd->last_balance + interval;
7188
7189 if (time_after(*next_balance, next))
7190 *next_balance = next;
7191}
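/*
 * Illustrative sketch, not part of the original file: how the two
 * helpers above combine. Assuming balance_interval = 64 (ms) and
 * busy_factor = 32, a busy CPU stretches the interval to 64 * 32 ms
 * before converting to jiffies and clamping to
 * max_load_balance_interval, while an idle CPU keeps 64 ms;
 * update_next_balance() then remembers the earliest resulting
 * deadline across the domains it is called for.
 */
#if 0	/* example only, sd is any valid sched_domain */
static void example_next_balance(struct sched_domain *sd)
{
	unsigned long next_balance = jiffies + HZ;

	update_next_balance(sd, 1, &next_balance);	/* busy: backed off */
	update_next_balance(sd, 0, &next_balance);	/* idle: sooner */
}
#endif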
7192
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007193/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007194 * idle_balance is called by schedule() if this_cpu is about to become
7195 * idle. Attempts to pull tasks from other CPUs.
7196 */
Peter Zijlstra6e831252014-02-11 16:11:48 +01007197static int idle_balance(struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007198{
Jason Low52a08ef2014-05-08 17:49:22 -07007199 unsigned long next_balance = jiffies + HZ;
7200 int this_cpu = this_rq->cpu;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007201 struct sched_domain *sd;
7202 int pulled_task = 0;
Jason Low9bd721c2013-09-13 11:26:52 -07007203 u64 curr_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007204
Peter Zijlstra6e831252014-02-11 16:11:48 +01007205 idle_enter_fair(this_rq);
Jason Low0e5b5332014-04-28 15:45:54 -07007206
Peter Zijlstra6e831252014-02-11 16:11:48 +01007207 /*
7208 * We must set idle_stamp _before_ calling idle_balance(), such that we
7209 * measure the duration of idle_balance() as idle time.
7210 */
7211 this_rq->idle_stamp = rq_clock(this_rq);
7212
Tim Chen4486edd2014-06-23 12:16:49 -07007213 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
7214 !this_rq->rd->overload) {
Jason Low52a08ef2014-05-08 17:49:22 -07007215 rcu_read_lock();
7216 sd = rcu_dereference_check_sched_domain(this_rq->sd);
7217 if (sd)
7218 update_next_balance(sd, 0, &next_balance);
7219 rcu_read_unlock();
7220
Peter Zijlstra6e831252014-02-11 16:11:48 +01007221 goto out;
Jason Low52a08ef2014-05-08 17:49:22 -07007222 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007223
Peter Zijlstraf492e122009-12-23 15:29:42 +01007224 raw_spin_unlock(&this_rq->lock);
7225
Paul Turner48a16752012-10-04 13:18:31 +02007226 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02007227 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007228 for_each_domain(this_cpu, sd) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007229 int continue_balancing = 1;
Jason Low9bd721c2013-09-13 11:26:52 -07007230 u64 t0, domain_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007231
7232 if (!(sd->flags & SD_LOAD_BALANCE))
7233 continue;
7234
Jason Low52a08ef2014-05-08 17:49:22 -07007235 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
7236 update_next_balance(sd, 0, &next_balance);
Jason Low9bd721c2013-09-13 11:26:52 -07007237 break;
Jason Low52a08ef2014-05-08 17:49:22 -07007238 }
Jason Low9bd721c2013-09-13 11:26:52 -07007239
Peter Zijlstraf492e122009-12-23 15:29:42 +01007240 if (sd->flags & SD_BALANCE_NEWIDLE) {
Jason Low9bd721c2013-09-13 11:26:52 -07007241 t0 = sched_clock_cpu(this_cpu);
7242
Peter Zijlstraf492e122009-12-23 15:29:42 +01007243 pulled_task = load_balance(this_cpu, this_rq,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007244 sd, CPU_NEWLY_IDLE,
7245 &continue_balancing);
Jason Low9bd721c2013-09-13 11:26:52 -07007246
7247 domain_cost = sched_clock_cpu(this_cpu) - t0;
7248 if (domain_cost > sd->max_newidle_lb_cost)
7249 sd->max_newidle_lb_cost = domain_cost;
7250
7251 curr_cost += domain_cost;
Peter Zijlstraf492e122009-12-23 15:29:42 +01007252 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007253
Jason Low52a08ef2014-05-08 17:49:22 -07007254 update_next_balance(sd, 0, &next_balance);
Jason Low39a4d9c2014-04-23 18:30:35 -07007255
7256 /*
7257 * Stop searching for tasks to pull if there are
7258 * now runnable tasks on this rq.
7259 */
7260 if (pulled_task || this_rq->nr_running > 0)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007261 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007262 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02007263 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01007264
7265 raw_spin_lock(&this_rq->lock);
7266
Jason Low0e5b5332014-04-28 15:45:54 -07007267 if (curr_cost > this_rq->max_idle_balance_cost)
7268 this_rq->max_idle_balance_cost = curr_cost;
7269
Daniel Lezcanoe5fc6612014-01-17 10:04:02 +01007270 /*
Jason Low0e5b5332014-04-28 15:45:54 -07007271 * While browsing the domains, we released the rq lock; a task could
7272 * have been enqueued in the meantime. Since we're not going idle,
7273 * pretend we pulled a task.
Daniel Lezcanoe5fc6612014-01-17 10:04:02 +01007274 */
Jason Low0e5b5332014-04-28 15:45:54 -07007275 if (this_rq->cfs.h_nr_running && !pulled_task)
Peter Zijlstra6e831252014-02-11 16:11:48 +01007276 pulled_task = 1;
Daniel Lezcanoe5fc6612014-01-17 10:04:02 +01007277
Peter Zijlstra6e831252014-02-11 16:11:48 +01007278out:
Jason Low52a08ef2014-05-08 17:49:22 -07007279 /* Move the next balance forward */
7280 if (time_after(this_rq->next_balance, next_balance))
7281 this_rq->next_balance = next_balance;
7282
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04007283 /* Is there a task of a high priority class? */
Kirill Tkhai46383642014-03-15 02:15:07 +04007284 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04007285 pulled_task = -1;
7286
7287 if (pulled_task) {
7288 idle_exit_fair(this_rq);
Peter Zijlstra6e831252014-02-11 16:11:48 +01007289 this_rq->idle_stamp = 0;
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04007290 }
Peter Zijlstra6e831252014-02-11 16:11:48 +01007291
Daniel Lezcano3c4017c2014-01-17 10:04:03 +01007292 return pulled_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007293}
7294
7295/*
Tejun Heo969c7922010-05-06 18:49:21 +02007296 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
7297 * running tasks off the busiest CPU onto idle CPUs. It requires at
7298 * least 1 task to be running on each physical CPU where possible, and
7299 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007300 */
Tejun Heo969c7922010-05-06 18:49:21 +02007301static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007302{
Tejun Heo969c7922010-05-06 18:49:21 +02007303 struct rq *busiest_rq = data;
7304 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007305 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02007306 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007307 struct sched_domain *sd;
Kirill Tkhaie5673f22014-08-20 13:48:01 +04007308 struct task_struct *p = NULL;
Tejun Heo969c7922010-05-06 18:49:21 +02007309
7310 raw_spin_lock_irq(&busiest_rq->lock);
7311
7312 /* make sure the requested cpu hasn't gone down in the meantime */
7313 if (unlikely(busiest_cpu != smp_processor_id() ||
7314 !busiest_rq->active_balance))
7315 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007316
7317 /* Is there any task to move? */
7318 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02007319 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007320
7321 /*
7322 * This condition is "impossible", if it occurs
7323 * we need to fix it. Originally reported by
7324 * Bjorn Helgaas on a 128-cpu setup.
7325 */
7326 BUG_ON(busiest_rq == target_rq);
7327
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007328 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02007329 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007330 for_each_domain(target_cpu, sd) {
7331 if ((sd->flags & SD_LOAD_BALANCE) &&
7332 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
7333 break;
7334 }
7335
7336 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007337 struct lb_env env = {
7338 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01007339 .dst_cpu = target_cpu,
7340 .dst_rq = target_rq,
7341 .src_cpu = busiest_rq->cpu,
7342 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007343 .idle = CPU_IDLE,
7344 };
7345
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007346 schedstat_inc(sd, alb_count);
7347
Kirill Tkhaie5673f22014-08-20 13:48:01 +04007348 p = detach_one_task(&env);
7349 if (p)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007350 schedstat_inc(sd, alb_pushed);
7351 else
7352 schedstat_inc(sd, alb_failed);
7353 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02007354 rcu_read_unlock();
Tejun Heo969c7922010-05-06 18:49:21 +02007355out_unlock:
7356 busiest_rq->active_balance = 0;
Kirill Tkhaie5673f22014-08-20 13:48:01 +04007357 raw_spin_unlock(&busiest_rq->lock);
7358
7359 if (p)
7360 attach_one_task(target_rq, p);
7361
7362 local_irq_enable();
7363
Tejun Heo969c7922010-05-06 18:49:21 +02007364 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007365}
7366
Mike Galbraithd987fc72011-12-05 10:01:47 +01007367static inline int on_null_domain(struct rq *rq)
7368{
7369 return unlikely(!rcu_dereference_sched(rq->sd));
7370}
7371
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007372#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007373/*
7374 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007375 * - When one of the busy CPUs notices that there may be an idle rebalancing
7376 * needed, they will kick the idle load balancer, which then does idle
7377 * load balancing for all the idle CPUs.
7378 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007379static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007380 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007381 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007382 unsigned long next_balance; /* in jiffy units */
7383} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007384
Daniel Lezcano3dd03372014-01-06 12:34:41 +01007385static inline int find_new_ilb(void)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007386{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007387 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007388
Suresh Siddha786d6dc2011-12-01 17:07:35 -08007389 if (ilb < nr_cpu_ids && idle_cpu(ilb))
7390 return ilb;
7391
7392 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007393}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007394
7395/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007396 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
7397 * first idle CPU from nohz.idle_cpus_mask (as found by find_new_ilb(),
7398 * if there is one).
7399 */
Daniel Lezcano0aeeeeb2014-01-06 12:34:42 +01007400static void nohz_balancer_kick(void)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007401{
7402 int ilb_cpu;
7403
7404 nohz.next_balance++;
7405
Daniel Lezcano3dd03372014-01-06 12:34:41 +01007406 ilb_cpu = find_new_ilb();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007407
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007408 if (ilb_cpu >= nr_cpu_ids)
7409 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007410
Suresh Siddhacd490c52011-12-06 11:26:34 -08007411 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08007412 return;
7413 /*
7414 * Use smp_send_reschedule() instead of resched_cpu().
7415	 * This way we generate a sched IPI on the target cpu, which is idle,
7416	 * and the softirq performing the nohz idle load balance will be run
7417	 * before returning from the IPI.
7418 */
7419 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007420 return;
7421}
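/*
 * The kick path, roughly: the busy CPU sets NOHZ_BALANCE_KICK in the chosen
 * idle CPU's nohz_flags and sends it a reschedule IPI.  When that CPU handles
 * the IPI, SCHED_SOFTIRQ gets raised and run_rebalance_domains() ends up in
 * nohz_idle_balance(), balancing on behalf of all the idle CPUs.
 */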
7422
Alex Shic1cc0172012-09-10 15:10:58 +08007423static inline void nohz_balance_exit_idle(int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08007424{
7425 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
Mike Galbraithd987fc72011-12-05 10:01:47 +01007426 /*
7427		 * Completely isolated CPUs never set themselves in nohz.idle_cpus_mask, so we must test.
7428 */
7429 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
7430 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
7431 atomic_dec(&nohz.nr_cpus);
7432 }
Suresh Siddha71325962012-01-19 18:28:57 -08007433 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7434 }
7435}
7436
Suresh Siddha69e1e812011-12-01 17:07:33 -08007437static inline void set_cpu_sd_state_busy(void)
7438{
7439 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307440 int cpu = smp_processor_id();
Suresh Siddha69e1e812011-12-01 17:07:33 -08007441
Suresh Siddha69e1e812011-12-01 17:07:33 -08007442 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307443 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Vincent Guittot25f55d92013-04-23 16:59:02 +02007444
7445 if (!sd || !sd->nohz_idle)
7446 goto unlock;
7447 sd->nohz_idle = 0;
7448
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007449 atomic_inc(&sd->groups->sgc->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02007450unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08007451 rcu_read_unlock();
7452}
7453
7454void set_cpu_sd_state_idle(void)
7455{
7456 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307457 int cpu = smp_processor_id();
Suresh Siddha69e1e812011-12-01 17:07:33 -08007458
Suresh Siddha69e1e812011-12-01 17:07:33 -08007459 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307460 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Vincent Guittot25f55d92013-04-23 16:59:02 +02007461
7462 if (!sd || sd->nohz_idle)
7463 goto unlock;
7464 sd->nohz_idle = 1;
7465
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007466 atomic_dec(&sd->groups->sgc->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02007467unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08007468 rcu_read_unlock();
7469}
7470
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007471/*
Alex Shic1cc0172012-09-10 15:10:58 +08007472 * This routine will record that the cpu is going idle with tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007473 * This info will be used in performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007474 */
Alex Shic1cc0172012-09-10 15:10:58 +08007475void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007476{
Suresh Siddha71325962012-01-19 18:28:57 -08007477 /*
7478 * If this cpu is going down, then nothing needs to be done.
7479 */
7480 if (!cpu_active(cpu))
7481 return;
7482
Alex Shic1cc0172012-09-10 15:10:58 +08007483 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
7484 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007485
Mike Galbraithd987fc72011-12-05 10:01:47 +01007486 /*
7487 * If we're a completely isolated CPU, we don't play.
7488 */
7489 if (on_null_domain(cpu_rq(cpu)))
7490 return;
7491
Alex Shic1cc0172012-09-10 15:10:58 +08007492 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
7493 atomic_inc(&nohz.nr_cpus);
7494 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007495}
Suresh Siddha71325962012-01-19 18:28:57 -08007496
Paul Gortmaker0db06282013-06-19 14:53:51 -04007497static int sched_ilb_notifier(struct notifier_block *nfb,
Suresh Siddha71325962012-01-19 18:28:57 -08007498 unsigned long action, void *hcpu)
7499{
7500 switch (action & ~CPU_TASKS_FROZEN) {
7501 case CPU_DYING:
Alex Shic1cc0172012-09-10 15:10:58 +08007502 nohz_balance_exit_idle(smp_processor_id());
Suresh Siddha71325962012-01-19 18:28:57 -08007503 return NOTIFY_OK;
7504 default:
7505 return NOTIFY_DONE;
7506 }
7507}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007508#endif
7509
7510static DEFINE_SPINLOCK(balancing);
7511
Peter Zijlstra49c022e2011-04-05 10:14:25 +02007512/*
7513 * Scale the max load_balance interval with the number of CPUs in the system.
7514 * This trades load-balance latency on larger machines for less cross talk.
7515 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02007516void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02007517{
7518 max_load_balance_interval = HZ*num_online_cpus()/10;
7519}
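/*
 * For example, assuming HZ=250 and 16 CPUs online, max_load_balance_interval
 * becomes 250 * 16 / 10 = 400 jiffies, i.e. the per-domain balance interval
 * is capped at roughly 1.6 seconds on such a machine.
 */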
7520
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007521/*
7522 * It checks each scheduling domain to see if it is due to be balanced,
7523 * and initiates a balancing operation if so.
7524 *
Libinb9b08532013-04-01 19:14:01 +08007525 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007526 */
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01007527static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007528{
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007529 int continue_balancing = 1;
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01007530 int cpu = rq->cpu;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007531 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02007532 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007533 /* Earliest time when we have to do rebalance again */
7534 unsigned long next_balance = jiffies + 60*HZ;
7535 int update_next_balance = 0;
Jason Lowf48627e2013-09-13 11:26:53 -07007536 int need_serialize, need_decay = 0;
7537 u64 max_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007538
Paul Turner48a16752012-10-04 13:18:31 +02007539 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08007540
Peter Zijlstradce840a2011-04-07 14:09:50 +02007541 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007542 for_each_domain(cpu, sd) {
Jason Lowf48627e2013-09-13 11:26:53 -07007543 /*
7544 * Decay the newidle max times here because this is a regular
7545 * visit to all the domains. Decay ~1% per second.
7546 */
7547 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
7548 sd->max_newidle_lb_cost =
7549 (sd->max_newidle_lb_cost * 253) / 256;
7550 sd->next_decay_max_lb_cost = jiffies + HZ;
7551 need_decay = 1;
7552 }
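		/*
		 * 253/256 is about 0.988, so applying it once per second
		 * decays max_newidle_lb_cost by a little over 1% per second,
		 * matching the "~1% per second" noted above.
		 */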
7553 max_cost += sd->max_newidle_lb_cost;
7554
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007555 if (!(sd->flags & SD_LOAD_BALANCE))
7556 continue;
7557
Jason Lowf48627e2013-09-13 11:26:53 -07007558 /*
7559 * Stop the load balance at this level. There is another
7560 * CPU in our sched group which is doing load balancing more
7561 * actively.
7562 */
7563 if (!continue_balancing) {
7564 if (need_decay)
7565 continue;
7566 break;
7567 }
7568
Jason Low52a08ef2014-05-08 17:49:22 -07007569 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007570
7571 need_serialize = sd->flags & SD_SERIALIZE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007572 if (need_serialize) {
7573 if (!spin_trylock(&balancing))
7574 goto out;
7575 }
7576
7577 if (time_after_eq(jiffies, sd->last_balance + interval)) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007578 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007579 /*
Peter Zijlstra62633222013-08-19 12:41:09 +02007580 * The LBF_DST_PINNED logic could have changed
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09007581 * env->dst_cpu, so we can't know our idle
7582 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007583 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09007584 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007585 }
7586 sd->last_balance = jiffies;
Jason Low52a08ef2014-05-08 17:49:22 -07007587 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007588 }
7589 if (need_serialize)
7590 spin_unlock(&balancing);
7591out:
7592 if (time_after(next_balance, sd->last_balance + interval)) {
7593 next_balance = sd->last_balance + interval;
7594 update_next_balance = 1;
7595 }
Jason Lowf48627e2013-09-13 11:26:53 -07007596 }
7597 if (need_decay) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007598 /*
Jason Lowf48627e2013-09-13 11:26:53 -07007599 * Ensure the rq-wide value also decays but keep it at a
7600 * reasonable floor to avoid funnies with rq->avg_idle.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007601 */
Jason Lowf48627e2013-09-13 11:26:53 -07007602 rq->max_idle_balance_cost =
7603 max((u64)sysctl_sched_migration_cost, max_cost);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007604 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02007605 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007606
7607 /*
7608 * next_balance will be updated only when there is a need.
7609	 * When the cpu is attached to a null domain, for example, it will not be
7610 * updated.
7611 */
7612 if (likely(update_next_balance))
7613 rq->next_balance = next_balance;
7614}
7615
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007616#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007617/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007618 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007619 * rebalancing for all the cpus for whom scheduler ticks are stopped.
7620 */
Daniel Lezcano208cb162014-01-06 12:34:44 +01007621static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007622{
Daniel Lezcano208cb162014-01-06 12:34:44 +01007623 int this_cpu = this_rq->cpu;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007624 struct rq *rq;
7625 int balance_cpu;
7626
Suresh Siddha1c792db2011-12-01 17:07:32 -08007627 if (idle != CPU_IDLE ||
7628 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
7629 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007630
7631 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08007632 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007633 continue;
7634
7635 /*
7636 * If this cpu gets work to do, stop the load balancing
7637		 * work being done for other cpus. The next load
7638		 * balancing owner will pick it up.
7639 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08007640 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007641 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007642
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02007643 rq = cpu_rq(balance_cpu);
7644
Tim Chened61bbc2014-05-20 14:39:27 -07007645 /*
7646		 * If the time for the next balance is due, do the balance.
7648 */
7649 if (time_after_eq(jiffies, rq->next_balance)) {
7650 raw_spin_lock_irq(&rq->lock);
7651 update_rq_clock(rq);
7652 update_idle_cpu_load(rq);
7653 raw_spin_unlock_irq(&rq->lock);
7654 rebalance_domains(rq, CPU_IDLE);
7655 }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007656
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007657 if (time_after(this_rq->next_balance, rq->next_balance))
7658 this_rq->next_balance = rq->next_balance;
7659 }
7660 nohz.next_balance = this_rq->next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08007661end:
7662 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007663}
7664
7665/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007666 * Current heuristic for kicking the idle load balancer in the presence
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007667 * of an idle cpu in the system.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007668 * - This rq has more than one task.
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007669 * - This rq has at least one CFS task and the capacity of the CPU is
7670 * significantly reduced because of RT tasks or IRQs.
7671 *   - At the parent of the LLC scheduler domain level, this cpu's scheduler
7672 *     group has multiple busy cpus.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007673 *   - For SD_ASYM_PACKING, if the lower-numbered cpus in the scheduler
7674 * domain span are idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007675 */
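/*
 * For example, a CPU running a single CFS task while RT activity or IRQs
 * consume a significant share of its capacity (as judged by
 * check_cpu_capacity() below) will ask for a kick, so that an idle CPU can
 * pull the CFS task away.
 */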
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007676static inline bool nohz_kick_needed(struct rq *rq)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007677{
7678 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007679 struct sched_domain *sd;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007680 struct sched_group_capacity *sgc;
Daniel Lezcano4a725622014-01-06 12:34:39 +01007681 int nr_busy, cpu = rq->cpu;
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007682 bool kick = false;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007683
Daniel Lezcano4a725622014-01-06 12:34:39 +01007684 if (unlikely(rq->idle_balance))
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007685 return false;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007686
Suresh Siddha1c792db2011-12-01 17:07:32 -08007687 /*
7688 * We may be recently in ticked or tickless idle mode. At the first
7689 * busy tick after returning from idle, we will update the busy stats.
7690 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08007691 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08007692 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007693
7694 /*
7695 * None are in tickless mode and hence no need for NOHZ idle load
7696 * balancing.
7697 */
7698 if (likely(!atomic_read(&nohz.nr_cpus)))
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007699 return false;
Suresh Siddha1c792db2011-12-01 17:07:32 -08007700
7701 if (time_before(now, nohz.next_balance))
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007702 return false;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007703
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007704 if (rq->nr_running >= 2)
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007705 return true;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007706
Peter Zijlstra067491b2011-12-07 14:32:08 +01007707 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307708 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307709 if (sd) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007710 sgc = sd->groups->sgc;
7711 nr_busy = atomic_read(&sgc->nr_busy_cpus);
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307712
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007713 if (nr_busy > 1) {
7714 kick = true;
7715 goto unlock;
7716 }
7717
7718 }
7719
7720 sd = rcu_dereference(rq->sd);
7721 if (sd) {
7722 if ((rq->cfs.h_nr_running >= 1) &&
7723 check_cpu_capacity(rq, sd)) {
7724 kick = true;
7725 goto unlock;
7726 }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007727 }
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307728
7729 sd = rcu_dereference(per_cpu(sd_asym, cpu));
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307730 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007731 sched_domain_span(sd)) < cpu)) {
7732 kick = true;
7733 goto unlock;
7734 }
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307735
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007736unlock:
Peter Zijlstra067491b2011-12-07 14:32:08 +01007737 rcu_read_unlock();
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007738 return kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007739}
7740#else
Daniel Lezcano208cb162014-01-06 12:34:44 +01007741static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007742#endif
7743
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007744/*
7745 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007746 * Also triggered for nohz idle balancing (with NOHZ_BALANCE_KICK set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007747 */
7748static void run_rebalance_domains(struct softirq_action *h)
7749{
Daniel Lezcano208cb162014-01-06 12:34:44 +01007750 struct rq *this_rq = this_rq();
Suresh Siddha6eb57e02011-10-03 15:09:01 -07007751 enum cpu_idle_type idle = this_rq->idle_balance ?
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007752 CPU_IDLE : CPU_NOT_IDLE;
7753
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007754 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007755 * If this cpu has a pending nohz_balance_kick, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007756 * balancing on behalf of the other idle cpus whose ticks are
Preeti U Murthyd4573c32015-03-26 18:32:44 +05307757 * stopped. Do nohz_idle_balance *before* rebalance_domains to
7758 * give the idle cpus a chance to load balance. Else we may
7759 * load balance only within the local sched_domain hierarchy
7760 * and abort nohz_idle_balance altogether if we pull some load.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007761 */
Daniel Lezcano208cb162014-01-06 12:34:44 +01007762 nohz_idle_balance(this_rq, idle);
Preeti U Murthyd4573c32015-03-26 18:32:44 +05307763 rebalance_domains(this_rq, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007764}
7765
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007766/*
7767 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007768 */
Daniel Lezcano7caff662014-01-06 12:34:38 +01007769void trigger_load_balance(struct rq *rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007770{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007771 /* Don't need to rebalance while attached to NULL domain */
Daniel Lezcanoc7260992014-01-06 12:34:45 +01007772 if (unlikely(on_null_domain(rq)))
7773 return;
7774
7775 if (time_after_eq(jiffies, rq->next_balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007776 raise_softirq(SCHED_SOFTIRQ);
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007777#ifdef CONFIG_NO_HZ_COMMON
Daniel Lezcanoc7260992014-01-06 12:34:45 +01007778 if (nohz_kick_needed(rq))
Daniel Lezcano0aeeeeb2014-01-06 12:34:42 +01007779 nohz_balancer_kick();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007780#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007781}
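/*
 * Putting the pieces together, roughly: the periodic tick calls
 * trigger_load_balance(); when a balance is due it raises SCHED_SOFTIRQ,
 * whose handler run_rebalance_domains() first performs any pending nohz
 * balancing and then rebalance_domains() for this CPU.
 */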
7782
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01007783static void rq_online_fair(struct rq *rq)
7784{
7785 update_sysctl();
Kirill Tkhai0e59bda2014-06-25 12:19:42 +04007786
7787 update_runtime_enabled(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01007788}
7789
7790static void rq_offline_fair(struct rq *rq)
7791{
7792 update_sysctl();
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07007793
7794 /* Ensure any throttled groups are reachable by pick_next_task */
7795 unthrottle_offline_cfs_rqs(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01007796}
7797
Dhaval Giani55e12e52008-06-24 23:39:43 +05307798#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02007799
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007800/*
7801 * scheduler tick hitting a task of our scheduling class:
7802 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01007803static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007804{
7805 struct cfs_rq *cfs_rq;
7806 struct sched_entity *se = &curr->se;
7807
7808 for_each_sched_entity(se) {
7809 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01007810 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007811 }
Ben Segall18bf2802012-10-04 12:51:20 +02007812
Dave Kleikamp10e84b92013-07-31 13:53:35 -07007813 if (numabalancing_enabled)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02007814 task_tick_numa(rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007815}
7816
7817/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007818 * called on fork with the child task as argument from the parent's context
7819 * - child not yet on the tasklist
7820 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007821 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007822static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007823{
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09007824 struct cfs_rq *cfs_rq;
7825 struct sched_entity *se = &p->se, *curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02007826 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007827 struct rq *rq = this_rq();
7828 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007829
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007830 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007831
Peter Zijlstra861d0342010-08-19 13:31:43 +02007832 update_rq_clock(rq);
7833
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09007834 cfs_rq = task_cfs_rq(current);
7835 curr = cfs_rq->curr;
7836
Daisuke Nishimura6c9a27f2013-09-10 18:16:36 +09007837 /*
7838 * Not only the cpu but also the task_group of the parent might have
7839 * been changed after parent->se.parent,cfs_rq were copied to
7840 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
7841	 * of the child point to valid ones.
7842 */
7843 rcu_read_lock();
7844 __set_task_cpu(p, this_cpu);
7845 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007846
Ting Yang7109c4422007-08-28 12:53:24 +02007847 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007848
Mike Galbraithb5d9d732009-09-08 11:12:28 +02007849 if (curr)
7850 se->vruntime = curr->vruntime;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02007851 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02007852
Peter Zijlstracd29fe62009-11-27 17:32:46 +01007853 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02007854 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02007855 * Upon rescheduling, sched_class::put_prev_task() will place
7856 * 'current' within the tree based on its new key value.
7857 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02007858 swap(curr->vruntime, se->vruntime);
Kirill Tkhai88751252014-06-29 00:03:57 +04007859 resched_curr(rq);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02007860 }
7861
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01007862 se->vruntime -= cfs_rq->min_vruntime;
7863
Thomas Gleixner05fa7852009-11-17 14:28:38 +01007864 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02007865}
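/*
 * Note on the vruntime handling above, roughly: subtracting min_vruntime
 * leaves the child with a vruntime relative to its parent's cfs_rq; the
 * matching addition of min_vruntime happens when the child is enqueued,
 * possibly on a different CPU, so its placement stays fair wherever
 * wake_up_new_task() puts it.
 */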
7866
Steven Rostedtcb469842008-01-25 21:08:22 +01007867/*
7868 * Priority of the task has changed. Check to see if we preempt
7869 * the current task.
7870 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007871static void
7872prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01007873{
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007874 if (!task_on_rq_queued(p))
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007875 return;
7876
Steven Rostedtcb469842008-01-25 21:08:22 +01007877 /*
7878 * Reschedule if we are currently running on this runqueue and
7879 * our priority decreased, or if we are not currently running on
7880 * this runqueue and our priority is higher than the current's
7881 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007882 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01007883 if (p->prio > oldprio)
Kirill Tkhai88751252014-06-29 00:03:57 +04007884 resched_curr(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01007885 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02007886 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01007887}
7888
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007889static void switched_from_fair(struct rq *rq, struct task_struct *p)
7890{
7891 struct sched_entity *se = &p->se;
7892 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7893
7894 /*
George McCollister791c9e02014-02-18 17:56:51 -06007895 * Ensure the task's vruntime is normalized, so that when it's
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007896 * switched back to the fair class the enqueue_entity(.flags=0) will
7897 * do the right thing.
7898 *
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007899 * If it's queued, then the dequeue_entity(.flags=0) will already
7900	 * have normalized the vruntime; if it's !queued, then only when
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007901 * the task is sleeping will it still have non-normalized vruntime.
7902 */
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04007903 if (!task_on_rq_queued(p) && p->state != TASK_RUNNING) {
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007904 /*
7905 * Fix up our vruntime so that the current sleep doesn't
7906 * cause 'unlimited' sleep bonus.
7907 */
7908 place_entity(cfs_rq, se, 0);
7909 se->vruntime -= cfs_rq->min_vruntime;
7910 }
Paul Turner9ee474f2012-10-04 13:18:30 +02007911
Alex Shi141965c2013-06-26 13:05:39 +08007912#ifdef CONFIG_SMP
Yuyang Du9d89c252015-07-15 08:04:37 +08007913 /* Catch up with the cfs_rq and remove our load when we leave */
7914 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq), &se->avg,
Yuyang Du13962232015-07-15 08:04:41 +08007915 se->on_rq * scale_load_down(se->load.weight), cfs_rq->curr == se, NULL);
Yuyang Du9d89c252015-07-15 08:04:37 +08007916
7917 cfs_rq->avg.load_avg =
7918 max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
7919 cfs_rq->avg.load_sum =
7920 max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
7921 cfs_rq->avg.util_avg =
7922 max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
7923 cfs_rq->avg.util_sum =
7924 max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02007925#endif
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007926}
7927
Steven Rostedtcb469842008-01-25 21:08:22 +01007928/*
7929 * We switched to the sched_fair class.
7930 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007931static void switched_to_fair(struct rq *rq, struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01007932{
Kirill Tkhaif36c0192014-08-06 12:06:01 +04007933 struct sched_entity *se = &p->se;
Byungchul Park7855a352015-08-10 18:02:55 +09007934
7935#ifdef CONFIG_FAIR_GROUP_SCHED
Michael wangeb7a59b2014-02-20 11:14:53 +08007936 /*
7937 * Since the real-depth could have been changed (only FAIR
7938 * class maintain depth value), reset depth properly.
7939 */
7940 se->depth = se->parent ? se->parent->depth + 1 : 0;
7941#endif
Byungchul Park7855a352015-08-10 18:02:55 +09007942
7943 if (!task_on_rq_queued(p)) {
7944
7945 /*
7946 * Ensure the task has a non-normalized vruntime when it is switched
7947 * back to the fair class with !queued, so that enqueue_entity() at
7948 * wake-up time will do the right thing.
7949 *
7950		 * If it's queued, then the enqueue_entity(.flags=0) gives the task a
7951		 * non-normalized vruntime; if it's !queued, then it still has a
7952		 * normalized vruntime.
7953 */
7954 if (p->state != TASK_RUNNING)
7955 se->vruntime += cfs_rq_of(se)->min_vruntime;
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007956 return;
Byungchul Park7855a352015-08-10 18:02:55 +09007957 }
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007958
Steven Rostedtcb469842008-01-25 21:08:22 +01007959 /*
7960 * We were most likely switched from sched_rt, so
7961 * kick off the schedule if running, otherwise just see
7962 * if we can still preempt the current task.
7963 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01007964 if (rq->curr == p)
Kirill Tkhai88751252014-06-29 00:03:57 +04007965 resched_curr(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01007966 else
Peter Zijlstra15afe092008-09-20 23:38:02 +02007967 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01007968}
7969
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007970/* Account for a task changing its policy or group.
7971 *
7972 * This routine is mostly called to set the cfs_rq->curr field when a task
7973 * migrates between groups/classes.
7974 */
7975static void set_curr_task_fair(struct rq *rq)
7976{
7977 struct sched_entity *se = &rq->curr->se;
7978
Paul Turnerec12cb72011-07-21 09:43:30 -07007979 for_each_sched_entity(se) {
7980 struct cfs_rq *cfs_rq = cfs_rq_of(se);
7981
7982 set_next_entity(cfs_rq, se);
7983 /* ensure bandwidth has been allocated on our new cfs_rq */
7984 account_cfs_rq_runtime(cfs_rq, 0);
7985 }
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02007986}
7987
Peter Zijlstra029632f2011-10-25 10:00:11 +02007988void init_cfs_rq(struct cfs_rq *cfs_rq)
7989{
7990 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra029632f2011-10-25 10:00:11 +02007991 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
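	/*
	 * Starting about 1ms (1 << 20 ns) below the u64 wrap point is
	 * presumably deliberate: min_vruntime wraps shortly after boot, so
	 * bugs in the wraparound-safe comparisons show up early rather than
	 * only after a very long uptime.
	 */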
7992#ifndef CONFIG_64BIT
7993 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
7994#endif
Alex Shi141965c2013-06-26 13:05:39 +08007995#ifdef CONFIG_SMP
Yuyang Du9d89c252015-07-15 08:04:37 +08007996 atomic_long_set(&cfs_rq->removed_load_avg, 0);
7997 atomic_long_set(&cfs_rq->removed_util_avg, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02007998#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02007999}
8000
Peter Zijlstra810b3812008-02-29 15:21:01 -05008001#ifdef CONFIG_FAIR_GROUP_SCHED
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04008002static void task_move_group_fair(struct task_struct *p, int queued)
Peter Zijlstra810b3812008-02-29 15:21:01 -05008003{
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008004 struct sched_entity *se = &p->se;
Paul Turneraff3e492012-10-04 13:18:30 +02008005 struct cfs_rq *cfs_rq;
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008006
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008007 /*
8008 * If the task was not on the rq at the time of this cgroup movement
8009	 * it must have been asleep; sleeping tasks keep their ->vruntime
8010 * absolute on their old rq until wakeup (needed for the fair sleeper
8011 * bonus in place_entity()).
8012 *
8013 * If it was on the rq, we've just 'preempted' it, which does convert
8014 * ->vruntime to a relative base.
8015 *
8016 * Make sure both cases convert their relative position when migrating
8017 * to another cgroup's rq. This does somewhat interfere with the
8018 * fair sleeper stuff for the first placement, but who cares.
8019 */
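	/*
	 * A worked example of that conversion: a task with vruntime 1000ms on
	 * a cfs_rq whose min_vruntime is 990ms is carried as +10ms while in
	 * flight; enqueueing it on a cfs_rq whose min_vruntime is 2000ms
	 * re-bases it to 2010ms, so it neither gains nor loses ground against
	 * its new siblings.
	 */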
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09008020 /*
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04008021 * When !queued, vruntime of the task has usually NOT been normalized.
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09008022 * But there are some cases where it has already been normalized:
8023 *
8024 * - Moving a forked child which is waiting for being woken up by
8025 * wake_up_new_task().
Daisuke Nishimura62af3782011-12-15 14:37:41 +09008026 * - Moving a task which has been woken up by try_to_wake_up() and
8027 * waiting for actually being woken up by sched_ttwu_pending().
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09008028 *
8029 * To prevent boost or penalty in the new cfs_rq caused by delta
8030 * min_vruntime between the two cfs_rqs, we skip vruntime adjustment.
8031 */
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04008032 if (!queued && (!se->sum_exec_runtime || p->state == TASK_WAKING))
8033 queued = 1;
Daisuke Nishimura7ceff012011-12-15 14:36:07 +09008034
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04008035 if (!queued)
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008036 se->vruntime -= cfs_rq_of(se)->min_vruntime;
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008037 set_task_rq(p, task_cpu(p));
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008038 se->depth = se->parent ? se->parent->depth + 1 : 0;
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04008039 if (!queued) {
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008040 cfs_rq = cfs_rq_of(se);
8041 se->vruntime += cfs_rq->min_vruntime;
Yuyang Du9d89c252015-07-15 08:04:37 +08008042
Paul Turneraff3e492012-10-04 13:18:30 +02008043#ifdef CONFIG_SMP
Yuyang Du9d89c252015-07-15 08:04:37 +08008044 /* Virtually synchronize task with its new cfs_rq */
8045 p->se.avg.last_update_time = cfs_rq->avg.last_update_time;
8046 cfs_rq->avg.load_avg += p->se.avg.load_avg;
8047 cfs_rq->avg.load_sum += p->se.avg.load_sum;
8048 cfs_rq->avg.util_avg += p->se.avg.util_avg;
8049 cfs_rq->avg.util_sum += p->se.avg.util_sum;
Paul Turneraff3e492012-10-04 13:18:30 +02008050#endif
8051 }
Peter Zijlstra810b3812008-02-29 15:21:01 -05008052}
Peter Zijlstra029632f2011-10-25 10:00:11 +02008053
8054void free_fair_sched_group(struct task_group *tg)
8055{
8056 int i;
8057
8058 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
8059
8060 for_each_possible_cpu(i) {
8061 if (tg->cfs_rq)
8062 kfree(tg->cfs_rq[i]);
Yuyang Du12695572015-07-15 08:04:40 +08008063 if (tg->se) {
8064 if (tg->se[i])
8065 remove_entity_load_avg(tg->se[i]);
Peter Zijlstra029632f2011-10-25 10:00:11 +02008066 kfree(tg->se[i]);
Yuyang Du12695572015-07-15 08:04:40 +08008067 }
Peter Zijlstra029632f2011-10-25 10:00:11 +02008068 }
8069
8070 kfree(tg->cfs_rq);
8071 kfree(tg->se);
8072}
8073
8074int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8075{
8076 struct cfs_rq *cfs_rq;
8077 struct sched_entity *se;
8078 int i;
8079
8080 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8081 if (!tg->cfs_rq)
8082 goto err;
8083 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8084 if (!tg->se)
8085 goto err;
8086
8087 tg->shares = NICE_0_LOAD;
8088
8089 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8090
8091 for_each_possible_cpu(i) {
8092 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8093 GFP_KERNEL, cpu_to_node(i));
8094 if (!cfs_rq)
8095 goto err;
8096
8097 se = kzalloc_node(sizeof(struct sched_entity),
8098 GFP_KERNEL, cpu_to_node(i));
8099 if (!se)
8100 goto err_free_rq;
8101
8102 init_cfs_rq(cfs_rq);
8103 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
Yuyang Du540247f2015-07-15 08:04:39 +08008104 init_entity_runnable_average(se);
Peter Zijlstra029632f2011-10-25 10:00:11 +02008105 }
8106
8107 return 1;
8108
8109err_free_rq:
8110 kfree(cfs_rq);
8111err:
8112 return 0;
8113}
8114
8115void unregister_fair_sched_group(struct task_group *tg, int cpu)
8116{
8117 struct rq *rq = cpu_rq(cpu);
8118 unsigned long flags;
8119
8120 /*
8121 * Only empty task groups can be destroyed; so we can speculatively
8122 * check on_list without danger of it being re-added.
8123 */
8124 if (!tg->cfs_rq[cpu]->on_list)
8125 return;
8126
8127 raw_spin_lock_irqsave(&rq->lock, flags);
8128 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
8129 raw_spin_unlock_irqrestore(&rq->lock, flags);
8130}
8131
8132void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8133 struct sched_entity *se, int cpu,
8134 struct sched_entity *parent)
8135{
8136 struct rq *rq = cpu_rq(cpu);
8137
8138 cfs_rq->tg = tg;
8139 cfs_rq->rq = rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02008140 init_cfs_rq_runtime(cfs_rq);
8141
8142 tg->cfs_rq[cpu] = cfs_rq;
8143 tg->se[cpu] = se;
8144
8145 /* se could be NULL for root_task_group */
8146 if (!se)
8147 return;
8148
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008149 if (!parent) {
Peter Zijlstra029632f2011-10-25 10:00:11 +02008150 se->cfs_rq = &rq->cfs;
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008151 se->depth = 0;
8152 } else {
Peter Zijlstra029632f2011-10-25 10:00:11 +02008153 se->cfs_rq = parent->my_q;
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008154 se->depth = parent->depth + 1;
8155 }
Peter Zijlstra029632f2011-10-25 10:00:11 +02008156
8157 se->my_q = cfs_rq;
Paul Turner0ac9b1c2013-10-16 11:16:27 -07008158 /* guarantee group entities always have weight */
8159 update_load_set(&se->load, NICE_0_LOAD);
Peter Zijlstra029632f2011-10-25 10:00:11 +02008160 se->parent = parent;
8161}
8162
8163static DEFINE_MUTEX(shares_mutex);
8164
8165int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8166{
8167 int i;
8168 unsigned long flags;
8169
8170 /*
8171 * We can't change the weight of the root cgroup.
8172 */
8173 if (!tg->se[0])
8174 return -EINVAL;
8175
8176 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8177
8178 mutex_lock(&shares_mutex);
8179 if (tg->shares == shares)
8180 goto done;
8181
8182 tg->shares = shares;
8183 for_each_possible_cpu(i) {
8184 struct rq *rq = cpu_rq(i);
8185 struct sched_entity *se;
8186
8187 se = tg->se[i];
8188 /* Propagate contribution to hierarchy */
8189 raw_spin_lock_irqsave(&rq->lock, flags);
Frederic Weisbecker71b1da42013-04-12 01:50:59 +02008190
8191 /* Possible calls to update_curr() need rq clock */
8192 update_rq_clock(rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08008193 for_each_sched_entity(se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02008194 update_cfs_shares(group_cfs_rq(se));
8195 raw_spin_unlock_irqrestore(&rq->lock, flags);
8196 }
8197
8198done:
8199 mutex_unlock(&shares_mutex);
8200 return 0;
8201}
8202#else /* CONFIG_FAIR_GROUP_SCHED */
8203
8204void free_fair_sched_group(struct task_group *tg) { }
8205
8206int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8207{
8208 return 1;
8209}
8210
8211void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
8212
8213#endif /* CONFIG_FAIR_GROUP_SCHED */
8214
Peter Zijlstra810b3812008-02-29 15:21:01 -05008215
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07008216static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00008217{
8218 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00008219 unsigned int rr_interval = 0;
8220
8221 /*
8222 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
8223 * idle runqueue:
8224 */
Peter Williams0d721ce2009-09-21 01:31:53 +00008225 if (rq->cfs.load.weight)
Zhu Yanhaia59f4e02013-01-08 12:56:52 +08008226 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
Peter Williams0d721ce2009-09-21 01:31:53 +00008227
8228 return rr_interval;
8229}
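/*
 * For a rough feel: with sched_latency at its unscaled 6ms default and two
 * equally weighted tasks on the runqueue, sched_slice() is about 3ms, which
 * NS_TO_JIFFIES() reports as 3 jiffies at HZ=1000.
 */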
8230
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008231/*
8232 * All the scheduling class methods:
8233 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02008234const struct sched_class fair_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02008235 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008236 .enqueue_task = enqueue_task_fair,
8237 .dequeue_task = dequeue_task_fair,
8238 .yield_task = yield_task_fair,
Mike Galbraithd95f4122011-02-01 09:50:51 -05008239 .yield_to_task = yield_to_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008240
Ingo Molnar2e09bf52007-10-15 17:00:05 +02008241 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008242
8243 .pick_next_task = pick_next_task_fair,
8244 .put_prev_task = put_prev_task_fair,
8245
Peter Williams681f3e62007-10-24 18:23:51 +02008246#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08008247 .select_task_rq = select_task_rq_fair,
Paul Turner0a74bef2012-10-04 13:18:30 +02008248 .migrate_task_rq = migrate_task_rq_fair,
Alex Shi141965c2013-06-26 13:05:39 +08008249
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01008250 .rq_online = rq_online_fair,
8251 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01008252
8253 .task_waking = task_waking_fair,
Yuyang Du12695572015-07-15 08:04:40 +08008254 .task_dead = task_dead_fair,
Peter Williams681f3e62007-10-24 18:23:51 +02008255#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008256
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02008257 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008258 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01008259 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01008260
8261 .prio_changed = prio_changed_fair,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008262 .switched_from = switched_from_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01008263 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05008264
Peter Williams0d721ce2009-09-21 01:31:53 +00008265 .get_rr_interval = get_rr_interval_fair,
8266
Stanislaw Gruszka6e998912014-11-12 16:58:44 +01008267 .update_curr = update_curr_fair,
8268
Peter Zijlstra810b3812008-02-29 15:21:01 -05008269#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008270 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05008271#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008272};
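/*
 * The ".next = &idle_sched_class" link above is how scheduling classes are
 * chained in priority order (roughly stop, deadline, rt, fair, idle in this
 * kernel); the core scheduler walks that list, so the fair class defers to
 * the classes above it and hands over to idle when it has nothing to run.
 */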
8273
8274#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +02008275void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008276{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008277 struct cfs_rq *cfs_rq;
8278
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01008279 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02008280 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02008281 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01008282 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008283}
Srikar Dronamraju397f2372015-06-25 22:51:43 +05308284
8285#ifdef CONFIG_NUMA_BALANCING
8286void show_numa_stats(struct task_struct *p, struct seq_file *m)
8287{
8288 int node;
8289 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
8290
8291 for_each_online_node(node) {
8292 if (p->numa_faults) {
8293 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
8294 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
8295 }
8296 if (p->numa_group) {
8297 gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
8298 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
8299 }
8300 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
8301 }
8302}
8303#endif /* CONFIG_NUMA_BALANCING */
8304#endif /* CONFIG_SCHED_DEBUG */
Peter Zijlstra029632f2011-10-25 10:00:11 +02008305
8306__init void init_sched_fair_class(void)
8307{
8308#ifdef CONFIG_SMP
8309 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
8310
Frederic Weisbecker3451d022011-08-10 23:21:01 +02008311#ifdef CONFIG_NO_HZ_COMMON
Diwakar Tundlam554ceca2012-03-07 14:44:26 -08008312 nohz.next_balance = jiffies;
Peter Zijlstra029632f2011-10-25 10:00:11 +02008313 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
Suresh Siddha71325962012-01-19 18:28:57 -08008314 cpu_notifier(sched_ilb_notifier, 0);
Peter Zijlstra029632f2011-10-25 10:00:11 +02008315#endif
8316#endif /* SMP */
8317
8318}