/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 */

#include <linux/sched.h>
#include <linux/latencytop.h>
#include <linux/cpumask.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/mempolicy.h>
#include <linux/migrate.h>
#include <linux/task_work.h>

#include <trace/events/sched.h>

#include "sched.h"

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;

/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 750000ULL;
unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;

/*
 * This value is kept at sysctl_sched_latency / sysctl_sched_min_granularity.
 */
static unsigned int sched_nr_latency = 8;

/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

#ifdef CONFIG_CFS_BANDWIDTH
/*
 * Amount of runtime to allocate from the global (tg) pool to the local
 * (per-cfs_rq) pool each time a cfs_rq requests quota.
 *
 * Note: if the slice exceeds the remaining runtime (either because it was
 * consumed or because the quota is smaller than the slice), only the
 * remaining available time is issued.
 *
 * default: 5 msec, units: microseconds
 */
unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
#endif

static inline void update_load_add(struct load_weight *lw, unsigned long inc)
{
	lw->weight += inc;
	lw->inv_weight = 0;
}

static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
{
	lw->weight -= dec;
	lw->inv_weight = 0;
}

static inline void update_load_set(struct load_weight *lw, unsigned long w)
{
	lw->weight = w;
	lw->inv_weight = 0;
}

/*
 * Increase the granularity value when there are more CPUs,
 * because with more CPUs the 'effective latency' as visible
 * to users decreases. But the relationship is not linear,
 * so pick a second-best guess by going with the log2 of the
 * number of CPUs.
 *
 * This idea comes from the SD scheduler of Con Kolivas:
 */
static unsigned int get_update_sysctl_factor(void)
{
	unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
	unsigned int factor;

	switch (sysctl_sched_tunable_scaling) {
	case SCHED_TUNABLESCALING_NONE:
		factor = 1;
		break;
	case SCHED_TUNABLESCALING_LINEAR:
		factor = cpus;
		break;
	case SCHED_TUNABLESCALING_LOG:
	default:
		factor = 1 + ilog2(cpus);
		break;
	}

	return factor;
}

static void update_sysctl(void)
{
	unsigned int factor = get_update_sysctl_factor();

#define SET_SYSCTL(name) \
	(sysctl_##name = (factor) * normalized_sysctl_##name)
	SET_SYSCTL(sched_min_granularity);
	SET_SYSCTL(sched_latency);
	SET_SYSCTL(sched_wakeup_granularity);
#undef SET_SYSCTL
}
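
/*
 * Example (illustrative, not in the original source): on a box with 8
 * online CPUs and the default SCHED_TUNABLESCALING_LOG policy,
 * factor = 1 + ilog2(8) = 4, so the effective tunables become:
 *
 *	sysctl_sched_latency            = 4 * 6ms    = 24ms
 *	sysctl_sched_min_granularity    = 4 * 0.75ms =  3ms
 *	sysctl_sched_wakeup_granularity = 4 * 1ms    =  4ms
 *
 * Since cpus is clamped to 8 above, larger machines use the same factor.
 */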

void sched_init_granularity(void)
{
	update_sysctl();
}

#define WMULT_CONST	(~0U)
#define WMULT_SHIFT	32

static void __update_inv_weight(struct load_weight *lw)
{
	unsigned long w;

	if (likely(lw->inv_weight))
		return;

	w = scale_load_down(lw->weight);

	if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
		lw->inv_weight = 1;
	else if (unlikely(!w))
		lw->inv_weight = WMULT_CONST;
	else
		lw->inv_weight = WMULT_CONST / w;
}

/*
 * delta_exec * weight / lw.weight
 *   OR
 * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
 *
 * Either weight := NICE_0_LOAD and lw \e sched_prio_to_wmult[], in which case
 * we're guaranteed shift stays positive because inv_weight is guaranteed to
 * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
 *
 * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
 * weight/lw.weight <= 1, and therefore our shift will also be positive.
 */
static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
	u64 fact = scale_load_down(weight);
	int shift = WMULT_SHIFT;

	__update_inv_weight(lw);

	if (unlikely(fact >> 32)) {
		while (fact >> 32) {
			fact >>= 1;
			shift--;
		}
	}

	/* hint to use a 32x32->64 mul */
	fact = (u64)(u32)fact * lw->inv_weight;

	while (fact >> 32) {
		fact >>= 1;
		shift--;
	}

	return mul_u64_u32_shr(delta_exec, fact, shift);
}
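
/*
 * Example (illustrative, not in the original source): for weight =
 * NICE_0_LOAD (1024 after scale_load_down()) on a runqueue with
 * lw->weight = 2048 (two nice-0 tasks):
 *
 *	inv_weight = (2^32 - 1) / 2048       = 2097151
 *	fact       = 1024 * 2097151         ~= 2^31  (fits, no extra shift)
 *	result     = (delta_exec * fact) >> 32 ~= delta_exec / 2
 *
 * which matches delta_exec * weight / lw.weight = delta_exec * 1024/2048.
 */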


const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline struct cfs_rq *
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return se->cfs_rq;

	return NULL;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * A preemption test can only be made between sibling entities that
	 * are in the same cfs_rq, i.e. that have a common parent. Walk up
	 * the hierarchy of both tasks until we find ancestors that are
	 * siblings of a common parent.
	 */

	/* First walk up until both entities are at same depth */
	se_depth = (*se)->depth;
	pse_depth = (*pse)->depth;

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}

#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 max_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - max_vruntime);
	if (delta > 0)
		max_vruntime = vruntime;

	return max_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	/* ensure we never gain time by being placed backwards. */
	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (entity_before(se, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	unsigned int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = __calc_delta(delta, NICE_0_LOAD, &se->load);

	return delta;
}

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (sched_nr_latency) we have to stretch
 * this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	if (unlikely(nr_running > sched_nr_latency))
		return nr_running * sysctl_sched_min_granularity;
	else
		return sysctl_sched_latency;
}
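
/*
 * Example (illustrative, not in the original source): with the defaults
 * scaled for 8 CPUs (latency 24ms, min granularity 3ms, hence
 * sched_nr_latency = 8):
 *
 *	nr_running = 5  -> period = 24ms (the latency target is preserved)
 *	nr_running = 16 -> period = 16 * 3ms = 48ms (stretched so every
 *			   task still gets at least the minimum granularity)
 */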

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = __calc_delta(slice, se->load.weight, load);
	}
	return slice;
}
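
/*
 * Example (illustrative, not in the original source): two runnable nice-0
 * tasks (weight 1024 each) make a runqueue weight of 2048; with a 24ms
 * period each gets s = 24ms * 1024/2048 = 12ms of wall-time. A nice -5
 * task (weight 3121 in the prio-to-weight table) paired with one nice-0
 * task would instead get 24ms * 3121/4145 ~= 18ms.
 */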

/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

#ifdef CONFIG_SMP
static int select_idle_sibling(struct task_struct *p, int cpu);
static unsigned long task_h_load(struct task_struct *p);

/*
 * We choose a half-life close to 1 scheduling period.
 * Note: The tables runnable_avg_yN_inv and runnable_avg_yN_sum are
 * dependent on this value.
 */
#define LOAD_AVG_PERIOD 32
#define LOAD_AVG_MAX 47742 /* maximum possible load avg */
#define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */

/* Give a new sched_entity initial runnable values so it appears fully loaded during its infancy */
void init_entity_runnable_average(struct sched_entity *se)
{
	struct sched_avg *sa = &se->avg;

	sa->last_update_time = 0;
	/*
	 * sched_avg's period_contrib should be strictly less than 1024, so
	 * we give it 1023 to make sure it is almost a period (1024us), and
	 * will definitely be updated (after enqueue).
	 */
	sa->period_contrib = 1023;
	sa->load_avg = scale_load_down(se->load.weight);
	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
	/*
	 * At this point, util_avg won't be used in select_task_rq_fair anyway
	 */
	sa->util_avg = 0;
	sa->util_sum = 0;
	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
}

/*
 * With new tasks being created, their initial util_avgs are extrapolated
 * based on the cfs_rq's current util_avg:
 *
 *   util_avg = cfs_rq->util_avg / (cfs_rq->load_avg + 1) * se.load.weight
 *
 * However, in many cases, the above util_avg does not give a desired
 * value. Moreover, the sum of the util_avgs may be divergent, such
 * as when the series is a harmonic series.
 *
 * To solve this problem, we also cap the util_avg of successive tasks to
 * only 1/2 of the remaining utilization budget:
 *
 *   util_avg_cap = (1024 - cfs_rq->avg.util_avg) / 2^n
 *
 * where n denotes the nth task.
 *
 * For example, the simplest such series from the beginning would be:
 *
 *  task  util_avg: 512, 256, 128,  64,  32,   16,    8, ...
 * cfs_rq util_avg: 512, 768, 896, 960, 992, 1008, 1016, ...
 *
 * Finally, that extrapolated util_avg is clamped to the cap (util_avg_cap)
 * if util_avg > util_avg_cap.
 */
void post_init_entity_util_avg(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	struct sched_avg *sa = &se->avg;
	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;

	if (cap > 0) {
		if (cfs_rq->avg.util_avg != 0) {
			sa->util_avg  = cfs_rq->avg.util_avg * se->load.weight;
			sa->util_avg /= (cfs_rq->avg.load_avg + 1);

			if (sa->util_avg > cap)
				sa->util_avg = cap;
		} else {
			sa->util_avg = cap;
		}
		sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
	}
}
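
/*
 * Example (illustrative, not in the original source): a fork on a cfs_rq
 * with avg.util_avg = 512 and avg.load_avg = 1024, by a nice-0 parent
 * (se->load.weight = 1024):
 *
 *	cap      = (1024 - 512) / 2        = 256
 *	util_avg = 512 * 1024 / (1024 + 1) = 511
 *
 * 511 > 256, so the child starts at util_avg = 256: half the remaining
 * utilization budget rather than the full extrapolated value.
 */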

#else
void init_entity_runnable_average(struct sched_entity *se)
{
}
void post_init_entity_util_avg(struct sched_entity *se)
{
}
#endif

/*
 * Update the current task's runtime statistics.
 */
static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_clock_task(rq_of(cfs_rq));
	u64 delta_exec;

	if (unlikely(!curr))
		return;

	delta_exec = now - curr->exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	curr->exec_start = now;

	schedstat_set(curr->statistics.exec_max,
		      max(delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);

	curr->vruntime += calc_delta_fair(delta_exec, curr);
	update_min_vruntime(cfs_rq);

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}

	account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static void update_curr_fair(struct rq *rq)
{
	update_curr(cfs_rq_of(&rq->curr->se));
}

#ifdef CONFIG_SCHEDSTATS
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 wait_start = rq_clock(rq_of(cfs_rq));

	if (entity_is_task(se) && task_on_rq_migrating(task_of(se)) &&
	    likely(wait_start > se->statistics.wait_start))
		wait_start -= se->statistics.wait_start;

	se->statistics.wait_start = wait_start;
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct task_struct *p;
	u64 delta;

	delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;

	if (entity_is_task(se)) {
		p = task_of(se);
		if (task_on_rq_migrating(p)) {
			/*
			 * Preserve migrating task's wait time so wait_start
			 * time stamp can be adjusted to accumulate wait time
			 * prior to migration.
			 */
			se->statistics.wait_start = delta;
			return;
		}
		trace_sched_stat_wait(p, delta);
	}

	se->statistics.wait_max = max(se->statistics.wait_max, delta);
	se->statistics.wait_count++;
	se->statistics.wait_sum += delta;
	se->statistics.wait_start = 0;
}

/*
 * Task is being enqueued - update stats:
 */
static inline void
update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);

	if (flags & DEQUEUE_SLEEP) {
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->statistics.block_start = rq_clock(rq_of(cfs_rq));
		}
	}
}
#else
static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
}

static inline void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
}

static inline void
update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
}
#endif

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_clock_task(rq_of(cfs_rq));
}

/**************************************************
 * Scheduling class queueing methods:
 */

#ifdef CONFIG_NUMA_BALANCING
/*
 * Approximate time to scan a full NUMA task in ms. The task scan period is
 * calculated based on the task's virtual memory size and
 * numa_balancing_scan_size.
 */
unsigned int sysctl_numa_balancing_scan_period_min = 1000;
unsigned int sysctl_numa_balancing_scan_period_max = 60000;

/* Portion of address space to scan in MB */
unsigned int sysctl_numa_balancing_scan_size = 256;

/* Scan @scan_size MB every @scan_period after an initial @scan_delay in ms */
unsigned int sysctl_numa_balancing_scan_delay = 1000;

static unsigned int task_nr_scan_windows(struct task_struct *p)
{
	unsigned long rss = 0;
	unsigned long nr_scan_pages;

	/*
	 * Calculations based on RSS, as non-present and empty pages are
	 * skipped by the PTE scanner, and NUMA hinting faults should be
	 * trapped based on resident pages.
	 */
	nr_scan_pages = sysctl_numa_balancing_scan_size << (20 - PAGE_SHIFT);
	rss = get_mm_rss(p->mm);
	if (!rss)
		rss = nr_scan_pages;

	rss = round_up(rss, nr_scan_pages);
	return rss / nr_scan_pages;
}
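
/*
 * Example (illustrative, not in the original source): with the default
 * scan_size of 256MB and 4KB pages, nr_scan_pages = 256 << 8 = 65536.
 * A task with a 1GB RSS (262144 pages) thus needs
 * round_up(262144, 65536) / 65536 = 4 scan windows to cover its
 * resident memory.
 */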

/* For sanity's sake, never scan more PTEs than MAX_SCAN_WINDOW MB/sec. */
#define MAX_SCAN_WINDOW 2560

static unsigned int task_scan_min(struct task_struct *p)
{
	unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
	unsigned int scan, floor;
	unsigned int windows = 1;

	if (scan_size < MAX_SCAN_WINDOW)
		windows = MAX_SCAN_WINDOW / scan_size;
	floor = 1000 / windows;

	scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
	return max_t(unsigned int, floor, scan);
}

static unsigned int task_scan_max(struct task_struct *p)
{
	unsigned int smin = task_scan_min(p);
	unsigned int smax;

	/* Watch for min being lower than max due to floor calculations */
	smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
	return max(smin, smax);
}
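
/*
 * Example (illustrative, not in the original source), continuing with the
 * 1GB task above (4 scan windows) and scan_size = 256:
 *
 *	windows = 2560 / 256 = 10, so floor = 1000 / 10 = 100ms
 *	scan    = 1000ms / 4  = 250ms   -> task_scan_min() = 250ms
 *	smax    = 60000ms / 4 = 15000ms -> task_scan_max() = 15000ms
 *
 * i.e. this task rescans its address space at most every 250ms and at
 * least every 15s.
 */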

static void account_numa_enqueue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running += (p->numa_preferred_nid != -1);
	rq->nr_preferred_running += (p->numa_preferred_nid == task_node(p));
}

static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
{
	rq->nr_numa_running -= (p->numa_preferred_nid != -1);
	rq->nr_preferred_running -= (p->numa_preferred_nid == task_node(p));
}

struct numa_group {
	atomic_t refcount;

	spinlock_t lock; /* nr_tasks, tasks */
	int nr_tasks;
	pid_t gid;
	int active_nodes;

	struct rcu_head rcu;
	unsigned long total_faults;
	unsigned long max_faults_cpu;
	/*
	 * Faults_cpu is used to decide whether memory should move
	 * towards the CPU. As a consequence, these stats are weighted
	 * more by CPU use than by memory faults.
	 */
	unsigned long *faults_cpu;
	unsigned long faults[0];
};

/* Shared or private faults. */
#define NR_NUMA_HINT_FAULT_TYPES 2

/* Memory and CPU locality */
#define NR_NUMA_HINT_FAULT_STATS (NR_NUMA_HINT_FAULT_TYPES * 2)

/* Averaged statistics, and temporary buffers. */
#define NR_NUMA_HINT_FAULT_BUCKETS (NR_NUMA_HINT_FAULT_STATS * 2)

pid_t task_numa_group_id(struct task_struct *p)
{
	return p->numa_group ? p->numa_group->gid : 0;
}

/*
 * The averaged statistics, shared & private, memory & cpu,
 * occupy the first half of the array. The second half of the
 * array is for current counters, which are averaged into the
 * first set by task_numa_placement.
 */
static inline int task_faults_idx(enum numa_faults_stats s, int nid, int priv)
{
	return NR_NUMA_HINT_FAULT_TYPES * (s * nr_node_ids + nid) + priv;
}
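
/*
 * Example (illustrative, not in the original source): on a 2-node system
 * (nr_node_ids = 2), with NUMA_MEM = 0 the first faults[] entries are:
 *
 *	task_faults_idx(NUMA_MEM, 0, 0) = 0	node 0, shared
 *	task_faults_idx(NUMA_MEM, 0, 1) = 1	node 0, private
 *	task_faults_idx(NUMA_MEM, 1, 0) = 2	node 1, shared
 *	task_faults_idx(NUMA_MEM, 1, 1) = 3	node 1, private
 *
 * and the NUMA_CPU statistics follow at indices 4..7.
 */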

static inline unsigned long task_faults(struct task_struct *p, int nid)
{
	if (!p->numa_faults)
		return 0;

	return p->numa_faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults(struct task_struct *p, int nid)
{
	if (!p->numa_group)
		return 0;

	return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
		p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}

static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
{
	return group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 0)] +
		group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
}

/*
 * A node triggering more than 1/3 as many NUMA faults as the maximum is
 * considered part of a numa group's pseudo-interleaving set. Migrations
 * between these nodes are slowed down, to allow things to settle down.
 */
#define ACTIVE_NODE_FRACTION 3

static bool numa_is_active_node(int nid, struct numa_group *ng)
{
	return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
}
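
/*
 * Example (illustrative, not in the original source): if a group's
 * busiest node has max_faults_cpu = 900, a node with 350 CPU faults is
 * active (350 * 3 = 1050 > 900) while one with 250 is not
 * (250 * 3 = 750 <= 900); only the former joins the group's
 * pseudo-interleaving set.
 */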

/* Handle placement on systems where not all nodes are directly connected. */
static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
					int maxdist, bool task)
{
	unsigned long score = 0;
	int node;

	/*
	 * All nodes are directly connected, and the same distance
	 * from each other. No need for fancy placement algorithms.
	 */
	if (sched_numa_topology_type == NUMA_DIRECT)
		return 0;

	/*
	 * This code is called for each node, introducing N^2 complexity,
	 * which should be ok given the number of nodes rarely exceeds 8.
	 */
	for_each_online_node(node) {
		unsigned long faults;
		int dist = node_distance(nid, node);

		/*
		 * The furthest away nodes in the system are not interesting
		 * for placement; nid was already counted.
		 */
		if (dist == sched_max_numa_distance || node == nid)
			continue;

		/*
		 * On systems with a backplane NUMA topology, compare groups
		 * of nodes, and move tasks towards the group with the most
		 * memory accesses. When comparing two nodes at distance
		 * "hoplimit", only nodes closer by than "hoplimit" are part
		 * of each group. Skip other nodes.
		 */
		if (sched_numa_topology_type == NUMA_BACKPLANE &&
					dist > maxdist)
			continue;

		/* Add up the faults from nearby nodes. */
		if (task)
			faults = task_faults(p, node);
		else
			faults = group_faults(p, node);

		/*
		 * On systems with a glueless mesh NUMA topology, there are
		 * no fixed "groups of nodes". Instead, nodes that are not
		 * directly connected bounce traffic through intermediate
		 * nodes; a numa_group can occupy any set of nodes.
		 * The further away a node is, the less the faults count.
		 * This seems to result in good task placement.
		 */
		if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
			faults *= (sched_max_numa_distance - dist);
			faults /= (sched_max_numa_distance - LOCAL_DISTANCE);
		}

		score += faults;
	}

	return score;
}

/*
 * These return the fraction of accesses done by a particular task, or
 * task group, on a particular numa node. The group weight is given a
 * larger multiplier, in order to group tasks together that are almost
 * evenly spread out between numa nodes.
 */
static inline unsigned long task_weight(struct task_struct *p, int nid,
					int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_faults)
		return 0;

	total_faults = p->total_numa_faults;

	if (!total_faults)
		return 0;

	faults = task_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, true);

	return 1000 * faults / total_faults;
}

static inline unsigned long group_weight(struct task_struct *p, int nid,
					 int dist)
{
	unsigned long faults, total_faults;

	if (!p->numa_group)
		return 0;

	total_faults = p->numa_group->total_faults;

	if (!total_faults)
		return 0;

	faults = group_faults(p, nid);
	faults += score_nearby_nodes(p, nid, dist, false);

	return 1000 * faults / total_faults;
}

bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
				int src_nid, int dst_cpu)
{
	struct numa_group *ng = p->numa_group;
	int dst_nid = cpu_to_node(dst_cpu);
	int last_cpupid, this_cpupid;

	this_cpupid = cpu_pid_to_cpupid(dst_cpu, current->pid);

	/*
	 * Multi-stage node selection is used in conjunction with a periodic
	 * migration fault to build a temporal task<->page relation. By using
	 * a two-stage filter we remove short/unlikely relations.
	 *
	 * Using P(p) ~ n_p / n_t as per frequentist probability, we can equate
	 * a task's usage of a particular page (n_p) per total usage of this
	 * page (n_t) (in a given time-span) to a probability.
	 *
	 * Our periodic faults will sample this probability and getting the
	 * same result twice in a row, given these samples are fully
	 * independent, is then given by P(n)^2, provided our sample period
	 * is sufficiently short compared to the usage pattern.
	 *
	 * This quadratic squishes small probabilities, making it less likely
	 * we act on an unlikely task<->page relation.
	 */
	last_cpupid = page_cpupid_xchg_last(page, this_cpupid);
	if (!cpupid_pid_unset(last_cpupid) &&
				cpupid_to_nid(last_cpupid) != dst_nid)
		return false;

	/* Always allow migrate on private faults */
	if (cpupid_match_pid(p, last_cpupid))
		return true;

	/* A shared fault, but p->numa_group has not been set up yet. */
	if (!ng)
		return true;

	/*
	 * Destination node is much more heavily used than the source
	 * node? Allow migration.
	 */
	if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
					ACTIVE_NODE_FRACTION)
		return true;

	/*
	 * Distribute memory according to CPU & memory use on each node,
	 * with 3/4 hysteresis to avoid unnecessary memory migrations:
	 *
	 * faults_cpu(dst)   3   faults_cpu(src)
	 * --------------- * - > ---------------
	 * faults_mem(dst)   4   faults_mem(src)
	 */
	return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
	       group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}
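
/*
 * Example (illustrative, not in the original source) of the 3/4
 * hysteresis: with faults_cpu(dst) = 80, faults_mem(dst) = 100,
 * faults_cpu(src) = 60, faults_mem(src) = 100 the final check is
 *
 *	80 * 100 * 3 = 24000  >  60 * 100 * 4 = 24000	-> false
 *
 * so the page stays put: the destination's CPU/memory fault ratio must
 * beat the source's by more than a factor of 4/3 before memory moves.
 */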

static unsigned long weighted_cpuload(const int cpu);
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
static unsigned long capacity_of(int cpu);
static long effective_load(struct task_group *tg, int cpu, long wl, long wg);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
	unsigned long nr_running;
	unsigned long load;

	/* Total compute capacity of CPUs on a node */
	unsigned long compute_capacity;

	/* Approximate capacity in terms of runnable tasks on a node */
	unsigned long task_capacity;
	int has_free_capacity;
};

/*
 * XXX borrowed from update_sg_lb_stats
 */
static void update_numa_stats(struct numa_stats *ns, int nid)
{
	int smt, cpu, cpus = 0;
	unsigned long capacity;

	memset(ns, 0, sizeof(*ns));
	for_each_cpu(cpu, cpumask_of_node(nid)) {
		struct rq *rq = cpu_rq(cpu);

		ns->nr_running += rq->nr_running;
		ns->load += weighted_cpuload(cpu);
		ns->compute_capacity += capacity_of(cpu);

		cpus++;
	}

	/*
	 * If we raced with hotplug and there are no CPUs left in our mask
	 * the @ns structure is NULL'ed and task_numa_compare() will
	 * not find this node attractive.
	 *
	 * We'll either bail at !has_free_capacity, or we'll detect a huge
	 * imbalance and bail there.
	 */
	if (!cpus)
		return;

	/* smt := ceil(cpus / capacity), assumes: 1 < smt_power < 2 */
	smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, ns->compute_capacity);
	capacity = cpus / smt; /* cores */

	ns->task_capacity = min_t(unsigned, capacity,
		DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE));
	ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
}
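
/*
 * Example (illustrative, not in the original source; the capacity numbers
 * are made up): a node with 8 SMT-2 siblings whose capacities sum to
 * compute_capacity = 4676, with SCHED_CAPACITY_SCALE = 1024:
 *
 *	smt           = DIV_ROUND_UP(1024 * 8, 4676) = 2
 *	capacity      = 8 / 2 = 4 cores
 *	task_capacity = min(4, DIV_ROUND_CLOSEST(4676, 1024)) = min(4, 5) = 4
 *
 * so the node reports free capacity while fewer than 4 tasks run on it.
 */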

struct task_numa_env {
	struct task_struct *p;

	int src_cpu, src_nid;
	int dst_cpu, dst_nid;

	struct numa_stats src_stats, dst_stats;

	int imbalance_pct;
	int dist;

	struct task_struct *best_task;
	long best_imp;
	int best_cpu;
};

static void task_numa_assign(struct task_numa_env *env,
			     struct task_struct *p, long imp)
{
	if (env->best_task)
		put_task_struct(env->best_task);

	env->best_task = p;
	env->best_imp = imp;
	env->best_cpu = env->dst_cpu;
}

Rik van Riel28a21742014-06-23 11:46:13 -04001312static bool load_too_imbalanced(long src_load, long dst_load,
Rik van Riele63da032014-05-14 13:22:21 -04001313 struct task_numa_env *env)
1314{
Rik van Riele4991b22015-05-27 15:04:27 -04001315 long imb, old_imb;
1316 long orig_src_load, orig_dst_load;
Rik van Riel28a21742014-06-23 11:46:13 -04001317 long src_capacity, dst_capacity;
1318
1319 /*
1320 * The load is corrected for the CPU capacity available on each node.
1321 *
1322 * src_load dst_load
1323 * ------------ vs ---------
1324 * src_capacity dst_capacity
1325 */
1326 src_capacity = env->src_stats.compute_capacity;
1327 dst_capacity = env->dst_stats.compute_capacity;
Rik van Riele63da032014-05-14 13:22:21 -04001328
1329 /* We care about the slope of the imbalance, not the direction. */
Rik van Riele4991b22015-05-27 15:04:27 -04001330 if (dst_load < src_load)
1331 swap(dst_load, src_load);
Rik van Riele63da032014-05-14 13:22:21 -04001332
1333 /* Is the difference below the threshold? */
Rik van Riele4991b22015-05-27 15:04:27 -04001334 imb = dst_load * src_capacity * 100 -
1335 src_load * dst_capacity * env->imbalance_pct;
Rik van Riele63da032014-05-14 13:22:21 -04001336 if (imb <= 0)
1337 return false;
1338
1339 /*
1340 * The imbalance is above the allowed threshold.
Rik van Riele4991b22015-05-27 15:04:27 -04001341 * Compare it with the old imbalance.
Rik van Riele63da032014-05-14 13:22:21 -04001342 */
Rik van Riel28a21742014-06-23 11:46:13 -04001343 orig_src_load = env->src_stats.load;
Rik van Riele4991b22015-05-27 15:04:27 -04001344 orig_dst_load = env->dst_stats.load;
Rik van Riel28a21742014-06-23 11:46:13 -04001345
Rik van Riele4991b22015-05-27 15:04:27 -04001346 if (orig_dst_load < orig_src_load)
1347 swap(orig_dst_load, orig_src_load);
Rik van Riele63da032014-05-14 13:22:21 -04001348
Rik van Riele4991b22015-05-27 15:04:27 -04001349 old_imb = orig_dst_load * src_capacity * 100 -
1350 orig_src_load * dst_capacity * env->imbalance_pct;
1351
1352 /* Would this change make things worse? */
1353 return (imb > old_imb);
Rik van Riele63da032014-05-14 13:22:21 -04001354}
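
/*
 * Worked example (illustrative): with imbalance_pct = 112 and equal node
 * capacities, src_load = 1000 vs dst_load = 1200 gives
 *
 *   imb = 1200 * cap * 100 - 1000 * cap * 112 = 8000 * cap > 0
 *
 * i.e. the proposed placement exceeds the 12% slack.  It is still
 * acceptable if the old placement was worse, e.g. orig_src_load = 900,
 * orig_dst_load = 1300:
 *
 *   old_imb = 1300 * cap * 100 - 900 * cap * 112 = 29200 * cap
 *
 * imb < old_imb, so load_too_imbalanced() returns false and the move
 * may proceed.
 */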
1355
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001356/*
 1357 * This checks if the overall compute and NUMA accesses of the system would
 1358 * be improved if the source task were migrated to the target dst_cpu,
 1359 * taking into account that it might be best to exchange the task running
 1360 * on the dst_cpu with the source task.
1361 */
Rik van Riel887c2902013-10-07 11:29:31 +01001362static void task_numa_compare(struct task_numa_env *env,
1363 long taskimp, long groupimp)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001364{
1365 struct rq *src_rq = cpu_rq(env->src_cpu);
1366 struct rq *dst_rq = cpu_rq(env->dst_cpu);
1367 struct task_struct *cur;
Rik van Riel28a21742014-06-23 11:46:13 -04001368 long src_load, dst_load;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001369 long load;
Rik van Riel1c5d3eb2014-06-23 11:46:15 -04001370 long imp = env->p->numa_group ? groupimp : taskimp;
Rik van Riel0132c3e2014-06-23 11:46:16 -04001371 long moveimp = imp;
Rik van Riel7bd95322014-10-17 03:29:51 -04001372 int dist = env->dist;
Gavin Guo1dff76b2016-01-20 12:36:58 +08001373 bool assigned = false;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001374
1375 rcu_read_lock();
Kirill Tkhai1effd9f2014-10-22 11:17:11 +04001376
1377 raw_spin_lock_irq(&dst_rq->lock);
1378 cur = dst_rq->curr;
1379 /*
Gavin Guo1dff76b2016-01-20 12:36:58 +08001380 * No need to move the exiting task or idle task.
Kirill Tkhai1effd9f2014-10-22 11:17:11 +04001381 */
1382 if ((cur->flags & PF_EXITING) || is_idle_task(cur))
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001383 cur = NULL;
Gavin Guo1dff76b2016-01-20 12:36:58 +08001384 else {
1385 /*
1386 * The task_struct must be protected here to protect the
1387 * p->numa_faults access in the task_weight since the
1388 * numa_faults could already be freed in the following path:
1389 * finish_task_switch()
1390 * --> put_task_struct()
1391 * --> __put_task_struct()
1392 * --> task_numa_free()
1393 */
1394 get_task_struct(cur);
1395 }
1396
Kirill Tkhai1effd9f2014-10-22 11:17:11 +04001397 raw_spin_unlock_irq(&dst_rq->lock);
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001398
1399 /*
Peter Zijlstra7af68332014-11-10 10:54:35 +01001400 * Because we have preemption enabled we can get migrated around and
 1401	 * end up trying to select ourselves (current == env->p) as a swap candidate.
1402 */
1403 if (cur == env->p)
1404 goto unlock;
1405
1406 /*
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001407 * "imp" is the fault differential for the source task between the
1408 * source and destination node. Calculate the total differential for
1409 * the source task and potential destination task. The more negative
 1410	 * the value is, the more remote accesses that would be expected to
1411 * be incurred if the tasks were swapped.
1412 */
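	/*
	 * Illustrative numbers: if moving env->p to dst_nid gains
	 * taskimp = 40 while the candidate task "cur" would lose 15 by
	 * moving the opposite way (task_weight on src_nid minus
	 * task_weight on dst_nid = -15), the combined differential is
	 * imp = 40 - 15 = 25.
	 */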
1413 if (cur) {
1414 /* Skip this swap candidate if cannot move to the source cpu */
1415 if (!cpumask_test_cpu(env->src_cpu, tsk_cpus_allowed(cur)))
1416 goto unlock;
1417
Rik van Riel887c2902013-10-07 11:29:31 +01001418 /*
1419 * If dst and source tasks are in the same NUMA group, or not
Rik van Rielca28aa532013-10-07 11:29:32 +01001420		 * in any group, then look only at task weights.
Rik van Riel887c2902013-10-07 11:29:31 +01001421 */
Rik van Rielca28aa532013-10-07 11:29:32 +01001422 if (cur->numa_group == env->p->numa_group) {
Rik van Riel7bd95322014-10-17 03:29:51 -04001423 imp = taskimp + task_weight(cur, env->src_nid, dist) -
1424 task_weight(cur, env->dst_nid, dist);
Rik van Rielca28aa532013-10-07 11:29:32 +01001425 /*
1426 * Add some hysteresis to prevent swapping the
1427 * tasks within a group over tiny differences.
1428 */
1429 if (cur->numa_group)
1430 imp -= imp/16;
Rik van Riel887c2902013-10-07 11:29:31 +01001431 } else {
Rik van Rielca28aa532013-10-07 11:29:32 +01001432 /*
1433 * Compare the group weights. If a task is all by
1434 * itself (not part of a group), use the task weight
1435 * instead.
1436 */
Rik van Rielca28aa532013-10-07 11:29:32 +01001437 if (cur->numa_group)
Rik van Riel7bd95322014-10-17 03:29:51 -04001438 imp += group_weight(cur, env->src_nid, dist) -
1439 group_weight(cur, env->dst_nid, dist);
Rik van Rielca28aa532013-10-07 11:29:32 +01001440 else
Rik van Riel7bd95322014-10-17 03:29:51 -04001441 imp += task_weight(cur, env->src_nid, dist) -
1442 task_weight(cur, env->dst_nid, dist);
Rik van Riel887c2902013-10-07 11:29:31 +01001443 }
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001444 }
1445
Rik van Riel0132c3e2014-06-23 11:46:16 -04001446 if (imp <= env->best_imp && moveimp <= env->best_imp)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001447 goto unlock;
1448
1449 if (!cur) {
1450 /* Is there capacity at our destination? */
Rik van Rielb932c032014-08-04 13:23:27 -04001451 if (env->src_stats.nr_running <= env->src_stats.task_capacity &&
Nicolas Pitre1b6a7492014-05-26 18:19:35 -04001452 !env->dst_stats.has_free_capacity)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001453 goto unlock;
1454
1455 goto balance;
1456 }
1457
1458 /* Balance doesn't matter much if we're running a task per cpu */
Rik van Riel0132c3e2014-06-23 11:46:16 -04001459 if (imp > env->best_imp && src_rq->nr_running == 1 &&
1460 dst_rq->nr_running == 1)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001461 goto assign;
1462
1463 /*
1464 * In the overloaded case, try and keep the load balanced.
1465 */
1466balance:
Peter Zijlstrae720fff2014-07-11 16:01:53 +02001467 load = task_h_load(env->p);
1468 dst_load = env->dst_stats.load + load;
1469 src_load = env->src_stats.load - load;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001470
Rik van Riel0132c3e2014-06-23 11:46:16 -04001471 if (moveimp > imp && moveimp > env->best_imp) {
1472 /*
 1473		 * If the improvement from just moving env->p (without a swap) is
1474 * better than swapping tasks around, check if a move is
1475 * possible. Store a slightly smaller score than moveimp,
1476 * so an actually idle CPU will win.
1477 */
1478 if (!load_too_imbalanced(src_load, dst_load, env)) {
1479 imp = moveimp - 1;
Gavin Guo1dff76b2016-01-20 12:36:58 +08001480 put_task_struct(cur);
Rik van Riel0132c3e2014-06-23 11:46:16 -04001481 cur = NULL;
1482 goto assign;
1483 }
1484 }
1485
1486 if (imp <= env->best_imp)
1487 goto unlock;
1488
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001489 if (cur) {
Peter Zijlstrae720fff2014-07-11 16:01:53 +02001490 load = task_h_load(cur);
1491 dst_load -= load;
1492 src_load += load;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001493 }
1494
Rik van Riel28a21742014-06-23 11:46:13 -04001495 if (load_too_imbalanced(src_load, dst_load, env))
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001496 goto unlock;
1497
Rik van Rielba7e5a22014-09-04 16:35:30 -04001498 /*
1499 * One idle CPU per node is evaluated for a task numa move.
1500 * Call select_idle_sibling to maybe find a better one.
1501 */
1502 if (!cur)
1503 env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
1504
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001505assign:
Gavin Guo1dff76b2016-01-20 12:36:58 +08001506 assigned = true;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001507 task_numa_assign(env, cur, imp);
1508unlock:
1509 rcu_read_unlock();
Gavin Guo1dff76b2016-01-20 12:36:58 +08001510 /*
 1511	 * If dst_rq->curr was never assigned as the best task, the reference
 1512	 * taken above to protect its task_struct is no longer needed; drop it.
1513 */
1514 if (cur && !assigned)
1515 put_task_struct(cur);
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001516}
1517
Rik van Riel887c2902013-10-07 11:29:31 +01001518static void task_numa_find_cpu(struct task_numa_env *env,
1519 long taskimp, long groupimp)
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001520{
1521 int cpu;
1522
1523 for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
1524 /* Skip this CPU if the source task cannot migrate */
1525 if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(env->p)))
1526 continue;
1527
1528 env->dst_cpu = cpu;
Rik van Riel887c2902013-10-07 11:29:31 +01001529 task_numa_compare(env, taskimp, groupimp);
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001530 }
1531}
1532
Rik van Riel6f9aad02015-05-28 09:52:49 -04001533/* Only move tasks to a NUMA node less busy than the current node. */
1534static bool numa_has_capacity(struct task_numa_env *env)
1535{
1536 struct numa_stats *src = &env->src_stats;
1537 struct numa_stats *dst = &env->dst_stats;
1538
1539 if (src->has_free_capacity && !dst->has_free_capacity)
1540 return false;
1541
1542 /*
1543 * Only consider a task move if the source has a higher load
1544 * than the destination, corrected for CPU capacity on each node.
1545 *
1546 * src->load dst->load
1547 * --------------------- vs ---------------------
1548 * src->compute_capacity dst->compute_capacity
1549 */
Srikar Dronamraju44dcb042015-06-16 17:26:00 +05301550	if (src->load * dst->compute_capacity * env->imbalance_pct >
 1552			dst->load * src->compute_capacity * 100)
Rik van Riel6f9aad02015-05-28 09:52:49 -04001553 return true;
1554
1555 return false;
1556}
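
/*
 * Worked example (illustrative): with equal compute capacities of 1024
 * and imbalance_pct = 112, src->load = 900 vs dst->load = 700:
 *
 *   900 * 1024 * 112 = 103219200  >  700 * 1024 * 100 = 71680000
 *
 * so the source node is sufficiently busier and the move is considered.
 */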
1557
Mel Gorman58d081b2013-10-07 11:29:10 +01001558static int task_numa_migrate(struct task_struct *p)
Mel Gormane6628d52013-10-07 11:29:02 +01001559{
Mel Gorman58d081b2013-10-07 11:29:10 +01001560 struct task_numa_env env = {
1561 .p = p,
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001562
Mel Gorman58d081b2013-10-07 11:29:10 +01001563 .src_cpu = task_cpu(p),
Ingo Molnarb32e86b2013-10-07 11:29:30 +01001564 .src_nid = task_node(p),
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001565
1566 .imbalance_pct = 112,
1567
1568 .best_task = NULL,
1569 .best_imp = 0,
Rik van Riel4142c3e2016-01-25 17:07:39 -05001570 .best_cpu = -1,
Mel Gorman58d081b2013-10-07 11:29:10 +01001571 };
1572 struct sched_domain *sd;
Rik van Riel887c2902013-10-07 11:29:31 +01001573 unsigned long taskweight, groupweight;
Rik van Riel7bd95322014-10-17 03:29:51 -04001574 int nid, ret, dist;
Rik van Riel887c2902013-10-07 11:29:31 +01001575 long taskimp, groupimp;
Mel Gormane6628d52013-10-07 11:29:02 +01001576
Mel Gorman58d081b2013-10-07 11:29:10 +01001577 /*
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001578 * Pick the lowest SD_NUMA domain, as that would have the smallest
1579 * imbalance and would be the first to start moving tasks about.
1580 *
1581 * And we want to avoid any moving of tasks about, as that would create
 1582	 * random movement of tasks -- countering the numa conditions we're trying
1583 * to satisfy here.
Mel Gorman58d081b2013-10-07 11:29:10 +01001584 */
Mel Gormane6628d52013-10-07 11:29:02 +01001585 rcu_read_lock();
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001586 sd = rcu_dereference(per_cpu(sd_numa, env.src_cpu));
Rik van Riel46a73e82013-11-11 19:29:25 -05001587 if (sd)
1588 env.imbalance_pct = 100 + (sd->imbalance_pct - 100) / 2;
Mel Gormane6628d52013-10-07 11:29:02 +01001589 rcu_read_unlock();
1590
Rik van Riel46a73e82013-11-11 19:29:25 -05001591 /*
1592 * Cpusets can break the scheduler domain tree into smaller
1593 * balance domains, some of which do not cross NUMA boundaries.
1594 * Tasks that are "trapped" in such domains cannot be migrated
1595 * elsewhere, so there is no point in (re)trying.
1596 */
1597 if (unlikely(!sd)) {
Wanpeng Lide1b3012013-12-12 15:23:24 +08001598 p->numa_preferred_nid = task_node(p);
Rik van Riel46a73e82013-11-11 19:29:25 -05001599 return -EINVAL;
1600 }
1601
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001602 env.dst_nid = p->numa_preferred_nid;
Rik van Riel7bd95322014-10-17 03:29:51 -04001603 dist = env.dist = node_distance(env.src_nid, env.dst_nid);
1604 taskweight = task_weight(p, env.src_nid, dist);
1605 groupweight = group_weight(p, env.src_nid, dist);
1606 update_numa_stats(&env.src_stats, env.src_nid);
1607 taskimp = task_weight(p, env.dst_nid, dist) - taskweight;
1608 groupimp = group_weight(p, env.dst_nid, dist) - groupweight;
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001609 update_numa_stats(&env.dst_stats, env.dst_nid);
Mel Gorman58d081b2013-10-07 11:29:10 +01001610
Rik van Riela43455a2014-06-04 16:09:42 -04001611 /* Try to find a spot on the preferred nid. */
Rik van Riel6f9aad02015-05-28 09:52:49 -04001612 if (numa_has_capacity(&env))
1613 task_numa_find_cpu(&env, taskimp, groupimp);
Rik van Riele1dda8a2013-10-07 11:29:19 +01001614
Rik van Riel9de05d42014-10-09 17:27:47 -04001615 /*
1616 * Look at other nodes in these cases:
1617 * - there is no space available on the preferred_nid
1618 * - the task is part of a numa_group that is interleaved across
1619 * multiple NUMA nodes; in order to better consolidate the group,
1620 * we need to check other locations.
1621 */
Rik van Riel4142c3e2016-01-25 17:07:39 -05001622 if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001623 for_each_online_node(nid) {
1624 if (nid == env.src_nid || nid == p->numa_preferred_nid)
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001625 continue;
1626
Rik van Riel7bd95322014-10-17 03:29:51 -04001627 dist = node_distance(env.src_nid, env.dst_nid);
Rik van Riel6c6b1192014-10-17 03:29:52 -04001628 if (sched_numa_topology_type == NUMA_BACKPLANE &&
1629 dist != env.dist) {
1630 taskweight = task_weight(p, env.src_nid, dist);
1631 groupweight = group_weight(p, env.src_nid, dist);
1632 }
Rik van Riel7bd95322014-10-17 03:29:51 -04001633
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001634 /* Only consider nodes where both task and groups benefit */
Rik van Riel7bd95322014-10-17 03:29:51 -04001635 taskimp = task_weight(p, nid, dist) - taskweight;
1636 groupimp = group_weight(p, nid, dist) - groupweight;
Rik van Riel887c2902013-10-07 11:29:31 +01001637 if (taskimp < 0 && groupimp < 0)
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001638 continue;
1639
Rik van Riel7bd95322014-10-17 03:29:51 -04001640 env.dist = dist;
Mel Gorman2c8a50a2013-10-07 11:29:18 +01001641 env.dst_nid = nid;
1642 update_numa_stats(&env.dst_stats, env.dst_nid);
Rik van Riel6f9aad02015-05-28 09:52:49 -04001643 if (numa_has_capacity(&env))
1644 task_numa_find_cpu(&env, taskimp, groupimp);
Mel Gorman58d081b2013-10-07 11:29:10 +01001645 }
1646 }
1647
Rik van Riel68d1b022014-04-11 13:00:29 -04001648 /*
1649 * If the task is part of a workload that spans multiple NUMA nodes,
1650 * and is migrating into one of the workload's active nodes, remember
1651 * this node as the task's preferred numa node, so the workload can
1652 * settle down.
1653 * A task that migrated to a second choice node will be better off
1654 * trying for a better one later. Do not set the preferred node here.
1655 */
Rik van Rieldb015da2014-06-23 11:41:34 -04001656 if (p->numa_group) {
Rik van Riel4142c3e2016-01-25 17:07:39 -05001657 struct numa_group *ng = p->numa_group;
1658
Rik van Rieldb015da2014-06-23 11:41:34 -04001659 if (env.best_cpu == -1)
1660 nid = env.src_nid;
1661 else
1662 nid = env.dst_nid;
1663
Rik van Riel4142c3e2016-01-25 17:07:39 -05001664 if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
Rik van Rieldb015da2014-06-23 11:41:34 -04001665 sched_setnuma(p, env.dst_nid);
1666 }
1667
1668 /* No better CPU than the current one was found. */
1669 if (env.best_cpu == -1)
1670 return -EAGAIN;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01001671
Rik van Riel04bb2f92013-10-07 11:29:36 +01001672 /*
1673 * Reset the scan period if the task is being rescheduled on an
1674 * alternative node to recheck if the tasks is now properly placed.
1675 */
1676 p->numa_scan_period = task_scan_min(p);
1677
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001678 if (env.best_task == NULL) {
Mel Gorman286549d2014-01-21 15:51:03 -08001679 ret = migrate_task_to(p, env.best_cpu);
1680 if (ret != 0)
1681 trace_sched_stick_numa(p, env.src_cpu, env.best_cpu);
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001682 return ret;
1683 }
1684
1685 ret = migrate_swap(p, env.best_task);
Mel Gorman286549d2014-01-21 15:51:03 -08001686 if (ret != 0)
1687 trace_sched_stick_numa(p, env.src_cpu, task_cpu(env.best_task));
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001688 put_task_struct(env.best_task);
1689 return ret;
Mel Gormane6628d52013-10-07 11:29:02 +01001690}
1691
Mel Gorman6b9a7462013-10-07 11:29:11 +01001692/* Attempt to migrate a task to a CPU on the preferred node. */
1693static void numa_migrate_preferred(struct task_struct *p)
1694{
Rik van Riel5085e2a2014-04-11 13:00:28 -04001695 unsigned long interval = HZ;
1696
Rik van Riel2739d3e2013-10-07 11:29:41 +01001697 /* This task has no NUMA fault statistics yet */
Iulia Manda44dba3d2014-10-31 02:13:31 +02001698 if (unlikely(p->numa_preferred_nid == -1 || !p->numa_faults))
Rik van Riel2739d3e2013-10-07 11:29:41 +01001699 return;
1700
1701 /* Periodically retry migrating the task to the preferred node */
Rik van Riel5085e2a2014-04-11 13:00:28 -04001702 interval = min(interval, msecs_to_jiffies(p->numa_scan_period) / 16);
1703 p->numa_migrate_retry = jiffies + interval;
Rik van Riel2739d3e2013-10-07 11:29:41 +01001704
Mel Gorman6b9a7462013-10-07 11:29:11 +01001705 /* Success if task is already running on preferred CPU */
Wanpeng Lide1b3012013-12-12 15:23:24 +08001706 if (task_node(p) == p->numa_preferred_nid)
Mel Gorman6b9a7462013-10-07 11:29:11 +01001707 return;
1708
Mel Gorman6b9a7462013-10-07 11:29:11 +01001709 /* Otherwise, try migrate to a CPU on the preferred node */
Rik van Riel2739d3e2013-10-07 11:29:41 +01001710 task_numa_migrate(p);
Mel Gorman6b9a7462013-10-07 11:29:11 +01001711}
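
/*
 * Illustrative pacing numbers, assuming HZ = 1000: with
 * numa_scan_period = 1000ms the retry interval is
 * min(1000, msecs_to_jiffies(1000) / 16) = 62 jiffies, so the task
 * re-attempts migration to its preferred node roughly every 62ms until
 * it lands there.
 */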
1712
Rik van Riel04bb2f92013-10-07 11:29:36 +01001713/*
Rik van Riel4142c3e2016-01-25 17:07:39 -05001714 * Find out how many nodes the workload is actively running on. Do this by
Rik van Riel20e07de2014-01-27 17:03:43 -05001715 * tracking the nodes from which NUMA hinting faults are triggered. This can
1716 * be different from the set of nodes where the workload's memory is currently
1717 * located.
Rik van Riel20e07de2014-01-27 17:03:43 -05001718 */
Rik van Riel4142c3e2016-01-25 17:07:39 -05001719static void numa_group_count_active_nodes(struct numa_group *numa_group)
Rik van Riel20e07de2014-01-27 17:03:43 -05001720{
1721 unsigned long faults, max_faults = 0;
Rik van Riel4142c3e2016-01-25 17:07:39 -05001722 int nid, active_nodes = 0;
Rik van Riel20e07de2014-01-27 17:03:43 -05001723
1724 for_each_online_node(nid) {
1725 faults = group_faults_cpu(numa_group, nid);
1726 if (faults > max_faults)
1727 max_faults = faults;
1728 }
1729
1730 for_each_online_node(nid) {
1731 faults = group_faults_cpu(numa_group, nid);
Rik van Riel4142c3e2016-01-25 17:07:39 -05001732 if (faults * ACTIVE_NODE_FRACTION > max_faults)
1733 active_nodes++;
Rik van Riel20e07de2014-01-27 17:03:43 -05001734 }
Rik van Riel4142c3e2016-01-25 17:07:39 -05001735
1736 numa_group->max_faults_cpu = max_faults;
1737 numa_group->active_nodes = active_nodes;
Rik van Riel20e07de2014-01-27 17:03:43 -05001738}
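
/*
 * Worked example (illustrative), assuming ACTIVE_NODE_FRACTION = 3: if
 * the busiest node recorded max_faults = 900 CPU faults, a node counts
 * as active when faults * 3 > 900, i.e. above 300 faults.  Nodes with
 * 350 and 500 faults are active; one with 120 is not.
 */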
1739
1740/*
Rik van Riel04bb2f92013-10-07 11:29:36 +01001741 * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
1742 * increments. The more local the fault statistics are, the higher the scan
Rik van Riela22b4b02014-06-23 11:41:35 -04001743 * period will be for the next scan window. If the local/(local+remote) ratio
 1744 * is below NUMA_PERIOD_THRESHOLD (where the ratio ranges over 1..NUMA_PERIOD_SLOTS),
1745 * the scan period will decrease. Aim for 70% local accesses.
Rik van Riel04bb2f92013-10-07 11:29:36 +01001746 */
1747#define NUMA_PERIOD_SLOTS 10
Rik van Riela22b4b02014-06-23 11:41:35 -04001748#define NUMA_PERIOD_THRESHOLD 7
Rik van Riel04bb2f92013-10-07 11:29:36 +01001749
1750/*
1751 * Increase the scan period (slow down scanning) if the majority of
1752 * our memory is already on our local node, or if the majority of
1753 * the page accesses are shared with other processes.
1754 * Otherwise, decrease the scan period.
1755 */
1756static void update_task_scan_period(struct task_struct *p,
1757 unsigned long shared, unsigned long private)
1758{
1759 unsigned int period_slot;
1760 int ratio;
1761 int diff;
1762
1763 unsigned long remote = p->numa_faults_locality[0];
1764 unsigned long local = p->numa_faults_locality[1];
1765
1766 /*
 1767	 * If there were no recorded hinting faults then either the task is
 1768	 * completely idle or all activity is in areas that are not of interest
Mel Gorman074c2382015-03-25 15:55:42 -07001769	 * to automatic numa balancing. Related to that, if there were failed
 1770	 * migrations then it implies we are migrating too quickly or the local
 1771	 * node is overloaded. In either case, scan slower.
Rik van Riel04bb2f92013-10-07 11:29:36 +01001772 */
Mel Gorman074c2382015-03-25 15:55:42 -07001773 if (local + shared == 0 || p->numa_faults_locality[2]) {
Rik van Riel04bb2f92013-10-07 11:29:36 +01001774 p->numa_scan_period = min(p->numa_scan_period_max,
1775 p->numa_scan_period << 1);
1776
1777 p->mm->numa_next_scan = jiffies +
1778 msecs_to_jiffies(p->numa_scan_period);
1779
1780 return;
1781 }
1782
1783 /*
1784 * Prepare to scale scan period relative to the current period.
1785 * == NUMA_PERIOD_THRESHOLD scan period stays the same
1786 * < NUMA_PERIOD_THRESHOLD scan period decreases (scan faster)
1787 * >= NUMA_PERIOD_THRESHOLD scan period increases (scan slower)
1788 */
1789 period_slot = DIV_ROUND_UP(p->numa_scan_period, NUMA_PERIOD_SLOTS);
1790 ratio = (local * NUMA_PERIOD_SLOTS) / (local + remote);
1791 if (ratio >= NUMA_PERIOD_THRESHOLD) {
1792 int slot = ratio - NUMA_PERIOD_THRESHOLD;
1793 if (!slot)
1794 slot = 1;
1795 diff = slot * period_slot;
1796 } else {
1797 diff = -(NUMA_PERIOD_THRESHOLD - ratio) * period_slot;
1798
1799 /*
1800 * Scale scan rate increases based on sharing. There is an
1801 * inverse relationship between the degree of sharing and
1802 * the adjustment made to the scanning period. Broadly
1803 * speaking the intent is that there is little point
1804 * scanning faster if shared accesses dominate as it may
1805 * simply bounce migrations uselessly
1806 */
Yasuaki Ishimatsu2847c902014-10-22 16:04:35 +09001807 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
Rik van Riel04bb2f92013-10-07 11:29:36 +01001808 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1809 }
1810
1811 p->numa_scan_period = clamp(p->numa_scan_period + diff,
1812 task_scan_min(p), task_scan_max(p));
1813 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
1814}
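
/*
 * Worked example (illustrative): with numa_scan_period = 1000ms and
 * NUMA_PERIOD_SLOTS = 10, period_slot = 100ms.  A task with local = 80,
 * remote = 20 has ratio = (80 * 10) / 100 = 8, so slot = 8 - 7 = 1 and
 * diff = +100ms: mostly-local access slows scanning down.  With
 * local = 30, remote = 70 the ratio is 3 and the base decrease is
 * -(7 - 3) * 100ms = -400ms, which is then scaled by the
 * private/(private + shared) fault ratio before being applied.
 */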
1815
Rik van Riel7e2703e2014-01-27 17:03:45 -05001816/*
1817 * Get the fraction of time the task has been running since the last
1818 * NUMA placement cycle. The scheduler keeps similar statistics, but
1819 * decays those on a 32ms period, which is orders of magnitude off
1820 * from the dozens-of-seconds NUMA balancing period. Use the scheduler
1821 * stats only if the task is so new there are no NUMA statistics yet.
1822 */
1823static u64 numa_get_avg_runtime(struct task_struct *p, u64 *period)
1824{
1825 u64 runtime, delta, now;
1826 /* Use the start of this time slice to avoid calculations. */
1827 now = p->se.exec_start;
1828 runtime = p->se.sum_exec_runtime;
1829
1830 if (p->last_task_numa_placement) {
1831 delta = runtime - p->last_sum_exec_runtime;
1832 *period = now - p->last_task_numa_placement;
1833 } else {
Yuyang Du9d89c252015-07-15 08:04:37 +08001834 delta = p->se.avg.load_sum / p->se.load.weight;
1835 *period = LOAD_AVG_MAX;
Rik van Riel7e2703e2014-01-27 17:03:45 -05001836 }
1837
1838 p->last_sum_exec_runtime = runtime;
1839 p->last_task_numa_placement = now;
1840
1841 return delta;
1842}
1843
Rik van Riel54009412014-10-17 03:29:53 -04001844/*
1845 * Determine the preferred nid for a task in a numa_group. This needs to
1846 * be done in a way that produces consistent results with group_weight,
1847 * otherwise workloads might not converge.
1848 */
1849static int preferred_group_nid(struct task_struct *p, int nid)
1850{
1851 nodemask_t nodes;
1852 int dist;
1853
1854 /* Direct connections between all NUMA nodes. */
1855 if (sched_numa_topology_type == NUMA_DIRECT)
1856 return nid;
1857
1858 /*
1859 * On a system with glueless mesh NUMA topology, group_weight
1860 * scores nodes according to the number of NUMA hinting faults on
1861 * both the node itself, and on nearby nodes.
1862 */
1863 if (sched_numa_topology_type == NUMA_GLUELESS_MESH) {
1864 unsigned long score, max_score = 0;
1865 int node, max_node = nid;
1866
1867 dist = sched_max_numa_distance;
1868
1869 for_each_online_node(node) {
1870 score = group_weight(p, node, dist);
1871 if (score > max_score) {
1872 max_score = score;
1873 max_node = node;
1874 }
1875 }
1876 return max_node;
1877 }
1878
1879 /*
1880 * Finding the preferred nid in a system with NUMA backplane
1881 * interconnect topology is more involved. The goal is to locate
1882 * tasks from numa_groups near each other in the system, and
1883 * untangle workloads from different sides of the system. This requires
1884 * searching down the hierarchy of node groups, recursively searching
1885 * inside the highest scoring group of nodes. The nodemask tricks
1886 * keep the complexity of the search down.
1887 */
1888 nodes = node_online_map;
1889 for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
1890 unsigned long max_faults = 0;
Jan Beulich81907472015-01-23 08:25:38 +00001891 nodemask_t max_group = NODE_MASK_NONE;
Rik van Riel54009412014-10-17 03:29:53 -04001892 int a, b;
1893
1894 /* Are there nodes at this distance from each other? */
1895 if (!find_numa_distance(dist))
1896 continue;
1897
1898 for_each_node_mask(a, nodes) {
1899 unsigned long faults = 0;
1900 nodemask_t this_group;
1901 nodes_clear(this_group);
1902
1903 /* Sum group's NUMA faults; includes a==b case. */
1904 for_each_node_mask(b, nodes) {
1905 if (node_distance(a, b) < dist) {
1906 faults += group_faults(p, b);
1907 node_set(b, this_group);
1908 node_clear(b, nodes);
1909 }
1910 }
1911
1912 /* Remember the top group. */
1913 if (faults > max_faults) {
1914 max_faults = faults;
1915 max_group = this_group;
1916 /*
1917 * subtle: at the smallest distance there is
1918 * just one node left in each "group", the
1919 * winner is the preferred nid.
1920 */
1921 nid = a;
1922 }
1923 }
1924 /* Next round, evaluate the nodes within max_group. */
Jan Beulich890a5402015-02-09 12:30:00 +01001925 if (!max_faults)
1926 break;
Rik van Riel54009412014-10-17 03:29:53 -04001927 nodes = max_group;
1928 }
1929 return nid;
1930}
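
/*
 * Illustrative walk-through (assumed topology): on an 8-node backplane
 * system where nodes {0-3} and {4-7} sit on opposite sides, the first
 * iteration at the largest distance partitions the online nodes into
 * those two halves (node_distance(a, b) < dist within a half), sums
 * group_faults() per half, and keeps the heavier one, say {0-3}.
 * Later iterations at smaller distances repeat the partition-and-sum
 * inside {0-3} until each "group" is a single node, at which point
 * nid holds the preferred node.
 */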
1931
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001932static void task_numa_placement(struct task_struct *p)
1933{
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001934 int seq, nid, max_nid = -1, max_group_nid = -1;
1935 unsigned long max_faults = 0, max_group_faults = 0;
Rik van Riel04bb2f92013-10-07 11:29:36 +01001936 unsigned long fault_types[2] = { 0, 0 };
Rik van Riel7e2703e2014-01-27 17:03:45 -05001937 unsigned long total_faults;
1938 u64 runtime, period;
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001939 spinlock_t *group_lock = NULL;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001940
Jason Low7e5a2c12015-04-30 17:28:14 -07001941 /*
1942 * The p->mm->numa_scan_seq field gets updated without
1943 * exclusive access. Use READ_ONCE() here to ensure
1944 * that the field is read in a single access:
1945 */
Jason Low316c1608d2015-04-28 13:00:20 -07001946 seq = READ_ONCE(p->mm->numa_scan_seq);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001947 if (p->numa_scan_seq == seq)
1948 return;
1949 p->numa_scan_seq = seq;
Mel Gorman598f0ec2013-10-07 11:28:55 +01001950 p->numa_scan_period_max = task_scan_max(p);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02001951
Rik van Riel7e2703e2014-01-27 17:03:45 -05001952 total_faults = p->numa_faults_locality[0] +
1953 p->numa_faults_locality[1];
1954 runtime = numa_get_avg_runtime(p, &period);
1955
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001956 /* If the task is part of a group prevent parallel updates to group stats */
1957 if (p->numa_group) {
1958 group_lock = &p->numa_group->lock;
Mike Galbraith60e69ee2014-04-07 10:55:15 +02001959 spin_lock_irq(group_lock);
Mel Gorman7dbd13e2013-10-07 11:29:29 +01001960 }
1961
Mel Gorman688b7582013-10-07 11:28:58 +01001962 /* Find the node with the highest number of faults */
1963 for_each_online_node(nid) {
Iulia Manda44dba3d2014-10-31 02:13:31 +02001964 /* Keep track of the offsets in numa_faults array */
1965 int mem_idx, membuf_idx, cpu_idx, cpubuf_idx;
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001966 unsigned long faults = 0, group_faults = 0;
Iulia Manda44dba3d2014-10-31 02:13:31 +02001967 int priv;
Mel Gorman745d6142013-10-07 11:28:59 +01001968
Rik van Rielbe1e4e72014-01-27 17:03:48 -05001969 for (priv = 0; priv < NR_NUMA_HINT_FAULT_TYPES; priv++) {
Rik van Riel7e2703e2014-01-27 17:03:45 -05001970 long diff, f_diff, f_weight;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001971
Iulia Manda44dba3d2014-10-31 02:13:31 +02001972 mem_idx = task_faults_idx(NUMA_MEM, nid, priv);
1973 membuf_idx = task_faults_idx(NUMA_MEMBUF, nid, priv);
1974 cpu_idx = task_faults_idx(NUMA_CPU, nid, priv);
1975 cpubuf_idx = task_faults_idx(NUMA_CPUBUF, nid, priv);
Mel Gorman745d6142013-10-07 11:28:59 +01001976
Mel Gormanac8e8952013-10-07 11:29:03 +01001977 /* Decay existing window, copy faults since last scan */
Iulia Manda44dba3d2014-10-31 02:13:31 +02001978 diff = p->numa_faults[membuf_idx] - p->numa_faults[mem_idx] / 2;
1979 fault_types[priv] += p->numa_faults[membuf_idx];
1980 p->numa_faults[membuf_idx] = 0;
Mel Gormanfb13c7e2013-10-07 11:29:17 +01001981
Rik van Riel7e2703e2014-01-27 17:03:45 -05001982 /*
1983 * Normalize the faults_from, so all tasks in a group
1984 * count according to CPU use, instead of by the raw
1985 * number of faults. Tasks with little runtime have
1986 * little over-all impact on throughput, and thus their
1987 * faults are less important.
1988 */
1989 f_weight = div64_u64(runtime << 16, period + 1);
Iulia Manda44dba3d2014-10-31 02:13:31 +02001990 f_weight = (f_weight * p->numa_faults[cpubuf_idx]) /
Rik van Riel7e2703e2014-01-27 17:03:45 -05001991 (total_faults + 1);
Iulia Manda44dba3d2014-10-31 02:13:31 +02001992 f_diff = f_weight - p->numa_faults[cpu_idx] / 2;
1993 p->numa_faults[cpubuf_idx] = 0;
Rik van Riel50ec8a42014-01-27 17:03:42 -05001994
Iulia Manda44dba3d2014-10-31 02:13:31 +02001995 p->numa_faults[mem_idx] += diff;
1996 p->numa_faults[cpu_idx] += f_diff;
1997 faults += p->numa_faults[mem_idx];
Mel Gorman83e1d2c2013-10-07 11:29:27 +01001998 p->total_numa_faults += diff;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01001999 if (p->numa_group) {
Iulia Manda44dba3d2014-10-31 02:13:31 +02002000 /*
2001 * safe because we can only change our own group
2002 *
2003 * mem_idx represents the offset for a given
2004 * nid and priv in a specific region because it
2005 * is at the beginning of the numa_faults array.
2006 */
2007 p->numa_group->faults[mem_idx] += diff;
2008 p->numa_group->faults_cpu[mem_idx] += f_diff;
Mel Gorman989348b2013-10-07 11:29:40 +01002009 p->numa_group->total_faults += diff;
Iulia Manda44dba3d2014-10-31 02:13:31 +02002010 group_faults += p->numa_group->faults[mem_idx];
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002011 }
Mel Gormanac8e8952013-10-07 11:29:03 +01002012 }
2013
Mel Gorman688b7582013-10-07 11:28:58 +01002014 if (faults > max_faults) {
2015 max_faults = faults;
2016 max_nid = nid;
2017 }
Mel Gorman83e1d2c2013-10-07 11:29:27 +01002018
2019 if (group_faults > max_group_faults) {
2020 max_group_faults = group_faults;
2021 max_group_nid = nid;
2022 }
2023 }
2024
Rik van Riel04bb2f92013-10-07 11:29:36 +01002025 update_task_scan_period(p, fault_types[0], fault_types[1]);
2026
Mel Gorman7dbd13e2013-10-07 11:29:29 +01002027 if (p->numa_group) {
Rik van Riel4142c3e2016-01-25 17:07:39 -05002028 numa_group_count_active_nodes(p->numa_group);
Mike Galbraith60e69ee2014-04-07 10:55:15 +02002029 spin_unlock_irq(group_lock);
Rik van Riel54009412014-10-17 03:29:53 -04002030 max_nid = preferred_group_nid(p, max_group_nid);
Mel Gorman688b7582013-10-07 11:28:58 +01002031 }
2032
Rik van Rielbb97fc32014-06-04 16:33:15 -04002033 if (max_faults) {
2034 /* Set the new preferred node */
2035 if (max_nid != p->numa_preferred_nid)
2036 sched_setnuma(p, max_nid);
2037
2038 if (task_node(p) != p->numa_preferred_nid)
2039 numa_migrate_preferred(p);
Mel Gorman3a7053b2013-10-07 11:29:00 +01002040 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002041}
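
/*
 * Worked decay example (illustrative): if a node's long-term counter
 * holds numa_faults[mem_idx] = 400 and the last scan window buffered
 * numa_faults[membuf_idx] = 100 new faults, then
 *
 *   diff = 100 - 400 / 2 = -100
 *
 * and the counter settles at 300: roughly half the history is aged out
 * each scan sequence while the fresh faults are folded in.
 */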
2042
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002043static inline int get_numa_group(struct numa_group *grp)
2044{
2045 return atomic_inc_not_zero(&grp->refcount);
2046}
2047
2048static inline void put_numa_group(struct numa_group *grp)
2049{
2050 if (atomic_dec_and_test(&grp->refcount))
2051 kfree_rcu(grp, rcu);
2052}
2053
Mel Gorman3e6a9412013-10-07 11:29:35 +01002054static void task_numa_group(struct task_struct *p, int cpupid, int flags,
2055 int *priv)
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002056{
2057 struct numa_group *grp, *my_grp;
2058 struct task_struct *tsk;
2059 bool join = false;
2060 int cpu = cpupid_to_cpu(cpupid);
2061 int i;
2062
2063 if (unlikely(!p->numa_group)) {
2064 unsigned int size = sizeof(struct numa_group) +
Rik van Riel50ec8a42014-01-27 17:03:42 -05002065 4*nr_node_ids*sizeof(unsigned long);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002066
2067 grp = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
2068 if (!grp)
2069 return;
2070
2071 atomic_set(&grp->refcount, 1);
Rik van Riel4142c3e2016-01-25 17:07:39 -05002072 grp->active_nodes = 1;
2073 grp->max_faults_cpu = 0;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002074 spin_lock_init(&grp->lock);
Mel Gormane29cf082013-10-07 11:29:22 +01002075 grp->gid = p->pid;
Rik van Riel50ec8a42014-01-27 17:03:42 -05002076 /* Second half of the array tracks nids where faults happen */
Rik van Rielbe1e4e72014-01-27 17:03:48 -05002077 grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
2078 nr_node_ids;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002079
Rik van Rielbe1e4e72014-01-27 17:03:48 -05002080 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
Iulia Manda44dba3d2014-10-31 02:13:31 +02002081 grp->faults[i] = p->numa_faults[i];
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002082
Mel Gorman989348b2013-10-07 11:29:40 +01002083 grp->total_faults = p->total_numa_faults;
Mel Gorman83e1d2c2013-10-07 11:29:27 +01002084
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002085 grp->nr_tasks++;
2086 rcu_assign_pointer(p->numa_group, grp);
2087 }
2088
2089 rcu_read_lock();
Jason Low316c1608d2015-04-28 13:00:20 -07002090 tsk = READ_ONCE(cpu_rq(cpu)->curr);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002091
2092 if (!cpupid_match_pid(tsk, cpupid))
Peter Zijlstra33547812013-10-09 10:24:48 +02002093 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002094
2095 grp = rcu_dereference(tsk->numa_group);
2096 if (!grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02002097 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002098
2099 my_grp = p->numa_group;
2100 if (grp == my_grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02002101 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002102
2103 /*
2104 * Only join the other group if its bigger; if we're the bigger group,
2105 * the other task will join us.
2106 */
2107 if (my_grp->nr_tasks > grp->nr_tasks)
Peter Zijlstra33547812013-10-09 10:24:48 +02002108 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002109
2110 /*
2111 * Tie-break on the grp address.
2112 */
2113 if (my_grp->nr_tasks == grp->nr_tasks && my_grp > grp)
Peter Zijlstra33547812013-10-09 10:24:48 +02002114 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002115
Rik van Rieldabe1d92013-10-07 11:29:34 +01002116 /* Always join threads in the same process. */
2117 if (tsk->mm == current->mm)
2118 join = true;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002119
Rik van Rieldabe1d92013-10-07 11:29:34 +01002120 /* Simple filter to avoid false positives due to PID collisions */
2121 if (flags & TNF_SHARED)
2122 join = true;
2123
Mel Gorman3e6a9412013-10-07 11:29:35 +01002124 /* Update priv based on whether false sharing was detected */
2125 *priv = !join;
2126
Rik van Rieldabe1d92013-10-07 11:29:34 +01002127 if (join && !get_numa_group(grp))
Peter Zijlstra33547812013-10-09 10:24:48 +02002128 goto no_join;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002129
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002130 rcu_read_unlock();
2131
2132 if (!join)
2133 return;
2134
Mike Galbraith60e69ee2014-04-07 10:55:15 +02002135 BUG_ON(irqs_disabled());
2136 double_lock_irq(&my_grp->lock, &grp->lock);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002137
Rik van Rielbe1e4e72014-01-27 17:03:48 -05002138 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++) {
Iulia Manda44dba3d2014-10-31 02:13:31 +02002139 my_grp->faults[i] -= p->numa_faults[i];
2140 grp->faults[i] += p->numa_faults[i];
Mel Gorman989348b2013-10-07 11:29:40 +01002141 }
2142 my_grp->total_faults -= p->total_numa_faults;
2143 grp->total_faults += p->total_numa_faults;
2144
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002145 my_grp->nr_tasks--;
2146 grp->nr_tasks++;
2147
2148 spin_unlock(&my_grp->lock);
Mike Galbraith60e69ee2014-04-07 10:55:15 +02002149 spin_unlock_irq(&grp->lock);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002150
2151 rcu_assign_pointer(p->numa_group, grp);
2152
2153 put_numa_group(my_grp);
Peter Zijlstra33547812013-10-09 10:24:48 +02002154 return;
2155
2156no_join:
2157 rcu_read_unlock();
2158 return;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002159}
2160
2161void task_numa_free(struct task_struct *p)
2162{
2163 struct numa_group *grp = p->numa_group;
Iulia Manda44dba3d2014-10-31 02:13:31 +02002164 void *numa_faults = p->numa_faults;
Steven Rostedte9dd6852014-05-27 17:02:04 -04002165 unsigned long flags;
2166 int i;
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002167
2168 if (grp) {
Steven Rostedte9dd6852014-05-27 17:02:04 -04002169 spin_lock_irqsave(&grp->lock, flags);
Rik van Rielbe1e4e72014-01-27 17:03:48 -05002170 for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
Iulia Manda44dba3d2014-10-31 02:13:31 +02002171 grp->faults[i] -= p->numa_faults[i];
Mel Gorman989348b2013-10-07 11:29:40 +01002172 grp->total_faults -= p->total_numa_faults;
2173
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002174 grp->nr_tasks--;
Steven Rostedte9dd6852014-05-27 17:02:04 -04002175 spin_unlock_irqrestore(&grp->lock, flags);
Andreea-Cristina Bernat35b123e2014-08-22 17:50:43 +03002176 RCU_INIT_POINTER(p->numa_group, NULL);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002177 put_numa_group(grp);
2178 }
2179
Iulia Manda44dba3d2014-10-31 02:13:31 +02002180 p->numa_faults = NULL;
Rik van Riel82727012013-10-07 11:29:28 +01002181 kfree(numa_faults);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002182}
2183
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002184/*
2185 * Got a PROT_NONE fault for a page on @node.
2186 */
Rik van Riel58b46da2014-01-27 17:03:47 -05002187void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002188{
2189 struct task_struct *p = current;
Peter Zijlstra6688cc02013-10-07 11:29:24 +01002190 bool migrated = flags & TNF_MIGRATED;
Rik van Riel58b46da2014-01-27 17:03:47 -05002191 int cpu_node = task_node(current);
Rik van Riel792568e2014-04-11 13:00:27 -04002192 int local = !!(flags & TNF_FAULT_LOCAL);
Rik van Riel4142c3e2016-01-25 17:07:39 -05002193 struct numa_group *ng;
Mel Gormanac8e8952013-10-07 11:29:03 +01002194 int priv;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002195
Srikar Dronamraju2a595722015-08-11 21:54:21 +05302196 if (!static_branch_likely(&sched_numa_balancing))
Mel Gorman1a687c22012-11-22 11:16:36 +00002197 return;
2198
Mel Gorman9ff1d9f2013-10-07 11:29:04 +01002199 /* for example, ksmd faulting in a user's mm */
2200 if (!p->mm)
2201 return;
2202
Mel Gormanf809ca92013-10-07 11:28:57 +01002203 /* Allocate buffer to track faults on a per-node basis */
Iulia Manda44dba3d2014-10-31 02:13:31 +02002204 if (unlikely(!p->numa_faults)) {
2205 int size = sizeof(*p->numa_faults) *
Rik van Rielbe1e4e72014-01-27 17:03:48 -05002206 NR_NUMA_HINT_FAULT_BUCKETS * nr_node_ids;
Mel Gormanf809ca92013-10-07 11:28:57 +01002207
Iulia Manda44dba3d2014-10-31 02:13:31 +02002208 p->numa_faults = kzalloc(size, GFP_KERNEL|__GFP_NOWARN);
2209 if (!p->numa_faults)
Mel Gormanf809ca92013-10-07 11:28:57 +01002210 return;
Mel Gorman745d6142013-10-07 11:28:59 +01002211
Mel Gorman83e1d2c2013-10-07 11:29:27 +01002212 p->total_numa_faults = 0;
Rik van Riel04bb2f92013-10-07 11:29:36 +01002213 memset(p->numa_faults_locality, 0, sizeof(p->numa_faults_locality));
Mel Gormanf809ca92013-10-07 11:28:57 +01002214 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002215
Mel Gormanfb003b82012-11-15 09:01:14 +00002216 /*
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002217 * First accesses are treated as private, otherwise consider accesses
2218 * to be private if the accessing pid has not changed
2219 */
2220 if (unlikely(last_cpupid == (-1 & LAST_CPUPID_MASK))) {
2221 priv = 1;
2222 } else {
2223 priv = cpupid_match_pid(p, last_cpupid);
Peter Zijlstra6688cc02013-10-07 11:29:24 +01002224 if (!priv && !(flags & TNF_NO_GROUP))
Mel Gorman3e6a9412013-10-07 11:29:35 +01002225 task_numa_group(p, last_cpupid, flags, &priv);
Peter Zijlstra8c8a7432013-10-07 11:29:21 +01002226 }
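
	/*
	 * Illustrative note on the check above: last_cpupid packs the
	 * last accessing CPU and the low bits of its pid (typically kept
	 * in the page flags).  If the same thread faults on the page
	 * again, cpupid_match_pid() sees matching pid bits and the
	 * access counts as private; a mismatch suggests sharing.  Only
	 * the low pid bits are stored, hence the PID-collision filter in
	 * task_numa_group().
	 */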
2227
Rik van Riel792568e2014-04-11 13:00:27 -04002228 /*
2229 * If a workload spans multiple NUMA nodes, a shared fault that
2230 * occurs wholly within the set of nodes that the workload is
2231 * actively using should be counted as local. This allows the
2232 * scan rate to slow down when a workload has settled down.
2233 */
Rik van Riel4142c3e2016-01-25 17:07:39 -05002234 ng = p->numa_group;
2235 if (!priv && !local && ng && ng->active_nodes > 1 &&
2236 numa_is_active_node(cpu_node, ng) &&
2237 numa_is_active_node(mem_node, ng))
Rik van Riel792568e2014-04-11 13:00:27 -04002238 local = 1;
2239
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002240 task_numa_placement(p);
Mel Gormanf809ca92013-10-07 11:28:57 +01002241
Rik van Riel2739d3e2013-10-07 11:29:41 +01002242 /*
 2243	 * Retry migrating the task to its preferred node periodically, in
 2244	 * case it previously failed, or the scheduler moved us.
2245 */
2246 if (time_after(jiffies, p->numa_migrate_retry))
Mel Gorman6b9a7462013-10-07 11:29:11 +01002247 numa_migrate_preferred(p);
2248
Ingo Molnarb32e86b2013-10-07 11:29:30 +01002249 if (migrated)
2250 p->numa_pages_migrated += pages;
Mel Gorman074c2382015-03-25 15:55:42 -07002251 if (flags & TNF_MIGRATE_FAIL)
2252 p->numa_faults_locality[2] += pages;
Ingo Molnarb32e86b2013-10-07 11:29:30 +01002253
Iulia Manda44dba3d2014-10-31 02:13:31 +02002254 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;
2255 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;
Rik van Riel792568e2014-04-11 13:00:27 -04002256 p->numa_faults_locality[local] += pages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002257}
2258
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002259static void reset_ptenuma_scan(struct task_struct *p)
2260{
Jason Low7e5a2c12015-04-30 17:28:14 -07002261 /*
2262 * We only did a read acquisition of the mmap sem, so
2263 * p->mm->numa_scan_seq is written to without exclusive access
2264 * and the update is not guaranteed to be atomic. That's not
2265 * much of an issue though, since this is just used for
2266 * statistical sampling. Use READ_ONCE/WRITE_ONCE, which are not
2267 * expensive, to avoid any form of compiler optimizations:
2268 */
Jason Low316c1608d2015-04-28 13:00:20 -07002269 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002270 p->mm->numa_scan_offset = 0;
2271}
2272
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002273/*
2274 * The expensive part of numa migration is done from task_work context.
2275 * Triggered from task_tick_numa().
2276 */
2277void task_numa_work(struct callback_head *work)
2278{
2279 unsigned long migrate, next_scan, now = jiffies;
2280 struct task_struct *p = current;
2281 struct mm_struct *mm = p->mm;
Rik van Riel51170842015-11-05 15:56:23 -05002282 u64 runtime = p->se.sum_exec_runtime;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002283 struct vm_area_struct *vma;
Mel Gorman9f406042012-11-14 18:34:32 +00002284 unsigned long start, end;
Mel Gorman598f0ec2013-10-07 11:28:55 +01002285 unsigned long nr_pte_updates = 0;
Rik van Riel4620f8c2015-09-11 09:00:27 -04002286 long pages, virtpages;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002287
2288 WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
2289
2290 work->next = work; /* protect against double add */
2291 /*
2292 * Who cares about NUMA placement when they're dying.
2293 *
2294 * NOTE: make sure not to dereference p->mm before this check,
2295 * exit_task_work() happens _after_ exit_mm() so we could be called
2296 * without p->mm even though we still had it when we enqueued this
2297 * work.
2298 */
2299 if (p->flags & PF_EXITING)
2300 return;
2301
Mel Gorman930aa172013-10-07 11:29:37 +01002302 if (!mm->numa_next_scan) {
Mel Gorman7e8d16b2013-10-07 11:28:54 +01002303 mm->numa_next_scan = now +
2304 msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
Mel Gormanb8593bf2012-11-21 01:18:23 +00002305 }
2306
2307 /*
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002308 * Enforce maximal scan/migration frequency..
2309 */
2310 migrate = mm->numa_next_scan;
2311 if (time_before(now, migrate))
2312 return;
2313
Mel Gorman598f0ec2013-10-07 11:28:55 +01002314 if (p->numa_scan_period == 0) {
2315 p->numa_scan_period_max = task_scan_max(p);
2316 p->numa_scan_period = task_scan_min(p);
2317 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002318
Mel Gormanfb003b82012-11-15 09:01:14 +00002319 next_scan = now + msecs_to_jiffies(p->numa_scan_period);
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002320 if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
2321 return;
2322
Mel Gormane14808b2012-11-19 10:59:15 +00002323 /*
Peter Zijlstra19a78d12013-10-07 11:28:51 +01002324 * Delay this task enough that another task of this mm will likely win
2325 * the next time around.
2326 */
2327 p->node_stamp += 2 * TICK_NSEC;
2328
Mel Gorman9f406042012-11-14 18:34:32 +00002329 start = mm->numa_scan_offset;
2330 pages = sysctl_numa_balancing_scan_size;
2331 pages <<= 20 - PAGE_SHIFT; /* MB in pages */
Rik van Riel4620f8c2015-09-11 09:00:27 -04002332 virtpages = pages * 8; /* Scan up to this much virtual space */
Mel Gorman9f406042012-11-14 18:34:32 +00002333 if (!pages)
2334 return;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002335
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002337 down_read(&mm->mmap_sem);
Mel Gorman9f406042012-11-14 18:34:32 +00002338 vma = find_vma(mm, start);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002339 if (!vma) {
2340 reset_ptenuma_scan(p);
Mel Gorman9f406042012-11-14 18:34:32 +00002341 start = 0;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002342 vma = mm->mmap;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002343 }
Mel Gorman9f406042012-11-14 18:34:32 +00002344 for (; vma; vma = vma->vm_next) {
Naoya Horiguchi6b79c572015-04-07 14:26:47 -07002345 if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
Mel Gorman8e76d4e2015-06-10 11:15:00 -07002346 is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002347 continue;
Naoya Horiguchi6b79c572015-04-07 14:26:47 -07002348 }
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002349
Mel Gorman4591ce4f2013-10-07 11:29:13 +01002350 /*
2351 * Shared library pages mapped by multiple processes are not
2352 * migrated as it is expected they are cache replicated. Avoid
2353 * hinting faults in read-only file-backed mappings or the vdso
2354 * as migrating the pages will be of marginal benefit.
2355 */
2356 if (!vma->vm_mm ||
2357 (vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
2358 continue;
2359
Mel Gorman3c67f472013-12-18 17:08:40 -08002360 /*
2361 * Skip inaccessible VMAs to avoid any confusion between
2362 * PROT_NONE and NUMA hinting ptes
2363 */
2364 if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
2365 continue;
2366
Mel Gorman9f406042012-11-14 18:34:32 +00002367 do {
2368 start = max(start, vma->vm_start);
2369 end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
2370 end = min(end, vma->vm_end);
Rik van Riel4620f8c2015-09-11 09:00:27 -04002371 nr_pte_updates = change_prot_numa(vma, start, end);
Mel Gorman598f0ec2013-10-07 11:28:55 +01002372
2373 /*
Rik van Riel4620f8c2015-09-11 09:00:27 -04002374			 * Try to scan sysctl_numa_balancing_scan_size worth of
2375 * hpages that have at least one present PTE that
2376 * is not already pte-numa. If the VMA contains
2377 * areas that are unused or already full of prot_numa
2378 * PTEs, scan up to virtpages, to skip through those
2379 * areas faster.
Mel Gorman598f0ec2013-10-07 11:28:55 +01002380 */
2381 if (nr_pte_updates)
2382 pages -= (end - start) >> PAGE_SHIFT;
Rik van Riel4620f8c2015-09-11 09:00:27 -04002383 virtpages -= (end - start) >> PAGE_SHIFT;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002384
Mel Gorman9f406042012-11-14 18:34:32 +00002385 start = end;
Rik van Riel4620f8c2015-09-11 09:00:27 -04002386 if (pages <= 0 || virtpages <= 0)
Mel Gorman9f406042012-11-14 18:34:32 +00002387 goto out;
Rik van Riel3cf19622014-02-18 17:12:44 -05002388
2389 cond_resched();
Mel Gorman9f406042012-11-14 18:34:32 +00002390 } while (end != vma->vm_end);
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002391 }
2392
Mel Gorman9f406042012-11-14 18:34:32 +00002393out:
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002394 /*
Peter Zijlstrac69307d2013-10-07 11:28:41 +01002395 * It is possible to reach the end of the VMA list but the last few
 2396	 * VMAs are not guaranteed to be vma_migratable. If they are not, we
2397 * would find the !migratable VMA on the next scan but not reset the
2398 * scanner to the start so check it now.
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002399 */
2400 if (vma)
Mel Gorman9f406042012-11-14 18:34:32 +00002401 mm->numa_scan_offset = start;
Peter Zijlstra6e5fb222012-10-25 14:16:45 +02002402 else
2403 reset_ptenuma_scan(p);
2404 up_read(&mm->mmap_sem);
Rik van Riel51170842015-11-05 15:56:23 -05002405
2406 /*
2407 * Make sure tasks use at least 32x as much time to run other code
2408 * than they used here, to limit NUMA PTE scanning overhead to 3% max.
2409 * Usually update_task_scan_period slows down scanning enough; on an
2410 * overloaded system we need to limit overhead on a per task basis.
2411 */
2412 if (unlikely(p->se.sum_exec_runtime != runtime)) {
2413 u64 diff = p->se.sum_exec_runtime - runtime;
2414 p->node_stamp += 32 * diff;
2415 }
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002416}
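
/*
 * Worked pacing example (illustrative, assuming 4K pages): with the
 * default sysctl_numa_balancing_scan_size = 256 (MB),
 *
 *   pages     = 256 << (20 - 12) = 65536 pages (256MB)
 *   virtpages = 65536 * 8        = 524288 pages (2GB of virtual space)
 *
 * so one invocation marks at most 256MB worth of resident pages
 * PROT_NONE, but gives up after walking 2GB of possibly sparse virtual
 * address space.
 */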
2417
2418/*
2419 * Drive the periodic memory faults..
2420 */
2421void task_tick_numa(struct rq *rq, struct task_struct *curr)
2422{
2423 struct callback_head *work = &curr->numa_work;
2424 u64 period, now;
2425
2426 /*
2427 * We don't care about NUMA placement if we don't have memory.
2428 */
2429 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work)
2430 return;
2431
2432 /*
2433 * Using runtime rather than walltime has the dual advantage that
2434 * we (mostly) drive the selection from busy threads and that the
2435 * task needs to have done some actual work before we bother with
2436 * NUMA placement.
2437 */
2438 now = curr->se.sum_exec_runtime;
2439 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC;
2440
Rik van Riel25b3e5a2015-11-05 15:56:22 -05002441 if (now > curr->node_stamp + period) {
Peter Zijlstra4b96a292012-10-25 14:16:47 +02002442 if (!curr->node_stamp)
Mel Gorman598f0ec2013-10-07 11:28:55 +01002443 curr->numa_scan_period = task_scan_min(curr);
Peter Zijlstra19a78d12013-10-07 11:28:51 +01002444 curr->node_stamp += period;
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002445
2446 if (!time_before(jiffies, curr->mm->numa_next_scan)) {
2447 init_task_work(work, task_numa_work); /* TODO: move this into sched_fork() */
2448 task_work_add(curr, work, true);
2449 }
2450 }
2451}
2452#else
2453static void task_tick_numa(struct rq *rq, struct task_struct *curr)
2454{
2455}
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002456
2457static inline void account_numa_enqueue(struct rq *rq, struct task_struct *p)
2458{
2459}
2460
2461static inline void account_numa_dequeue(struct rq *rq, struct task_struct *p)
2462{
2463}
Peter Zijlstracbee9f82012-10-25 14:16:43 +02002464#endif /* CONFIG_NUMA_BALANCING */
2465
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002466static void
2467account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2468{
2469 update_load_add(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02002470 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02002471 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
Peter Zijlstra367456c2012-02-20 21:49:09 +01002472#ifdef CONFIG_SMP
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002473 if (entity_is_task(se)) {
2474 struct rq *rq = rq_of(cfs_rq);
2475
2476 account_numa_enqueue(rq, task_of(se));
2477 list_add(&se->group_node, &rq->cfs_tasks);
2478 }
Peter Zijlstra367456c2012-02-20 21:49:09 +01002479#endif
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002480 cfs_rq->nr_running++;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002481}
2482
2483static void
2484account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
2485{
2486 update_load_sub(&cfs_rq->load, se->load.weight);
Peter Zijlstrac09595f2008-06-27 13:41:14 +02002487 if (!parent_entity(se))
Peter Zijlstra029632f2011-10-25 10:00:11 +02002488 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
Tim Chenbfdb1982016-02-01 14:47:59 -08002489#ifdef CONFIG_SMP
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002490 if (entity_is_task(se)) {
2491 account_numa_dequeue(rq_of(cfs_rq), task_of(se));
Bharata B Raob87f1722008-09-25 09:53:54 +05302492 list_del_init(&se->group_node);
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01002493 }
Tim Chenbfdb1982016-02-01 14:47:59 -08002494#endif
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002495 cfs_rq->nr_running--;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02002496}
2497
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002498#ifdef CONFIG_FAIR_GROUP_SCHED
2499# ifdef CONFIG_SMP
Paul Turner6d5ab292011-01-21 20:45:01 -08002500static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002501{
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002502 long tg_weight, load, shares;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002503
Peter Zijlstraea1dc6f2016-06-24 16:11:02 +02002504 /*
2505 * This really should be: cfs_rq->avg.load_avg, but instead we use
2506 * cfs_rq->load.weight, which is its upper bound. This helps ramp up
2507 * the shares for small weight interactive tasks.
2508 */
2509 load = scale_load_down(cfs_rq->load.weight);
2510
2511 tg_weight = atomic_long_read(&tg->load_avg);
2512
2513 /* Ensure tg_weight >= load */
2514 tg_weight -= cfs_rq->tg_load_avg_contrib;
2515 tg_weight += load;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002516
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002517 shares = (tg->shares * load);
Peter Zijlstracf5f0ac2011-10-13 16:52:28 +02002518 if (tg_weight)
2519 shares /= tg_weight;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002520
2521 if (shares < MIN_SHARES)
2522 shares = MIN_SHARES;
2523 if (shares > tg->shares)
2524 shares = tg->shares;
2525
2526 return shares;
2527}
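
/*
 * Worked example (illustrative): a group with tg->shares = 1024 where
 * this cfs_rq contributes load = 512 out of tg_weight = 2048 receives
 *
 *   shares = (1024 * 512) / 2048 = 256
 *
 * before the clamp to [MIN_SHARES, tg->shares].
 */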
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002528# else /* CONFIG_SMP */
Paul Turner6d5ab292011-01-21 20:45:01 -08002529static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002530{
2531 return tg->shares;
2532}
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002533# endif /* CONFIG_SMP */
Peter Zijlstraea1dc6f2016-06-24 16:11:02 +02002534
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002535static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
2536 unsigned long weight)
2537{
Paul Turner19e5eeb2010-12-15 19:10:18 -08002538 if (se->on_rq) {
2539 /* commit outstanding execution time */
2540 if (cfs_rq->curr == se)
2541 update_curr(cfs_rq);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002542 account_entity_dequeue(cfs_rq, se);
Paul Turner19e5eeb2010-12-15 19:10:18 -08002543 }
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002544
2545 update_load_set(&se->load, weight);
2546
2547 if (se->on_rq)
2548 account_entity_enqueue(cfs_rq, se);
2549}
2550
Paul Turner82958362012-10-04 13:18:31 +02002551static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
2552
Paul Turner6d5ab292011-01-21 20:45:01 -08002553static void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002554{
2555 struct task_group *tg;
2556 struct sched_entity *se;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002557 long shares;
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002558
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002559 tg = cfs_rq->tg;
2560 se = tg->se[cpu_of(rq_of(cfs_rq))];
Paul Turner64660c82011-07-21 09:43:36 -07002561 if (!se || throttled_hierarchy(cfs_rq))
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002562 return;
Yong Zhang3ff6dca2011-01-24 15:33:52 +08002563#ifndef CONFIG_SMP
2564 if (likely(se->load.weight == tg->shares))
2565 return;
2566#endif
Paul Turner6d5ab292011-01-21 20:45:01 -08002567 shares = calc_cfs_shares(cfs_rq, tg);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002568
2569 reweight_entity(cfs_rq_of(se), se, shares);
2570}
2571#else /* CONFIG_FAIR_GROUP_SCHED */
Paul Turner6d5ab292011-01-21 20:45:01 -08002572static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
Peter Zijlstra2069dd72010-11-15 15:47:00 -08002573{
2574}
2575#endif /* CONFIG_FAIR_GROUP_SCHED */
2576
Alex Shi141965c2013-06-26 13:05:39 +08002577#ifdef CONFIG_SMP
Paul Turner5b51f2f2012-10-04 13:18:32 +02002578/* Precomputed fixed-point multipliers (y^n * 2^32) for multiplication by y^n */
2579static const u32 runnable_avg_yN_inv[] = {
2580 0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
2581 0xe0ccdeeb, 0xdbfbb796, 0xd744fcc9, 0xd2a81d91, 0xce248c14, 0xc9b9bd85,
2582 0xc5672a10, 0xc12c4cc9, 0xbd08a39e, 0xb8fbaf46, 0xb504f333, 0xb123f581,
2583 0xad583ee9, 0xa9a15ab4, 0xa5fed6a9, 0xa2704302, 0x9ef5325f, 0x9b8d39b9,
2584 0x9837f050, 0x94f4efa8, 0x91c3d373, 0x8ea4398a, 0x8b95c1e3, 0x88980e80,
2585 0x85aac367, 0x82cd8698,
2586};
2587
2588/*
2589 * Precomputed \Sum y^k { 1<=k<=n }. These are floor(true_value) to prevent
2590 * over-estimates when re-combining.
2591 */
2592static const u32 runnable_avg_yN_sum[] = {
2593 0, 1002, 1982, 2941, 3880, 4798, 5697, 6576, 7437, 8279, 9103,
2594 9909,10698,11470,12226,12966,13690,14398,15091,15769,16433,17082,
2595 17718,18340,18949,19545,20128,20698,21256,21802,22336,22859,23371,
2596};
2597
2598/*
Yuyang Du7b20b912016-05-03 05:54:27 +08002599 * Precomputed \Sum y^k { 1<=k<=n, where n%32=0 }. Values are rolled down to
 2600	 * lower integers. See Documentation/scheduler/sched-avg.txt for how these
 2601	 * were generated:
2602 */
2603static const u32 __accumulated_sum_N32[] = {
2604 0, 23371, 35056, 40899, 43820, 45281,
2605 46011, 46376, 46559, 46650, 46696, 46719,
2606};
2607
2608/*
Paul Turner9d85f212012-10-04 13:18:29 +02002609 * Approximate:
2610 * val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
2611 */
2612static __always_inline u64 decay_load(u64 val, u64 n)
2613{
Paul Turner5b51f2f2012-10-04 13:18:32 +02002614 unsigned int local_n;
2615
2616 if (!n)
2617 return val;
2618 else if (unlikely(n > LOAD_AVG_PERIOD * 63))
2619 return 0;
2620
2621 /* after bounds checking we can collapse to 32-bit */
2622 local_n = n;
2623
2624 /*
2625 * As y^PERIOD = 1/2, we can combine
Zhihui Zhang9c58c792014-09-20 21:24:36 -04002626 * y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
 2627	 * with a look-up table which covers y^n (n < PERIOD),
Paul Turner5b51f2f2012-10-04 13:18:32 +02002628 *
 2629	 * to achieve constant-time decay_load.
2630 */
2631 if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
2632 val >>= local_n / LOAD_AVG_PERIOD;
2633 local_n %= LOAD_AVG_PERIOD;
Paul Turner9d85f212012-10-04 13:18:29 +02002634 }
2635
Yuyang Du9d89c252015-07-15 08:04:37 +08002636 val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
2637 return val;
Paul Turner5b51f2f2012-10-04 13:18:32 +02002638}
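/*
 * For example, decay_load(1024, 1) multiplies by
 * runnable_avg_yN_inv[1] / 2^32 ~= 0.97857 and yields 1002 (matching
 * runnable_avg_yN_sum[1] above), while decay_load(1024, 32) first
 * shifts right by 32/LOAD_AVG_PERIOD = 1 and returns ~512 (511
 * exactly, since runnable_avg_yN_inv[0] is 0xffffffff rather than a
 * true 1.0): one full period halves the contribution.
 */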
2639
2640/*
2641 * For updates fully spanning n periods, the contribution to runnable
2642 * average will be: \Sum 1024*y^n
2643 *
2644 * We can compute this reasonably efficiently by combining:
2645 * y^PERIOD = 1/2 with precomputed \Sum 1024*y^n {for n <PERIOD}
2646 */
2647static u32 __compute_runnable_contrib(u64 n)
2648{
2649 u32 contrib = 0;
2650
2651 if (likely(n <= LOAD_AVG_PERIOD))
2652 return runnable_avg_yN_sum[n];
2653 else if (unlikely(n >= LOAD_AVG_MAX_N))
2654 return LOAD_AVG_MAX;
2655
Yuyang Du7b20b912016-05-03 05:54:27 +08002656 /* Since n < LOAD_AVG_MAX_N, n/LOAD_AVG_PERIOD < 11 */
2657 contrib = __accumulated_sum_N32[n/LOAD_AVG_PERIOD];
2658 n %= LOAD_AVG_PERIOD;
Paul Turner5b51f2f2012-10-04 13:18:32 +02002659 contrib = decay_load(contrib, n);
2660 return contrib + runnable_avg_yN_sum[n];
Paul Turner9d85f212012-10-04 13:18:29 +02002661}
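/*
 * For example, n = 40 takes the slow path:
 * __accumulated_sum_N32[40/32] = 23371 covers the oldest 32 periods,
 * decay_load(23371, 40 % 32 = 8) ~= 19652 ages them into place, and
 * runnable_avg_yN_sum[8] = 7437 adds the 8 most recent periods, for a
 * total of ~27089 (against the LOAD_AVG_MAX ceiling of a permanently
 * runnable entity).
 */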
2662
Peter Zijlstra54a21382015-09-07 15:05:42 +02002663#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
Dietmar Eggemanne0f5f3a2015-08-14 17:23:09 +01002664
Paul Turner9d85f212012-10-04 13:18:29 +02002665/*
2666 * We can represent the historical contribution to runnable average as the
2667 * coefficients of a geometric series. To do this we sub-divide our runnable
2668 * history into segments of approximately 1ms (1024us); label the segment that
2669 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
2670 *
2671 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
2672 * p0 p1 p2
2673 * (now) (~1ms ago) (~2ms ago)
2674 *
2675 * Let u_i denote the fraction of p_i that the entity was runnable.
2676 *
 2677	 * We then designate the fractions u_i as our coefficients, yielding the
2678 * following representation of historical load:
2679 * u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
2680 *
 2681	 * We choose y based on the width of a reasonable scheduling period, fixing:
2682 * y^32 = 0.5
2683 *
2684 * This means that the contribution to load ~32ms ago (u_32) will be weighted
2685 * approximately half as much as the contribution to load within the last ms
2686 * (u_0).
2687 *
2688 * When a period "rolls over" and we have new u_0`, multiplying the previous
2689 * sum again by y is sufficient to update:
2690 * load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
2691 * = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
2692 */
Yuyang Du9d89c252015-07-15 08:04:37 +08002693static __always_inline int
2694__update_load_avg(u64 now, int cpu, struct sched_avg *sa,
Yuyang Du13962232015-07-15 08:04:41 +08002695 unsigned long weight, int running, struct cfs_rq *cfs_rq)
Paul Turner9d85f212012-10-04 13:18:29 +02002696{
Dietmar Eggemanne0f5f3a2015-08-14 17:23:09 +01002697 u64 delta, scaled_delta, periods;
Yuyang Du9d89c252015-07-15 08:04:37 +08002698 u32 contrib;
Peter Zijlstra6115c792015-09-07 15:09:15 +02002699 unsigned int delta_w, scaled_delta_w, decayed = 0;
Dietmar Eggemann6f2b0452015-09-07 14:57:22 +01002700 unsigned long scale_freq, scale_cpu;
Paul Turner9d85f212012-10-04 13:18:29 +02002701
Yuyang Du9d89c252015-07-15 08:04:37 +08002702 delta = now - sa->last_update_time;
Paul Turner9d85f212012-10-04 13:18:29 +02002703 /*
2704 * This should only happen when time goes backwards, which it
2705 * unfortunately does during sched clock init when we swap over to TSC.
2706 */
2707 if ((s64)delta < 0) {
Yuyang Du9d89c252015-07-15 08:04:37 +08002708 sa->last_update_time = now;
Paul Turner9d85f212012-10-04 13:18:29 +02002709 return 0;
2710 }
2711
2712 /*
2713 * Use 1024ns as the unit of measurement since it's a reasonable
2714 * approximation of 1us and fast to compute.
2715 */
2716 delta >>= 10;
2717 if (!delta)
2718 return 0;
Yuyang Du9d89c252015-07-15 08:04:37 +08002719 sa->last_update_time = now;
Paul Turner9d85f212012-10-04 13:18:29 +02002720
Dietmar Eggemann6f2b0452015-09-07 14:57:22 +01002721 scale_freq = arch_scale_freq_capacity(NULL, cpu);
2722 scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
2723
Paul Turner9d85f212012-10-04 13:18:29 +02002724 /* delta_w is the amount already accumulated against our next period */
Yuyang Du9d89c252015-07-15 08:04:37 +08002725 delta_w = sa->period_contrib;
Paul Turner9d85f212012-10-04 13:18:29 +02002726 if (delta + delta_w >= 1024) {
Paul Turner9d85f212012-10-04 13:18:29 +02002727 decayed = 1;
2728
Yuyang Du9d89c252015-07-15 08:04:37 +08002729		/* the contribution to the next period starts over; we don't know it yet */
2730 sa->period_contrib = 0;
2731
Paul Turner9d85f212012-10-04 13:18:29 +02002732 /*
2733 * Now that we know we're crossing a period boundary, figure
2734 * out how much from delta we need to complete the current
2735 * period and accrue it.
2736 */
2737 delta_w = 1024 - delta_w;
Peter Zijlstra54a21382015-09-07 15:05:42 +02002738 scaled_delta_w = cap_scale(delta_w, scale_freq);
Yuyang Du13962232015-07-15 08:04:41 +08002739 if (weight) {
Dietmar Eggemanne0f5f3a2015-08-14 17:23:09 +01002740 sa->load_sum += weight * scaled_delta_w;
2741 if (cfs_rq) {
2742 cfs_rq->runnable_load_sum +=
2743 weight * scaled_delta_w;
2744 }
Yuyang Du13962232015-07-15 08:04:41 +08002745 }
Vincent Guittot36ee28e2015-02-27 16:54:04 +01002746 if (running)
Peter Zijlstra006cdf02015-09-09 09:06:17 +02002747 sa->util_sum += scaled_delta_w * scale_cpu;
Paul Turner9d85f212012-10-04 13:18:29 +02002748
Paul Turner5b51f2f2012-10-04 13:18:32 +02002749 delta -= delta_w;
Paul Turner9d85f212012-10-04 13:18:29 +02002750
Paul Turner5b51f2f2012-10-04 13:18:32 +02002751 /* Figure out how many additional periods this update spans */
2752 periods = delta / 1024;
2753 delta %= 1024;
2754
Yuyang Du9d89c252015-07-15 08:04:37 +08002755 sa->load_sum = decay_load(sa->load_sum, periods + 1);
Yuyang Du13962232015-07-15 08:04:41 +08002756 if (cfs_rq) {
2757 cfs_rq->runnable_load_sum =
2758 decay_load(cfs_rq->runnable_load_sum, periods + 1);
2759 }
Yuyang Du9d89c252015-07-15 08:04:37 +08002760 sa->util_sum = decay_load((u64)(sa->util_sum), periods + 1);
Paul Turner5b51f2f2012-10-04 13:18:32 +02002761
2762 /* Efficiently calculate \sum (1..n_period) 1024*y^i */
Yuyang Du9d89c252015-07-15 08:04:37 +08002763 contrib = __compute_runnable_contrib(periods);
Peter Zijlstra54a21382015-09-07 15:05:42 +02002764 contrib = cap_scale(contrib, scale_freq);
Yuyang Du13962232015-07-15 08:04:41 +08002765 if (weight) {
Yuyang Du9d89c252015-07-15 08:04:37 +08002766 sa->load_sum += weight * contrib;
Yuyang Du13962232015-07-15 08:04:41 +08002767 if (cfs_rq)
2768 cfs_rq->runnable_load_sum += weight * contrib;
2769 }
Vincent Guittot36ee28e2015-02-27 16:54:04 +01002770 if (running)
Peter Zijlstra006cdf02015-09-09 09:06:17 +02002771 sa->util_sum += contrib * scale_cpu;
Paul Turner9d85f212012-10-04 13:18:29 +02002772 }
2773
2774 /* Remainder of delta accrued against u_0` */
Peter Zijlstra54a21382015-09-07 15:05:42 +02002775 scaled_delta = cap_scale(delta, scale_freq);
Yuyang Du13962232015-07-15 08:04:41 +08002776 if (weight) {
Dietmar Eggemanne0f5f3a2015-08-14 17:23:09 +01002777 sa->load_sum += weight * scaled_delta;
Yuyang Du13962232015-07-15 08:04:41 +08002778 if (cfs_rq)
Dietmar Eggemanne0f5f3a2015-08-14 17:23:09 +01002779 cfs_rq->runnable_load_sum += weight * scaled_delta;
Yuyang Du13962232015-07-15 08:04:41 +08002780 }
Vincent Guittot36ee28e2015-02-27 16:54:04 +01002781 if (running)
Peter Zijlstra006cdf02015-09-09 09:06:17 +02002782 sa->util_sum += scaled_delta * scale_cpu;
Yuyang Du9d89c252015-07-15 08:04:37 +08002783
2784 sa->period_contrib += delta;
2785
2786 if (decayed) {
2787 sa->load_avg = div_u64(sa->load_sum, LOAD_AVG_MAX);
Yuyang Du13962232015-07-15 08:04:41 +08002788 if (cfs_rq) {
2789 cfs_rq->runnable_load_avg =
2790 div_u64(cfs_rq->runnable_load_sum, LOAD_AVG_MAX);
2791 }
Peter Zijlstra006cdf02015-09-09 09:06:17 +02002792 sa->util_avg = sa->util_sum / LOAD_AVG_MAX;
Yuyang Du9d89c252015-07-15 08:04:37 +08002793 }
Paul Turner9d85f212012-10-04 13:18:29 +02002794
2795 return decayed;
2796}
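/*
 * A walk-through with illustrative numbers, ignoring the freq/capacity
 * scaling: suppose sa->period_contrib = 512 and delta = 2048 (in
 * 1024ns units). Then delta + delta_w = 2560 >= 1024, so we first
 * accrue delta_w = 1024 - 512 = 512 to close the open period; the
 * remaining 1536 spans periods = 1 full period (the sums are decayed
 * by decay_load(..., periods + 1) and __compute_runnable_contrib(1) =
 * 1002 is accrued); and the final delta = 512 accrues against the new
 * u_0`, leaving sa->period_contrib = 512.
 */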
2797
Paul Turnerc566e8e2012-10-04 13:18:30 +02002798#ifdef CONFIG_FAIR_GROUP_SCHED
Paul Turnerbb17f652012-10-04 13:18:31 +02002799/*
Yuyang Du9d89c252015-07-15 08:04:37 +08002800 * Updating tg's load_avg is necessary before update_cfs_shares() (which is
 2801	 * done) and before effective_load() (which is not done, because it is too costly).
Paul Turnerbb17f652012-10-04 13:18:31 +02002802 */
Yuyang Du9d89c252015-07-15 08:04:37 +08002803static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
Paul Turnerbb17f652012-10-04 13:18:31 +02002804{
Yuyang Du9d89c252015-07-15 08:04:37 +08002805 long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
Paul Turnerbb17f652012-10-04 13:18:31 +02002806
Waiman Longaa0b7ae2015-12-02 13:41:50 -05002807 /*
2808 * No need to update load_avg for root_task_group as it is not used.
2809 */
2810 if (cfs_rq->tg == &root_task_group)
2811 return;
2812
Yuyang Du9d89c252015-07-15 08:04:37 +08002813 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
2814 atomic_long_add(delta, &cfs_rq->tg->load_avg);
2815 cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
Paul Turnerbb17f652012-10-04 13:18:31 +02002816 }
Paul Turner8165e142012-10-04 13:18:31 +02002817}
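/*
 * The delta/64 filter above trades accuracy for fewer writes to the
 * shared tg->load_avg cacheline: e.g. with tg_load_avg_contrib = 6400,
 * only a change larger than 100 (~1.6%) is propagated, so small load
 * fluctuations do not bounce the atomic between CPUs.
 */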
Dietmar Eggemannf5f97392014-02-26 11:19:33 +00002818
Byungchul Parkad936d82015-10-24 01:16:19 +09002819/*
2820 * Called within set_task_rq() right before setting a task's cpu. The
2821 * caller only guarantees p->pi_lock is held; no other assumptions,
2822 * including the state of rq->lock, should be made.
2823 */
2824void set_task_rq_fair(struct sched_entity *se,
2825 struct cfs_rq *prev, struct cfs_rq *next)
2826{
2827 if (!sched_feat(ATTACH_AGE_LOAD))
2828 return;
2829
2830 /*
 2831	 * We are supposed to update the task to "current" time, so that it is up to
 2832	 * date and ready to go to the new CPU/cfs_rq. But we have difficulty
 2833	 * getting what the current time is, so simply throw away the out-of-date
 2834	 * time. This results in the wakee task being less decayed, but giving
 2835	 * the wakee more load is not a bad trade-off.
2836 */
2837 if (se->avg.last_update_time && prev) {
2838 u64 p_last_update_time;
2839 u64 n_last_update_time;
2840
2841#ifndef CONFIG_64BIT
2842 u64 p_last_update_time_copy;
2843 u64 n_last_update_time_copy;
2844
2845 do {
2846 p_last_update_time_copy = prev->load_last_update_time_copy;
2847 n_last_update_time_copy = next->load_last_update_time_copy;
2848
2849 smp_rmb();
2850
2851 p_last_update_time = prev->avg.last_update_time;
2852 n_last_update_time = next->avg.last_update_time;
2853
2854 } while (p_last_update_time != p_last_update_time_copy ||
2855 n_last_update_time != n_last_update_time_copy);
2856#else
2857 p_last_update_time = prev->avg.last_update_time;
2858 n_last_update_time = next->avg.last_update_time;
2859#endif
2860 __update_load_avg(p_last_update_time, cpu_of(rq_of(prev)),
2861 &se->avg, 0, 0, NULL);
2862 se->avg.last_update_time = n_last_update_time;
2863 }
2864}
Peter Zijlstra6e831252014-02-11 16:11:48 +01002865#else /* CONFIG_FAIR_GROUP_SCHED */
Yuyang Du9d89c252015-07-15 08:04:37 +08002866static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
Peter Zijlstra6e831252014-02-11 16:11:48 +01002867#endif /* CONFIG_FAIR_GROUP_SCHED */
Paul Turnerc566e8e2012-10-04 13:18:30 +02002868
Paul Turnerf1b17282012-10-04 13:18:31 +02002869static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
2870
Steve Mucklea2c6c912016-03-24 15:26:07 -07002871static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
Yuyang Du9d89c252015-07-15 08:04:37 +08002872{
Steve Muckle21e96f82016-03-21 17:21:07 -07002873 struct rq *rq = rq_of(cfs_rq);
Steve Muckle21e96f82016-03-21 17:21:07 -07002874 int cpu = cpu_of(rq);
Yuyang Du9d89c252015-07-15 08:04:37 +08002875
Steve Mucklea2c6c912016-03-24 15:26:07 -07002876 if (cpu == smp_processor_id() && &rq->cfs == cfs_rq) {
Steve Muckle21e96f82016-03-21 17:21:07 -07002877 unsigned long max = rq->cpu_capacity_orig;
2878
2879 /*
2880 * There are a few boundary cases this might miss but it should
 2881	 * get called often enough that this should (hopefully) not be
 2882	 * a real problem -- on top of that, it is only called for the local
 2883	 * CPU, so if we enqueue remotely we'll miss an update, but
 2884	 * the next tick/schedule should update it.
2885 *
2886 * It will not get called when we go idle, because the idle
2887 * thread is a different class (!fair), nor will the utilization
2888 * number include things like RT tasks.
2889 *
2890 * As is, the util number is not freq-invariant (we'd have to
2891 * implement arch_scale_freq_capacity() for that).
2892 *
2893 * See cpu_util().
2894 */
2895 cpufreq_update_util(rq_clock(rq),
Steve Mucklea2c6c912016-03-24 15:26:07 -07002896 min(cfs_rq->avg.util_avg, max), max);
Steve Muckle21e96f82016-03-21 17:21:07 -07002897 }
Steve Mucklea2c6c912016-03-24 15:26:07 -07002898}
2899
Peter Zijlstra89741892016-06-16 10:50:40 +02002900/*
2901 * Unsigned subtract and clamp on underflow.
2902 *
2903 * Explicitly do a load-store to ensure the intermediate value never hits
2904 * memory. This allows lockless observations without ever seeing the negative
2905 * values.
2906 */
2907#define sub_positive(_ptr, _val) do { \
2908 typeof(_ptr) ptr = (_ptr); \
2909 typeof(*ptr) val = (_val); \
2910 typeof(*ptr) res, var = READ_ONCE(*ptr); \
2911 res = var - val; \
2912 if (res > var) \
2913 res = 0; \
2914 WRITE_ONCE(*ptr, res); \
2915} while (0)
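/*
 * For example, with *ptr == 3 and _val == 5 the unsigned subtraction
 * wraps around; the 'res > var' test detects the wrap and clamps the
 * result to 0, and the single WRITE_ONCE() ensures a lockless reader
 * can never observe the wrapped intermediate value.
 */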
2916
Steve Mucklea2c6c912016-03-24 15:26:07 -07002917/* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
2918static inline int
2919update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
2920{
2921 struct sched_avg *sa = &cfs_rq->avg;
2922 int decayed, removed_load = 0, removed_util = 0;
2923
2924 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
2925 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
Peter Zijlstra89741892016-06-16 10:50:40 +02002926 sub_positive(&sa->load_avg, r);
2927 sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
Steve Mucklea2c6c912016-03-24 15:26:07 -07002928 removed_load = 1;
2929 }
2930
2931 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
2932 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
Peter Zijlstra89741892016-06-16 10:50:40 +02002933 sub_positive(&sa->util_avg, r);
2934 sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
Steve Mucklea2c6c912016-03-24 15:26:07 -07002935 removed_util = 1;
2936 }
2937
2938 decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
2939 scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq);
2940
2941#ifndef CONFIG_64BIT
2942 smp_wmb();
2943 cfs_rq->load_last_update_time_copy = sa->last_update_time;
2944#endif
2945
2946 if (update_freq && (decayed || removed_util))
2947 cfs_rq_util_change(cfs_rq);
Steve Muckle21e96f82016-03-21 17:21:07 -07002948
Steve Muckle41e0d372016-03-21 17:21:08 -07002949 return decayed || removed_load;
Yuyang Du9d89c252015-07-15 08:04:37 +08002950}
2951
2952/* Update task and its cfs_rq load average */
2953static inline void update_load_avg(struct sched_entity *se, int update_tg)
Paul Turner9d85f212012-10-04 13:18:29 +02002954{
Paul Turner2dac7542012-10-04 13:18:30 +02002955 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Yuyang Du9d89c252015-07-15 08:04:37 +08002956 u64 now = cfs_rq_clock_task(cfs_rq);
Rafael J. Wysocki34e2c552016-02-15 20:20:42 +01002957 struct rq *rq = rq_of(cfs_rq);
2958 int cpu = cpu_of(rq);
Paul Turner2dac7542012-10-04 13:18:30 +02002959
Paul Turnerf1b17282012-10-04 13:18:31 +02002960 /*
Yuyang Du9d89c252015-07-15 08:04:37 +08002961	 * Track the task's load average for carrying it to its new CPU after it migrates, and
 2962	 * track the group sched_entity's load average for the task_h_load calculation in migration.
Paul Turnerf1b17282012-10-04 13:18:31 +02002963 */
Yuyang Du9d89c252015-07-15 08:04:37 +08002964 __update_load_avg(now, cpu, &se->avg,
Byungchul Parka05e8c52015-08-20 20:21:56 +09002965 se->on_rq * scale_load_down(se->load.weight),
2966 cfs_rq->curr == se, NULL);
Paul Turnerf1b17282012-10-04 13:18:31 +02002967
Steve Mucklea2c6c912016-03-24 15:26:07 -07002968 if (update_cfs_rq_load_avg(now, cfs_rq, true) && update_tg)
Yuyang Du9d89c252015-07-15 08:04:37 +08002969 update_tg_load_avg(cfs_rq, 0);
2970}
Paul Turner2dac7542012-10-04 13:18:30 +02002971
Byungchul Parka05e8c52015-08-20 20:21:56 +09002972static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
2973{
Peter Zijlstraa9280512015-09-11 16:10:59 +02002974 if (!sched_feat(ATTACH_AGE_LOAD))
2975 goto skip_aging;
2976
Byungchul Park6efdb102015-08-20 20:21:59 +09002977 /*
2978 * If we got migrated (either between CPUs or between cgroups) we'll
2979 * have aged the average right before clearing @last_update_time.
2980 */
2981 if (se->avg.last_update_time) {
2982 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
2983 &se->avg, 0, 0, NULL);
2984
2985 /*
2986 * XXX: we could have just aged the entire load away if we've been
2987 * absent from the fair class for too long.
2988 */
2989 }
2990
Peter Zijlstraa9280512015-09-11 16:10:59 +02002991skip_aging:
Byungchul Parka05e8c52015-08-20 20:21:56 +09002992 se->avg.last_update_time = cfs_rq->avg.last_update_time;
2993 cfs_rq->avg.load_avg += se->avg.load_avg;
2994 cfs_rq->avg.load_sum += se->avg.load_sum;
2995 cfs_rq->avg.util_avg += se->avg.util_avg;
2996 cfs_rq->avg.util_sum += se->avg.util_sum;
Steve Mucklea2c6c912016-03-24 15:26:07 -07002997
2998 cfs_rq_util_change(cfs_rq);
Byungchul Parka05e8c52015-08-20 20:21:56 +09002999}
3000
3001static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3002{
3003 __update_load_avg(cfs_rq->avg.last_update_time, cpu_of(rq_of(cfs_rq)),
3004 &se->avg, se->on_rq * scale_load_down(se->load.weight),
3005 cfs_rq->curr == se, NULL);
3006
Peter Zijlstra89741892016-06-16 10:50:40 +02003007 sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
3008 sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
3009 sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3010 sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
Steve Mucklea2c6c912016-03-24 15:26:07 -07003011
3012 cfs_rq_util_change(cfs_rq);
Byungchul Parka05e8c52015-08-20 20:21:56 +09003013}
3014
Yuyang Du9d89c252015-07-15 08:04:37 +08003015/* Add the load generated by se into cfs_rq's load average */
3016static inline void
3017enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3018{
3019 struct sched_avg *sa = &se->avg;
3020 u64 now = cfs_rq_clock_task(cfs_rq);
Byungchul Parka05e8c52015-08-20 20:21:56 +09003021 int migrated, decayed;
Paul Turner9ee474f2012-10-04 13:18:30 +02003022
Byungchul Parka05e8c52015-08-20 20:21:56 +09003023 migrated = !sa->last_update_time;
3024 if (!migrated) {
Yuyang Du9d89c252015-07-15 08:04:37 +08003025 __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
Yuyang Du13962232015-07-15 08:04:41 +08003026 se->on_rq * scale_load_down(se->load.weight),
3027 cfs_rq->curr == se, NULL);
Yuyang Du9d89c252015-07-15 08:04:37 +08003028 }
3029
Steve Mucklea2c6c912016-03-24 15:26:07 -07003030 decayed = update_cfs_rq_load_avg(now, cfs_rq, !migrated);
Yuyang Du9d89c252015-07-15 08:04:37 +08003031
Yuyang Du13962232015-07-15 08:04:41 +08003032 cfs_rq->runnable_load_avg += sa->load_avg;
3033 cfs_rq->runnable_load_sum += sa->load_sum;
3034
Byungchul Parka05e8c52015-08-20 20:21:56 +09003035 if (migrated)
3036 attach_entity_load_avg(cfs_rq, se);
Yuyang Du9d89c252015-07-15 08:04:37 +08003037
3038 if (decayed || migrated)
3039 update_tg_load_avg(cfs_rq, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02003040}
3041
Yuyang Du13962232015-07-15 08:04:41 +08003042/* Remove the runnable load generated by se from cfs_rq's runnable load average */
3043static inline void
3044dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3045{
3046 update_load_avg(se, 1);
3047
3048 cfs_rq->runnable_load_avg =
3049 max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
3050 cfs_rq->runnable_load_sum =
Byungchul Parka05e8c52015-08-20 20:21:56 +09003051 max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
Yuyang Du13962232015-07-15 08:04:41 +08003052}
3053
Yuyang Du0905f042015-12-17 07:34:27 +08003054#ifndef CONFIG_64BIT
3055static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3056{
3057 u64 last_update_time_copy;
3058 u64 last_update_time;
3059
3060 do {
3061 last_update_time_copy = cfs_rq->load_last_update_time_copy;
3062 smp_rmb();
3063 last_update_time = cfs_rq->avg.last_update_time;
3064 } while (last_update_time != last_update_time_copy);
3065
3066 return last_update_time;
3067}
3068#else
3069static inline u64 cfs_rq_last_update_time(struct cfs_rq *cfs_rq)
3070{
3071 return cfs_rq->avg.last_update_time;
3072}
3073#endif
3074
Paul Turner9ee474f2012-10-04 13:18:30 +02003075/*
Yuyang Du9d89c252015-07-15 08:04:37 +08003076 * The task first catches up with the cfs_rq, and then subtracts
 3077	 * itself from the cfs_rq (the task must be off the queue by now).
Paul Turner9ee474f2012-10-04 13:18:30 +02003078 */
Yuyang Du9d89c252015-07-15 08:04:37 +08003079void remove_entity_load_avg(struct sched_entity *se)
Paul Turner9ee474f2012-10-04 13:18:30 +02003080{
Yuyang Du9d89c252015-07-15 08:04:37 +08003081 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3082 u64 last_update_time;
Paul Turner9ee474f2012-10-04 13:18:30 +02003083
Yuyang Du0905f042015-12-17 07:34:27 +08003084 /*
 3085	 * A newly created task, or a never-used group entity, should not be removed
 3086	 * from its (source) cfs_rq.
3087 */
3088 if (se->avg.last_update_time == 0)
3089 return;
Paul Turner9ee474f2012-10-04 13:18:30 +02003090
Yuyang Du0905f042015-12-17 07:34:27 +08003091 last_update_time = cfs_rq_last_update_time(cfs_rq);
Paul Turner9ee474f2012-10-04 13:18:30 +02003092
Yuyang Du13962232015-07-15 08:04:41 +08003093 __update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
Yuyang Du9d89c252015-07-15 08:04:37 +08003094 atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
3095 atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
Paul Turner2dac7542012-10-04 13:18:30 +02003096}
Vincent Guittot642dbc32013-04-18 18:34:26 +02003097
Yuyang Du7ea241a2015-07-15 08:04:42 +08003098static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
3099{
3100 return cfs_rq->runnable_load_avg;
3101}
3102
3103static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
3104{
3105 return cfs_rq->avg.load_avg;
3106}
3107
Peter Zijlstra6e831252014-02-11 16:11:48 +01003108static int idle_balance(struct rq *this_rq);
3109
Peter Zijlstra38033c32014-01-23 20:32:21 +01003110#else /* CONFIG_SMP */
3111
Rafael J. Wysocki536bd002016-05-06 14:58:43 +02003112static inline void update_load_avg(struct sched_entity *se, int not_used)
3113{
3114 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3115 struct rq *rq = rq_of(cfs_rq);
3116
3117 cpufreq_trigger_update(rq_clock(rq));
3118}
3119
Yuyang Du9d89c252015-07-15 08:04:37 +08003120static inline void
3121enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
Yuyang Du13962232015-07-15 08:04:41 +08003122static inline void
3123dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
Yuyang Du9d89c252015-07-15 08:04:37 +08003124static inline void remove_entity_load_avg(struct sched_entity *se) {}
Peter Zijlstra6e831252014-02-11 16:11:48 +01003125
Byungchul Parka05e8c52015-08-20 20:21:56 +09003126static inline void
3127attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3128static inline void
3129detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
3130
Peter Zijlstra6e831252014-02-11 16:11:48 +01003131static inline int idle_balance(struct rq *rq)
3132{
3133 return 0;
3134}
3135
Peter Zijlstra38033c32014-01-23 20:32:21 +01003136#endif /* CONFIG_SMP */
Paul Turner9d85f212012-10-04 13:18:29 +02003137
Ingo Molnar2396af62007-08-09 11:16:48 +02003138static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003139{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003140#ifdef CONFIG_SCHEDSTATS
Peter Zijlstrae4143142009-07-23 20:13:26 +02003141 struct task_struct *tsk = NULL;
3142
3143 if (entity_is_task(se))
3144 tsk = task_of(se);
3145
Lucas De Marchi41acab82010-03-10 23:37:45 -03003146 if (se->statistics.sleep_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003147 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.sleep_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003148
3149 if ((s64)delta < 0)
3150 delta = 0;
3151
Lucas De Marchi41acab82010-03-10 23:37:45 -03003152 if (unlikely(delta > se->statistics.sleep_max))
3153 se->statistics.sleep_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003154
Peter Zijlstra8c79a042012-01-30 14:51:37 +01003155 se->statistics.sleep_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03003156 se->statistics.sum_sleep_runtime += delta;
Arjan van de Ven97455122008-01-25 21:08:34 +01003157
Peter Zijlstra768d0c22009-07-23 20:13:26 +02003158 if (tsk) {
Peter Zijlstrae4143142009-07-23 20:13:26 +02003159 account_scheduler_latency(tsk, delta >> 10, 1);
Peter Zijlstra768d0c22009-07-23 20:13:26 +02003160 trace_sched_stat_sleep(tsk, delta);
3161 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003162 }
Lucas De Marchi41acab82010-03-10 23:37:45 -03003163 if (se->statistics.block_start) {
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003164 u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.block_start;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003165
3166 if ((s64)delta < 0)
3167 delta = 0;
3168
Lucas De Marchi41acab82010-03-10 23:37:45 -03003169 if (unlikely(delta > se->statistics.block_max))
3170 se->statistics.block_max = delta;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003171
Peter Zijlstra8c79a042012-01-30 14:51:37 +01003172 se->statistics.block_start = 0;
Lucas De Marchi41acab82010-03-10 23:37:45 -03003173 se->statistics.sum_sleep_runtime += delta;
Ingo Molnar30084fb2007-10-02 14:13:08 +02003174
Peter Zijlstrae4143142009-07-23 20:13:26 +02003175 if (tsk) {
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07003176 if (tsk->in_iowait) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03003177 se->statistics.iowait_sum += delta;
3178 se->statistics.iowait_count++;
Peter Zijlstra768d0c22009-07-23 20:13:26 +02003179 trace_sched_stat_iowait(tsk, delta);
Arjan van de Ven8f0dfc32009-07-20 11:26:58 -07003180 }
3181
Andrew Vaginb781a602011-11-28 12:03:35 +03003182 trace_sched_stat_blocked(tsk, delta);
3183
Peter Zijlstrae4143142009-07-23 20:13:26 +02003184 /*
3185 * Blocking time is in units of nanosecs, so shift by
3186 * 20 to get a milliseconds-range estimation of the
3187 * amount of time that the task spent sleeping:
3188 */
3189 if (unlikely(prof_on == SLEEP_PROFILING)) {
3190 profile_hits(SLEEP_PROFILING,
3191 (void *)get_wchan(tsk),
3192 delta >> 20);
3193 }
3194 account_scheduler_latency(tsk, delta >> 10, 0);
Ingo Molnar30084fb2007-10-02 14:13:08 +02003195 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003196 }
3197#endif
3198}
3199
Peter Zijlstraddc97292007-10-15 17:00:10 +02003200static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
3201{
3202#ifdef CONFIG_SCHED_DEBUG
3203 s64 d = se->vruntime - cfs_rq->min_vruntime;
3204
3205 if (d < 0)
3206 d = -d;
3207
3208 if (d > 3*sysctl_sched_latency)
3209 schedstat_inc(cfs_rq, nr_spread_over);
3210#endif
3211}
3212
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003213static void
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02003214place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
3215{
Peter Zijlstra1af5f732008-10-24 11:06:13 +02003216 u64 vruntime = cfs_rq->min_vruntime;
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02003217
Peter Zijlstra2cb86002007-11-09 22:39:37 +01003218 /*
 3219	 * The 'current' period is already promised to the current tasks;
 3220	 * however, the extra weight of the new task will slow them down a
 3221	 * little. Place the new task so that it fits into the slot that
 3222	 * stays open at the end.
3223 */
Peter Zijlstra94dfb5e2007-10-15 17:00:05 +02003224 if (initial && sched_feat(START_DEBIT))
Peter Zijlstraf9c0b092008-10-17 19:27:04 +02003225 vruntime += sched_vslice(cfs_rq, se);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02003226
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02003227 /* sleeps up to a single latency don't count. */
Mike Galbraith5ca98802010-03-11 17:17:17 +01003228 if (!initial) {
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02003229 unsigned long thresh = sysctl_sched_latency;
Peter Zijlstraa7be37a2008-06-27 13:41:11 +02003230
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02003231 /*
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02003232 * Halve their sleep time's effect, to allow
3233 * for a gentler effect of sleepers:
3234 */
3235 if (sched_feat(GENTLE_FAIR_SLEEPERS))
3236 thresh >>= 1;
Ingo Molnar51e03042009-09-16 08:54:45 +02003237
Mike Galbraitha2e7a7e2009-09-18 09:19:25 +02003238 vruntime -= thresh;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02003239 }
3240
Mike Galbraithb5d9d732009-09-08 11:12:28 +02003241 /* ensure we never gain time by being placed backwards. */
Viresh Kumar16c8f1c2012-11-08 13:33:46 +05303242 se->vruntime = max_vruntime(se->vruntime, vruntime);
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02003243}
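/*
 * For example, with min_vruntime = 100ms and the unscaled defaults:
 * a forked task (initial && START_DEBIT) is placed one vslice after
 * 100ms, paying for its first slice up front, while a waking sleeper
 * is placed up to half the 6ms latency (3ms, with
 * GENTLE_FAIR_SLEEPERS) before 100ms -- but never behind its own old
 * vruntime, so it cannot gain time by sleeping.
 */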
3244
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003245static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
3246
Mel Gormancb251762016-02-05 09:08:36 +00003247static inline void check_schedstat_required(void)
3248{
3249#ifdef CONFIG_SCHEDSTATS
3250 if (schedstat_enabled())
3251 return;
3252
3253 /* Force schedstat enabled if a dependent tracepoint is active */
3254 if (trace_sched_stat_wait_enabled() ||
3255 trace_sched_stat_sleep_enabled() ||
3256 trace_sched_stat_iowait_enabled() ||
3257 trace_sched_stat_blocked_enabled() ||
3258 trace_sched_stat_runtime_enabled()) {
Josh Poimboeufeda8dca2016-06-13 02:32:09 -05003259 printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
Mel Gormancb251762016-02-05 09:08:36 +00003260 "stat_blocked and stat_runtime require the "
3261 "kernel parameter schedstats=enabled or "
3262 "kernel.sched_schedstats=1\n");
3263 }
3264#endif
3265}
3266
Peter Zijlstrab5179ac2016-05-11 16:10:34 +02003267
3268/*
3269 * MIGRATION
3270 *
3271 * dequeue
3272 * update_curr()
3273 * update_min_vruntime()
3274 * vruntime -= min_vruntime
3275 *
3276 * enqueue
3277 * update_curr()
3278 * update_min_vruntime()
3279 * vruntime += min_vruntime
3280 *
3281 * this way the vruntime transition between RQs is done when both
3282 * min_vruntime are up-to-date.
3283 *
3284 * WAKEUP (remote)
3285 *
Peter Zijlstra59efa0b2016-05-10 18:24:37 +02003286 * ->migrate_task_rq_fair() (p->state == TASK_WAKING)
Peter Zijlstrab5179ac2016-05-11 16:10:34 +02003287 * vruntime -= min_vruntime
3288 *
3289 * enqueue
3290 * update_curr()
3291 * update_min_vruntime()
3292 * vruntime += min_vruntime
3293 *
 3294	 * this way we tolerate a possibly stale min_vruntime on the originating
 3295	 * CPU while still getting an up-to-date min_vruntime on the destination CPU.
3296 */
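/*
 * For example, a task at vruntime 105ms dequeued from a CPU whose
 * min_vruntime is 100ms carries a relative lag of 5ms; enqueued on a
 * CPU whose min_vruntime is 200ms it becomes 205ms, preserving its
 * position relative to its new peers rather than any absolute clock.
 */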
3297
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02003298static void
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003299enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003300{
Peter Zijlstra2f950352016-05-11 19:27:56 +02003301 bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATED);
3302 bool curr = cfs_rq->curr == se;
Peter Zijlstra3a47d512016-03-09 13:04:03 +01003303
Ingo Molnar53d3bc72016-05-11 08:25:53 +02003304 /*
Peter Zijlstra2f950352016-05-11 19:27:56 +02003305 * If we're the current task, we must renormalise before calling
3306 * update_curr().
Ingo Molnar53d3bc72016-05-11 08:25:53 +02003307 */
Peter Zijlstra2f950352016-05-11 19:27:56 +02003308 if (renorm && curr)
3309 se->vruntime += cfs_rq->min_vruntime;
3310
Ingo Molnarb7cc0892007-08-09 11:16:47 +02003311 update_curr(cfs_rq);
Peter Zijlstra2f950352016-05-11 19:27:56 +02003312
3313 /*
3314 * Otherwise, renormalise after, such that we're placed at the current
3315 * moment in time, instead of some random moment in the past. Being
3316 * placed in the past could significantly boost this task to the
3317 * fairness detriment of existing tasks.
3318 */
3319 if (renorm && !curr)
3320 se->vruntime += cfs_rq->min_vruntime;
3321
Yuyang Du9d89c252015-07-15 08:04:37 +08003322 enqueue_entity_load_avg(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003323 account_entity_enqueue(cfs_rq, se);
3324 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003325
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003326 if (flags & ENQUEUE_WAKEUP) {
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02003327 place_entity(cfs_rq, se, 0);
Mel Gormancb251762016-02-05 09:08:36 +00003328 if (schedstat_enabled())
3329 enqueue_sleeper(cfs_rq, se);
Ingo Molnare9acbff2007-10-15 17:00:04 +02003330 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003331
Mel Gormancb251762016-02-05 09:08:36 +00003332 check_schedstat_required();
3333 if (schedstat_enabled()) {
3334 update_stats_enqueue(cfs_rq, se);
3335 check_spread(cfs_rq, se);
3336 }
Peter Zijlstra2f950352016-05-11 19:27:56 +02003337 if (!curr)
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003338 __enqueue_entity(cfs_rq, se);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08003339 se->on_rq = 1;
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08003340
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003341 if (cfs_rq->nr_running == 1) {
Peter Zijlstra3d4b47b2010-11-15 15:47:01 -08003342 list_add_leaf_cfs_rq(cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003343 check_enqueue_throttle(cfs_rq);
3344 }
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003345}
3346
Rik van Riel2c13c9192011-02-01 09:48:37 -05003347static void __clear_buddies_last(struct sched_entity *se)
Peter Zijlstra2002c692008-11-11 11:52:33 +01003348{
Rik van Riel2c13c9192011-02-01 09:48:37 -05003349 for_each_sched_entity(se) {
3350 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstraf1044792012-02-11 06:05:00 +01003351 if (cfs_rq->last != se)
Rik van Riel2c13c9192011-02-01 09:48:37 -05003352 break;
Peter Zijlstraf1044792012-02-11 06:05:00 +01003353
3354 cfs_rq->last = NULL;
Rik van Riel2c13c9192011-02-01 09:48:37 -05003355 }
3356}
Peter Zijlstra2002c692008-11-11 11:52:33 +01003357
Rik van Riel2c13c9192011-02-01 09:48:37 -05003358static void __clear_buddies_next(struct sched_entity *se)
3359{
3360 for_each_sched_entity(se) {
3361 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstraf1044792012-02-11 06:05:00 +01003362 if (cfs_rq->next != se)
Rik van Riel2c13c9192011-02-01 09:48:37 -05003363 break;
Peter Zijlstraf1044792012-02-11 06:05:00 +01003364
3365 cfs_rq->next = NULL;
Rik van Riel2c13c9192011-02-01 09:48:37 -05003366 }
Peter Zijlstra2002c692008-11-11 11:52:33 +01003367}
3368
Rik van Rielac53db52011-02-01 09:51:03 -05003369static void __clear_buddies_skip(struct sched_entity *se)
3370{
3371 for_each_sched_entity(se) {
3372 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Peter Zijlstraf1044792012-02-11 06:05:00 +01003373 if (cfs_rq->skip != se)
Rik van Rielac53db52011-02-01 09:51:03 -05003374 break;
Peter Zijlstraf1044792012-02-11 06:05:00 +01003375
3376 cfs_rq->skip = NULL;
Rik van Rielac53db52011-02-01 09:51:03 -05003377 }
3378}
3379
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01003380static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
3381{
Rik van Riel2c13c9192011-02-01 09:48:37 -05003382 if (cfs_rq->last == se)
3383 __clear_buddies_last(se);
3384
3385 if (cfs_rq->next == se)
3386 __clear_buddies_next(se);
Rik van Rielac53db52011-02-01 09:51:03 -05003387
3388 if (cfs_rq->skip == se)
3389 __clear_buddies_skip(se);
Peter Zijlstraa571bbe2009-01-28 14:51:40 +01003390}
3391
Peter Zijlstra6c16a6d2012-03-21 13:07:16 -07003392static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd8b49862011-07-21 09:43:41 -07003393
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003394static void
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003395dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003396{
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02003397 /*
3398 * Update run-time statistics of the 'current'.
3399 */
3400 update_curr(cfs_rq);
Yuyang Du13962232015-07-15 08:04:41 +08003401 dequeue_entity_load_avg(cfs_rq, se);
Dmitry Adamushkoa2a2d682007-10-15 17:00:13 +02003402
Mel Gormancb251762016-02-05 09:08:36 +00003403 if (schedstat_enabled())
3404 update_stats_dequeue(cfs_rq, se, flags);
Peter Zijlstra67e9fb22007-10-15 17:00:10 +02003405
Peter Zijlstra2002c692008-11-11 11:52:33 +01003406 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01003407
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003408 if (se != cfs_rq->curr)
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003409 __dequeue_entity(cfs_rq, se);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003410 se->on_rq = 0;
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003411 account_entity_dequeue(cfs_rq, se);
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003412
3413 /*
3414 * Normalize the entity after updating the min_vruntime because the
3415 * update can refer to the ->curr item and we need to reflect this
3416 * movement in our normalized position.
3417 */
Peter Zijlstra371fd7e2010-03-24 16:38:48 +01003418 if (!(flags & DEQUEUE_SLEEP))
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01003419 se->vruntime -= cfs_rq->min_vruntime;
Peter Zijlstra1e876232011-05-17 16:21:10 -07003420
Paul Turnerd8b49862011-07-21 09:43:41 -07003421 /* return excess runtime on last dequeue */
3422 return_cfs_rq_runtime(cfs_rq);
3423
Peter Zijlstra1e876232011-05-17 16:21:10 -07003424 update_min_vruntime(cfs_rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08003425 update_cfs_shares(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003426}
3427
3428/*
3429 * Preempt the current task with a newly woken task if needed:
3430 */
Peter Zijlstra7c92e542007-09-05 14:32:49 +02003431static void
Ingo Molnar2e09bf52007-10-15 17:00:05 +02003432check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003433{
Peter Zijlstra11697832007-09-05 14:32:49 +02003434 unsigned long ideal_runtime, delta_exec;
Wang Xingchaof4cfb332011-09-16 13:35:52 -04003435 struct sched_entity *se;
3436 s64 delta;
Peter Zijlstra11697832007-09-05 14:32:49 +02003437
Peter Zijlstra6d0f0eb2007-10-15 17:00:05 +02003438 ideal_runtime = sched_slice(cfs_rq, curr);
Peter Zijlstra11697832007-09-05 14:32:49 +02003439 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01003440 if (delta_exec > ideal_runtime) {
Kirill Tkhai88751252014-06-29 00:03:57 +04003441 resched_curr(rq_of(cfs_rq));
Mike Galbraitha9f3e2b2009-01-28 14:51:39 +01003442 /*
3443 * The current task ran long enough, ensure it doesn't get
3444 * re-elected due to buddy favours.
3445 */
3446 clear_buddies(cfs_rq, curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02003447 return;
3448 }
3449
3450 /*
3451 * Ensure that a task that missed wakeup preemption by a
3452 * narrow margin doesn't have to wait for a full slice.
3453 * This also mitigates buddy induced latencies under load.
3454 */
Mike Galbraithf685cea2009-10-23 23:09:22 +02003455 if (delta_exec < sysctl_sched_min_granularity)
3456 return;
3457
Wang Xingchaof4cfb332011-09-16 13:35:52 -04003458 se = __pick_first_entity(cfs_rq);
3459 delta = curr->vruntime - se->vruntime;
Mike Galbraithf685cea2009-10-23 23:09:22 +02003460
Wang Xingchaof4cfb332011-09-16 13:35:52 -04003461 if (delta < 0)
3462 return;
Mike Galbraithd7d82942011-01-05 05:41:17 +01003463
Wang Xingchaof4cfb332011-09-16 13:35:52 -04003464 if (delta > ideal_runtime)
Kirill Tkhai88751252014-06-29 00:03:57 +04003465 resched_curr(rq_of(cfs_rq));
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003466}
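/*
 * For example, with ideal_runtime = 3ms: a task that has run 4ms is
 * rescheduled outright; one that has run 0.5ms (below the minimum
 * granularity) is left alone; one that has run 2ms is preempted only
 * if its vruntime is more than 3ms ahead of the leftmost entity's.
 */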
3467
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003468static void
Ingo Molnar8494f412007-08-09 11:16:48 +02003469set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003470{
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003471 /* 'current' is not kept within the tree. */
3472 if (se->on_rq) {
3473 /*
 3474	 * Any task has to be enqueued before it gets to execute on
3475 * a CPU. So account for the time it spent waiting on the
3476 * runqueue.
3477 */
Mel Gormancb251762016-02-05 09:08:36 +00003478 if (schedstat_enabled())
3479 update_stats_wait_end(cfs_rq, se);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003480 __dequeue_entity(cfs_rq, se);
Yuyang Du9d89c252015-07-15 08:04:37 +08003481 update_load_avg(se, 1);
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02003482 }
3483
Ingo Molnar79303e92007-08-09 11:16:47 +02003484 update_stats_curr_start(cfs_rq, se);
Ingo Molnar429d43b2007-10-15 17:00:03 +02003485 cfs_rq->curr = se;
Ingo Molnareba1ed42007-10-15 17:00:02 +02003486#ifdef CONFIG_SCHEDSTATS
3487 /*
3488 * Track our maximum slice length, if the CPU's load is at
 3489	 * least twice that of our own weight (i.e. don't track it
3490 * when there are only lesser-weight tasks around):
3491 */
Mel Gormancb251762016-02-05 09:08:36 +00003492 if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03003493 se->statistics.slice_max = max(se->statistics.slice_max,
Ingo Molnareba1ed42007-10-15 17:00:02 +02003494 se->sum_exec_runtime - se->prev_sum_exec_runtime);
3495 }
3496#endif
Peter Zijlstra4a55b452007-09-05 14:32:49 +02003497 se->prev_sum_exec_runtime = se->sum_exec_runtime;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003498}
3499
Peter Zijlstra3f3a4902008-10-24 11:06:16 +02003500static int
3501wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
3502
Rik van Rielac53db52011-02-01 09:51:03 -05003503/*
3504 * Pick the next process, keeping these things in mind, in this order:
3505 * 1) keep things fair between processes/task groups
3506 * 2) pick the "next" process, since someone really wants that to run
3507 * 3) pick the "last" process, for cache locality
3508 * 4) do not run the "skip" process, if something else is available
3509 */
Peter Zijlstra678d5712012-02-11 06:05:00 +01003510static struct sched_entity *
3511pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01003512{
Peter Zijlstra678d5712012-02-11 06:05:00 +01003513 struct sched_entity *left = __pick_first_entity(cfs_rq);
3514 struct sched_entity *se;
3515
3516 /*
3517 * If curr is set we have to see if its left of the leftmost entity
3518 * still in the tree, provided there was anything in the tree at all.
3519 */
3520 if (!left || (curr && entity_before(curr, left)))
3521 left = curr;
3522
3523 se = left; /* ideally we run the leftmost entity */
Peter Zijlstraf4b67552008-11-04 21:25:07 +01003524
Rik van Rielac53db52011-02-01 09:51:03 -05003525 /*
3526 * Avoid running the skip buddy, if running something else can
3527 * be done without getting too unfair.
3528 */
3529 if (cfs_rq->skip == se) {
Peter Zijlstra678d5712012-02-11 06:05:00 +01003530 struct sched_entity *second;
3531
3532 if (se == curr) {
3533 second = __pick_first_entity(cfs_rq);
3534 } else {
3535 second = __pick_next_entity(se);
3536 if (!second || (curr && entity_before(curr, second)))
3537 second = curr;
3538 }
3539
Rik van Rielac53db52011-02-01 09:51:03 -05003540 if (second && wakeup_preempt_entity(second, left) < 1)
3541 se = second;
3542 }
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01003543
Mike Galbraithf685cea2009-10-23 23:09:22 +02003544 /*
3545 * Prefer last buddy, try to return the CPU to a preempted task.
3546 */
3547 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
3548 se = cfs_rq->last;
3549
Rik van Rielac53db52011-02-01 09:51:03 -05003550 /*
3551 * Someone really wants this to run. If it's not unfair, run it.
3552 */
3553 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
3554 se = cfs_rq->next;
3555
Mike Galbraithf685cea2009-10-23 23:09:22 +02003556 clear_buddies(cfs_rq, se);
Peter Zijlstra47932412008-11-04 21:25:09 +01003557
3558 return se;
Peter Zijlstraaa2ac252008-03-14 21:12:12 +01003559}
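/*
 * Note the resulting precedence: since the 'next' buddy is tested
 * last it overrides 'last', which in turn overrides the plain
 * leftmost pick, while 'skip' only steers us away from an entity.
 * Every override is gated by wakeup_preempt_entity(), so fairness is
 * bent by at most a wakeup granularity.
 */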
3560
Peter Zijlstra678d5712012-02-11 06:05:00 +01003561static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003562
Ingo Molnarab6cde22007-08-09 11:16:48 +02003563static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003564{
3565 /*
3566 * If still on the runqueue then deactivate_task()
3567 * was not called and update_curr() has to be done:
3568 */
3569 if (prev->on_rq)
Ingo Molnarb7cc0892007-08-09 11:16:47 +02003570 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003571
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003572 /* throttle cfs_rqs exceeding runtime */
3573 check_cfs_rq_runtime(cfs_rq);
3574
Mel Gormancb251762016-02-05 09:08:36 +00003575 if (schedstat_enabled()) {
3576 check_spread(cfs_rq, prev);
3577 if (prev->on_rq)
3578 update_stats_wait_start(cfs_rq, prev);
3579 }
3580
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003581 if (prev->on_rq) {
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003582 /* Put 'current' back into the tree. */
3583 __enqueue_entity(cfs_rq, prev);
Paul Turner9d85f212012-10-04 13:18:29 +02003584 /* in !on_rq case, update occurred at dequeue */
Yuyang Du9d89c252015-07-15 08:04:37 +08003585 update_load_avg(prev, 0);
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003586 }
Ingo Molnar429d43b2007-10-15 17:00:03 +02003587 cfs_rq->curr = NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003588}
3589
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003590static void
3591entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003592{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003593 /*
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003594 * Update run-time statistics of the 'current'.
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003595 */
Dmitry Adamushko30cfdcf2007-10-15 17:00:07 +02003596 update_curr(cfs_rq);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003597
Paul Turner43365bd2010-12-15 19:10:17 -08003598 /*
Paul Turner9d85f212012-10-04 13:18:29 +02003599 * Ensure that runnable average is periodically updated.
3600 */
Yuyang Du9d89c252015-07-15 08:04:37 +08003601 update_load_avg(curr, 1);
Peter Zijlstrabf0bd942013-07-26 23:48:42 +02003602 update_cfs_shares(cfs_rq);
Paul Turner9d85f212012-10-04 13:18:29 +02003603
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003604#ifdef CONFIG_SCHED_HRTICK
3605 /*
3606 * queued ticks are scheduled to match the slice, so don't bother
3607 * validating it and just reschedule.
3608 */
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003609 if (queued) {
Kirill Tkhai88751252014-06-29 00:03:57 +04003610 resched_curr(rq_of(cfs_rq));
Harvey Harrison983ed7a2008-04-24 18:17:55 -07003611 return;
3612 }
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01003613 /*
3614 * don't let the period tick interfere with the hrtick preemption
3615 */
3616 if (!sched_feat(DOUBLE_TICK) &&
3617 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
3618 return;
3619#endif
3620
Yong Zhang2c2efae2011-07-29 16:20:33 +08003621 if (cfs_rq->nr_running > 1)
Ingo Molnar2e09bf52007-10-15 17:00:05 +02003622 check_preempt_tick(cfs_rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02003623}
3624
Paul Turnerab84d312011-07-21 09:43:28 -07003625
3626/**************************************************
3627 * CFS bandwidth control machinery
3628 */
3629
3630#ifdef CONFIG_CFS_BANDWIDTH
Peter Zijlstra029632f2011-10-25 10:00:11 +02003631
3632#ifdef HAVE_JUMP_LABEL
Ingo Molnarc5905af2012-02-24 08:31:31 +01003633static struct static_key __cfs_bandwidth_used;
Peter Zijlstra029632f2011-10-25 10:00:11 +02003634
3635static inline bool cfs_bandwidth_used(void)
3636{
Ingo Molnarc5905af2012-02-24 08:31:31 +01003637 return static_key_false(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003638}
3639
Ben Segall1ee14e62013-10-16 11:16:12 -07003640void cfs_bandwidth_usage_inc(void)
Peter Zijlstra029632f2011-10-25 10:00:11 +02003641{
Ben Segall1ee14e62013-10-16 11:16:12 -07003642 static_key_slow_inc(&__cfs_bandwidth_used);
3643}
3644
3645void cfs_bandwidth_usage_dec(void)
3646{
3647 static_key_slow_dec(&__cfs_bandwidth_used);
Peter Zijlstra029632f2011-10-25 10:00:11 +02003648}
3649#else /* HAVE_JUMP_LABEL */
3650static bool cfs_bandwidth_used(void)
3651{
3652 return true;
3653}
3654
Ben Segall1ee14e62013-10-16 11:16:12 -07003655void cfs_bandwidth_usage_inc(void) {}
3656void cfs_bandwidth_usage_dec(void) {}
Peter Zijlstra029632f2011-10-25 10:00:11 +02003657#endif /* HAVE_JUMP_LABEL */
3658
Paul Turnerab84d312011-07-21 09:43:28 -07003659/*
3660 * default period for cfs group bandwidth.
3661 * default: 0.1s, units: nanoseconds
3662 */
3663static inline u64 default_cfs_period(void)
3664{
3665 return 100000000ULL;
3666}
Paul Turnerec12cb72011-07-21 09:43:30 -07003667
3668static inline u64 sched_cfs_bandwidth_slice(void)
3669{
3670 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
3671}
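/*
 * With the default 5ms slice and 100ms period, a group with a 20ms
 * quota can hand out four slices per period: each cfs_rq pulls a
 * slice into its local runtime_remaining and only goes back to the
 * global pool (and its lock) once that slice is exhausted.
 */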
3672
Paul Turnera9cf55b2011-07-21 09:43:32 -07003673/*
3674 * Replenish runtime according to assigned quota and update expiration time.
3675 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
3676 * additional synchronization around rq->lock.
3677 *
3678 * requires cfs_b->lock
3679 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02003680void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
Paul Turnera9cf55b2011-07-21 09:43:32 -07003681{
3682 u64 now;
3683
3684 if (cfs_b->quota == RUNTIME_INF)
3685 return;
3686
3687 now = sched_clock_cpu(smp_processor_id());
3688 cfs_b->runtime = cfs_b->quota;
3689 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
3690}
3691
Peter Zijlstra029632f2011-10-25 10:00:11 +02003692static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
3693{
3694 return &tg->cfs_bandwidth;
3695}
3696
Paul Turnerf1b17282012-10-04 13:18:31 +02003697/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
3698static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
3699{
3700 if (unlikely(cfs_rq->throttle_count))
3701 return cfs_rq->throttled_clock_task;
3702
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003703 return rq_clock_task(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
Paul Turnerf1b17282012-10-04 13:18:31 +02003704}
3705
Paul Turner85dac902011-07-21 09:43:33 -07003706/* returns 0 on failure to allocate runtime */
3707static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
Paul Turnerec12cb72011-07-21 09:43:30 -07003708{
3709 struct task_group *tg = cfs_rq->tg;
3710 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07003711 u64 amount = 0, min_amount, expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07003712
3713 /* note: this is a positive sum as runtime_remaining <= 0 */
3714 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
3715
3716 raw_spin_lock(&cfs_b->lock);
3717 if (cfs_b->quota == RUNTIME_INF)
3718 amount = min_amount;
Paul Turner58088ad2011-07-21 09:43:31 -07003719 else {
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003720 start_cfs_bandwidth(cfs_b);
Paul Turner58088ad2011-07-21 09:43:31 -07003721
3722 if (cfs_b->runtime > 0) {
3723 amount = min(cfs_b->runtime, min_amount);
3724 cfs_b->runtime -= amount;
3725 cfs_b->idle = 0;
3726 }
Paul Turnerec12cb72011-07-21 09:43:30 -07003727 }
Paul Turnera9cf55b2011-07-21 09:43:32 -07003728 expires = cfs_b->runtime_expires;
Paul Turnerec12cb72011-07-21 09:43:30 -07003729 raw_spin_unlock(&cfs_b->lock);
3730
3731 cfs_rq->runtime_remaining += amount;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003732 /*
3733 * we may have advanced our local expiration to account for allowed
3734 * spread between our sched_clock and the one on which runtime was
3735 * issued.
3736 */
3737 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
3738 cfs_rq->runtime_expires = expires;
Paul Turner85dac902011-07-21 09:43:33 -07003739
3740 return cfs_rq->runtime_remaining > 0;
Paul Turnera9cf55b2011-07-21 09:43:32 -07003741}
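/*
 * For example, a cfs_rq that overran by 1ms (runtime_remaining = -1ms)
 * requests min_amount = 5ms - (-1ms) = 6ms, covering the debt plus a
 * full slice; if the global pool only has 2ms left, runtime_remaining
 * ends up at +1ms and the function still reports success.
 */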
3742
3743/*
3744 * Note: This depends on the synchronization provided by sched_clock and the
3745 * fact that rq->clock snapshots this value.
3746 */
3747static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
3748{
3749 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
Paul Turnera9cf55b2011-07-21 09:43:32 -07003750
3751 /* if the deadline is ahead of our clock, nothing to do */
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003752 if (likely((s64)(rq_clock(rq_of(cfs_rq)) - cfs_rq->runtime_expires) < 0))
Paul Turnera9cf55b2011-07-21 09:43:32 -07003753 return;
3754
3755 if (cfs_rq->runtime_remaining < 0)
3756 return;
3757
3758 /*
3759 * If the local deadline has passed we have to consider the
3760 * possibility that our sched_clock is 'fast' and the global deadline
3761 * has not truly expired.
3762 *
 3763	 * Fortunately we can determine whether this is the case by checking
Ben Segall51f21762014-05-19 15:49:45 -07003764 * whether the global deadline has advanced. It is valid to compare
3765 * cfs_b->runtime_expires without any locks since we only care about
3766 * exact equality, so a partial write will still work.
Paul Turnera9cf55b2011-07-21 09:43:32 -07003767 */
3768
Ben Segall51f21762014-05-19 15:49:45 -07003769 if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
Paul Turnera9cf55b2011-07-21 09:43:32 -07003770 /* extend local deadline, drift is bounded above by 2 ticks */
3771 cfs_rq->runtime_expires += TICK_NSEC;
3772 } else {
3773 /* global deadline is ahead, expiration has passed */
3774 cfs_rq->runtime_remaining = 0;
3775 }
Paul Turnerec12cb72011-07-21 09:43:30 -07003776}
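
/*
 * Example of the drift handling above: the quota was issued with deadline T
 * on some cpu.  If our rq_clock already reads past T but the period timer
 * has meanwhile advanced cfs_b->runtime_expires to T + period, only our
 * clock is ahead -- the quota is still good, so the local deadline is
 * nudged forward by TICK_NSEC.  If the global deadline still reads T, the
 * quota has truly expired and runtime_remaining is zeroed.
 */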

static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
	/* dock delta_exec before expiring quota (as it could span periods) */
	cfs_rq->runtime_remaining -= delta_exec;
	expire_cfs_rq_runtime(cfs_rq);

	if (likely(cfs_rq->runtime_remaining > 0))
		return;

	/*
	 * if we're unable to extend our runtime we resched so that the active
	 * hierarchy can be throttled
	 */
	if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
		resched_curr(rq_of(cfs_rq));
}

static __always_inline
void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
	if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
		return;

	__account_cfs_rq_runtime(cfs_rq, delta_exec);
}

static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
	return cfs_bandwidth_used() && cfs_rq->throttled;
}

/* check whether cfs_rq, or any parent, is throttled */
static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
	return cfs_bandwidth_used() && cfs_rq->throttle_count;
}

/*
 * Ensure that neither of the group entities corresponding to src_cpu or
 * dest_cpu are members of a throttled hierarchy when performing group
 * load-balance operations.
 */
static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	struct cfs_rq *src_cfs_rq, *dest_cfs_rq;

	src_cfs_rq = tg->cfs_rq[src_cpu];
	dest_cfs_rq = tg->cfs_rq[dest_cpu];

	return throttled_hierarchy(src_cfs_rq) ||
	       throttled_hierarchy(dest_cfs_rq);
}

/* updated child weight may affect parent so we have to do this bottom up */
static int tg_unthrottle_up(struct task_group *tg, void *data)
{
	struct rq *rq = data;
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

	cfs_rq->throttle_count--;
#ifdef CONFIG_SMP
	if (!cfs_rq->throttle_count) {
		/* adjust cfs_rq_clock_task() */
		cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
					     cfs_rq->throttled_clock_task;
	}
#endif

	return 0;
}

static int tg_throttle_down(struct task_group *tg, void *data)
{
	struct rq *rq = data;
	struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];

	/* group is entering throttled state, stop time */
	if (!cfs_rq->throttle_count)
		cfs_rq->throttled_clock_task = rq_clock_task(rq);
	cfs_rq->throttle_count++;

	return 0;
}
3862
Paul Turnerd3d9dc32011-07-21 09:43:39 -07003863static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
Paul Turner85dac902011-07-21 09:43:33 -07003864{
3865 struct rq *rq = rq_of(cfs_rq);
3866 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
3867 struct sched_entity *se;
3868 long task_delta, dequeue = 1;
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003869 bool empty;
Paul Turner85dac902011-07-21 09:43:33 -07003870
3871 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
3872
Paul Turnerf1b17282012-10-04 13:18:31 +02003873 /* freeze hierarchy runnable averages while throttled */
Paul Turner64660c82011-07-21 09:43:36 -07003874 rcu_read_lock();
3875 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
3876 rcu_read_unlock();
Paul Turner85dac902011-07-21 09:43:33 -07003877
3878 task_delta = cfs_rq->h_nr_running;
3879 for_each_sched_entity(se) {
3880 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
3881 /* throttled entity or throttle-on-deactivate */
3882 if (!se->on_rq)
3883 break;
3884
3885 if (dequeue)
3886 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
3887 qcfs_rq->h_nr_running -= task_delta;
3888
3889 if (qcfs_rq->load.weight)
3890 dequeue = 0;
3891 }
3892
3893 if (!se)
Kirill Tkhai72465442014-05-09 03:00:14 +04003894 sub_nr_running(rq, task_delta);
Paul Turner85dac902011-07-21 09:43:33 -07003895
3896 cfs_rq->throttled = 1;
Frederic Weisbecker78becc22013-04-12 01:51:02 +02003897 cfs_rq->throttled_clock = rq_clock(rq);
Paul Turner85dac902011-07-21 09:43:33 -07003898 raw_spin_lock(&cfs_b->lock);
Cong Wangd49db342015-06-24 12:41:47 -07003899 empty = list_empty(&cfs_b->throttled_cfs_rq);
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003900
Ben Segallc06f04c2014-06-20 15:21:20 -07003901 /*
3902 * Add to the _head_ of the list, so that an already-started
3903 * distribute_cfs_runtime will not see us
3904 */
3905 list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
Peter Zijlstra77a4d1a2015-04-15 11:41:57 +02003906
3907 /*
3908 * If we're the first throttled task, make sure the bandwidth
3909 * timer is running.
3910 */
3911 if (empty)
3912 start_cfs_bandwidth(cfs_b);
3913
Paul Turner85dac902011-07-21 09:43:33 -07003914 raw_spin_unlock(&cfs_b->lock);
3915}

void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
{
	struct rq *rq = rq_of(cfs_rq);
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	struct sched_entity *se;
	int enqueue = 1;
	long task_delta;

	se = cfs_rq->tg->se[cpu_of(rq)];

	cfs_rq->throttled = 0;

	update_rq_clock(rq);

	raw_spin_lock(&cfs_b->lock);
	cfs_b->throttled_time += rq_clock(rq) - cfs_rq->throttled_clock;
	list_del_rcu(&cfs_rq->throttled_list);
	raw_spin_unlock(&cfs_b->lock);

	/* update hierarchical throttle state */
	walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);

	if (!cfs_rq->load.weight)
		return;

	task_delta = cfs_rq->h_nr_running;
	for_each_sched_entity(se) {
		if (se->on_rq)
			enqueue = 0;

		cfs_rq = cfs_rq_of(se);
		if (enqueue)
			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
		cfs_rq->h_nr_running += task_delta;

		if (cfs_rq_throttled(cfs_rq))
			break;
	}

	if (!se)
		add_nr_running(rq, task_delta);

	/* determine whether we need to wake up potentially idle cpu */
	if (rq->curr == rq->idle && rq->cfs.nr_running)
		resched_curr(rq);
}

static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
		u64 remaining, u64 expires)
{
	struct cfs_rq *cfs_rq;
	u64 runtime;
	u64 starting_runtime = remaining;

	rcu_read_lock();
	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
				throttled_list) {
		struct rq *rq = rq_of(cfs_rq);

		raw_spin_lock(&rq->lock);
		if (!cfs_rq_throttled(cfs_rq))
			goto next;

		runtime = -cfs_rq->runtime_remaining + 1;
		if (runtime > remaining)
			runtime = remaining;
		remaining -= runtime;

		cfs_rq->runtime_remaining += runtime;
		cfs_rq->runtime_expires = expires;

		/* we check whether we're throttled above */
		if (cfs_rq->runtime_remaining > 0)
			unthrottle_cfs_rq(cfs_rq);

next:
		raw_spin_unlock(&rq->lock);

		if (!remaining)
			break;
	}
	rcu_read_unlock();

	return starting_runtime - remaining;
}
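
/*
 * Worked example for the distribution loop above, with 7ms to hand out and
 * two throttled cfs_rqs A (runtime_remaining = -3ms) and B (-10ms):
 *
 *   A gets 3ms + 1ns (just enough to go positive) and is unthrottled;
 *   B gets the remaining ~4ms, sits at roughly -6ms, and stays throttled
 *   until a later refill.
 *
 * The return value (~7ms here) is what the caller docks from cfs_b->runtime.
 */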

/*
 * Responsible for refilling a task_group's bandwidth and unthrottling its
 * cfs_rqs as appropriate. If there has been no activity within the last
 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
 * used to track this state.
 */
static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
{
	u64 runtime, runtime_expires;
	int throttled;

	/* no need to continue the timer with no bandwidth constraint */
	if (cfs_b->quota == RUNTIME_INF)
		goto out_deactivate;

	throttled = !list_empty(&cfs_b->throttled_cfs_rq);
	cfs_b->nr_periods += overrun;

	/*
	 * idle depends on !throttled (for the case of a large deficit), and if
	 * we're going inactive then everything else can be deferred
	 */
	if (cfs_b->idle && !throttled)
		goto out_deactivate;

	__refill_cfs_bandwidth_runtime(cfs_b);

	if (!throttled) {
		/* mark as potentially idle for the upcoming period */
		cfs_b->idle = 1;
		return 0;
	}

	/* account preceding periods in which throttling occurred */
	cfs_b->nr_throttled += overrun;

	runtime_expires = cfs_b->runtime_expires;

	/*
	 * This check is repeated as we are holding onto the new bandwidth while
	 * we unthrottle. This can potentially race with an unthrottled group
	 * trying to acquire new bandwidth from the global pool. This can result
	 * in us over-using our runtime if it is all used during this loop, but
	 * only by limited amounts in that extreme case.
	 */
	while (throttled && cfs_b->runtime > 0) {
		runtime = cfs_b->runtime;
		raw_spin_unlock(&cfs_b->lock);
		/* we can't nest cfs_b->lock while distributing bandwidth */
		runtime = distribute_cfs_runtime(cfs_b, runtime,
						 runtime_expires);
		raw_spin_lock(&cfs_b->lock);

		throttled = !list_empty(&cfs_b->throttled_cfs_rq);

		cfs_b->runtime -= min(runtime, cfs_b->runtime);
	}

	/*
	 * While we are ensured activity in the period following an
	 * unthrottle, this also covers the case in which the new bandwidth is
	 * insufficient to cover the existing bandwidth deficit. (Forcing the
	 * timer to remain active while there are any throttled entities.)
	 */
	cfs_b->idle = 0;

	return 0;

out_deactivate:
	return 1;
}

/* a cfs_rq won't donate quota below this amount */
static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
/* minimum remaining period time to redistribute slack quota */
static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
/* how long we wait to gather additional slack before distributing */
static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
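
/*
 * Sketch of how these three constants interact: a cfs_rq that dequeues its
 * last task with 6ms of local runtime left keeps min_cfs_rq_runtime (1ms)
 * and offers 5ms back to the global pool.  The slack timer is then armed
 * cfs_bandwidth_slack_period (5ms) out, unless the period timer would
 * refresh quota within 5ms + min_bandwidth_expiration (7ms) anyway.
 */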

/*
 * Are we near the end of the current quota period?
 *
 * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
 * hrtimer base being cleared by hrtimer_start. In the case of
 * migrate_hrtimers, base is never cleared, so we are fine.
 */
static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
{
	struct hrtimer *refresh_timer = &cfs_b->period_timer;
	u64 remaining;

	/* if the call-back is running a quota refresh is already occurring */
	if (hrtimer_callback_running(refresh_timer))
		return 1;

	/* is a quota refresh about to occur? */
	remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
	if (remaining < min_expire)
		return 1;

	return 0;
}

static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
{
	u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;

	/* if there's a quota refresh soon don't bother with slack */
	if (runtime_refresh_within(cfs_b, min_left))
		return;

	hrtimer_start(&cfs_b->slack_timer,
			ns_to_ktime(cfs_bandwidth_slack_period),
			HRTIMER_MODE_REL);
}

/* we know any runtime found here is valid as update_curr() precedes return */
static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
	s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;

	if (slack_runtime <= 0)
		return;

	raw_spin_lock(&cfs_b->lock);
	if (cfs_b->quota != RUNTIME_INF &&
	    cfs_rq->runtime_expires == cfs_b->runtime_expires) {
		cfs_b->runtime += slack_runtime;

		/* we are under rq->lock, defer unthrottling using a timer */
		if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
		    !list_empty(&cfs_b->throttled_cfs_rq))
			start_cfs_slack_bandwidth(cfs_b);
	}
	raw_spin_unlock(&cfs_b->lock);

	/* even if it's not valid for return we don't want to try again */
	cfs_rq->runtime_remaining -= slack_runtime;
}

static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	if (!cfs_bandwidth_used())
		return;

	if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
		return;

	__return_cfs_rq_runtime(cfs_rq);
}

/*
 * This is done with a timer (instead of inline with bandwidth return) since
 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
 */
static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
{
	u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
	u64 expires;

	/* confirm we're still not at a refresh boundary */
	raw_spin_lock(&cfs_b->lock);
	if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
		raw_spin_unlock(&cfs_b->lock);
		return;
	}

	if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
		runtime = cfs_b->runtime;

	expires = cfs_b->runtime_expires;
	raw_spin_unlock(&cfs_b->lock);

	if (!runtime)
		return;

	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);

	raw_spin_lock(&cfs_b->lock);
	if (expires == cfs_b->runtime_expires)
		cfs_b->runtime -= min(runtime, cfs_b->runtime);
	raw_spin_unlock(&cfs_b->lock);
}

/*
 * When a group wakes up we want to make sure that its quota is not already
 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
 * runtime as update_curr() throttling cannot trigger until it's on-rq.
 */
static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
{
	if (!cfs_bandwidth_used())
		return;

	/* Synchronize hierarchical throttle counter: */
	if (unlikely(!cfs_rq->throttle_uptodate)) {
		struct rq *rq = rq_of(cfs_rq);
		struct cfs_rq *pcfs_rq;
		struct task_group *tg;

		cfs_rq->throttle_uptodate = 1;

		/* Get closest up-to-date node, because leaves go first: */
		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
			if (pcfs_rq->throttle_uptodate)
				break;
		}
		if (tg) {
			cfs_rq->throttle_count = pcfs_rq->throttle_count;
			cfs_rq->throttled_clock_task = rq_clock_task(rq);
		}
	}

	/* an active group must be handled by the update_curr()->put() path */
	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
		return;

	/* ensure the group is not already throttled */
	if (cfs_rq_throttled(cfs_rq))
		return;

	/* update runtime allocation */
	account_cfs_rq_runtime(cfs_rq, 0);
	if (cfs_rq->runtime_remaining <= 0)
		throttle_cfs_rq(cfs_rq);
}

/* conditionally throttle active cfs_rq's from put_prev_entity() */
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	if (!cfs_bandwidth_used())
		return false;

	if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
		return false;

	/*
	 * it's possible for a throttled entity to be forced into a running
	 * state (e.g. set_curr_task); in this case we're finished.
	 */
	if (cfs_rq_throttled(cfs_rq))
		return true;

	throttle_cfs_rq(cfs_rq);
	return true;
}

static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
{
	struct cfs_bandwidth *cfs_b =
		container_of(timer, struct cfs_bandwidth, slack_timer);

	do_sched_cfs_slack_timer(cfs_b);

	return HRTIMER_NORESTART;
}

static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
{
	struct cfs_bandwidth *cfs_b =
		container_of(timer, struct cfs_bandwidth, period_timer);
	int overrun;
	int idle = 0;

	raw_spin_lock(&cfs_b->lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, cfs_b->period);
		if (!overrun)
			break;

		idle = do_sched_cfs_period_timer(cfs_b, overrun);
	}
	if (idle)
		cfs_b->period_active = 0;
	raw_spin_unlock(&cfs_b->lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	raw_spin_lock_init(&cfs_b->lock);
	cfs_b->runtime = 0;
	cfs_b->quota = RUNTIME_INF;
	cfs_b->period = ns_to_ktime(default_cfs_period());

	INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
	hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
	cfs_b->period_timer.function = sched_cfs_period_timer;
	hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	cfs_b->slack_timer.function = sched_cfs_slack_timer;
}

static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
	cfs_rq->runtime_enabled = 0;
	INIT_LIST_HEAD(&cfs_rq->throttled_list);
}

void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	lockdep_assert_held(&cfs_b->lock);

	if (!cfs_b->period_active) {
		cfs_b->period_active = 1;
		hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
		hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
	}
}

static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
	/* init_cfs_bandwidth() was not called */
	if (!cfs_b->throttled_cfs_rq.next)
		return;

	hrtimer_cancel(&cfs_b->period_timer);
	hrtimer_cancel(&cfs_b->slack_timer);
}

static void __maybe_unused update_runtime_enabled(struct rq *rq)
{
	struct cfs_rq *cfs_rq;

	for_each_leaf_cfs_rq(rq, cfs_rq) {
		struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;

		raw_spin_lock(&cfs_b->lock);
		cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
		raw_spin_unlock(&cfs_b->lock);
	}
}

static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
{
	struct cfs_rq *cfs_rq;

	for_each_leaf_cfs_rq(rq, cfs_rq) {
		if (!cfs_rq->runtime_enabled)
			continue;

		/*
		 * clock_task is not advancing so we just need to make sure
		 * there's some valid quota amount
		 */
		cfs_rq->runtime_remaining = 1;
		/*
		 * Offline rq is schedulable till cpu is completely disabled
		 * in take_cpu_down(), so we prevent new cfs throttling here.
		 */
		cfs_rq->runtime_enabled = 0;

		if (cfs_rq_throttled(cfs_rq))
			unthrottle_cfs_rq(cfs_rq);
	}
}

#else /* CONFIG_CFS_BANDWIDTH */
static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
{
	return rq_clock_task(rq_of(cfs_rq));
}

static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}

static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int throttled_lb_pair(struct task_group *tg,
				    int src_cpu, int dest_cpu)
{
	return 0;
}

void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
#endif

static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
{
	return NULL;
}
static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
static inline void update_runtime_enabled(struct rq *rq) {}
static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}

#endif /* CONFIG_CFS_BANDWIDTH */

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_SCHED_HRTICK
static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	WARN_ON(task_rq(p) != rq);

	if (cfs_rq->nr_running > 1) {
		u64 slice = sched_slice(cfs_rq, se);
		u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
		s64 delta = slice - ran;

		if (delta < 0) {
			if (rq->curr == p)
				resched_curr(rq);
			return;
		}
		hrtick_start(rq, delta);
	}
}
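
/*
 * e.g. with a computed slice of 4ms and 1.5ms already consumed, the hrtimer
 * above fires 2.5ms out, preempting exactly at the slice boundary instead
 * of waiting for the next (possibly much later) tick.
 */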

/*
 * called from enqueue/dequeue and updates the hrtick when the
 * current task is from our class and nr_running is low enough
 * to matter.
 */
static void hrtick_update(struct rq *rq)
{
	struct task_struct *curr = rq->curr;

	if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class)
		return;

	if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
		hrtick_start_fair(rq, curr);
}
#else /* !CONFIG_SCHED_HRTICK */
static inline void
hrtick_start_fair(struct rq *rq, struct task_struct *p)
{
}

static inline void hrtick_update(struct rq *rq)
{
}
#endif

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void
enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, flags);

		/*
		 * end evaluation on encountering a throttled cfs_rq
		 *
		 * note: in the case of encountering a throttled cfs_rq we will
		 * post the final h_nr_running increment below.
		 */
		if (cfs_rq_throttled(cfs_rq))
			break;
		cfs_rq->h_nr_running++;

		flags = ENQUEUE_WAKEUP;
	}

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		cfs_rq->h_nr_running++;

		if (cfs_rq_throttled(cfs_rq))
			break;

		update_load_avg(se, 1);
		update_cfs_shares(cfs_rq);
	}

	if (!se)
		add_nr_running(rq, 1);

	hrtick_update(rq);
}

static void set_next_buddy(struct sched_entity *se);

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;
	int task_sleep = flags & DEQUEUE_SLEEP;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, flags);

		/*
		 * end evaluation on encountering a throttled cfs_rq
		 *
		 * note: in the case of encountering a throttled cfs_rq we will
		 * post the final h_nr_running decrement below.
		 */
		if (cfs_rq_throttled(cfs_rq))
			break;
		cfs_rq->h_nr_running--;

		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight) {
			/* Avoid re-evaluating load for this entity: */
			se = parent_entity(se);
			/*
			 * Bias pick_next to pick a task from this cfs_rq, as
			 * p is sleeping when it is within its sched_slice.
			 */
			if (task_sleep && se && !throttled_hierarchy(cfs_rq))
				set_next_buddy(se);
			break;
		}
		flags |= DEQUEUE_SLEEP;
	}

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		cfs_rq->h_nr_running--;

		if (cfs_rq_throttled(cfs_rq))
			break;

		update_load_avg(se, 1);
		update_cfs_shares(cfs_rq);
	}

	if (!se)
		sub_nr_running(rq, 1);

	hrtick_update(rq);
}

#ifdef CONFIG_SMP
#ifdef CONFIG_NO_HZ_COMMON
/*
 * per rq 'load' array crap; XXX kill this.
 */

/*
 * The exact cpuload calculated at every tick would be:
 *
 *   load' = (1 - 1/2^i) * load + (1/2^i) * cur_load
 *
 * If a cpu misses updates for n ticks (as it was idle) and update gets
 * called on the n+1-th tick when cpu may be busy, then we have:
 *
 *   load_n   = (1 - 1/2^i)^n * load_0
 *   load_n+1 = (1 - 1/2^i)   * load_n + (1/2^i) * cur_load
 *
 * decay_load_missed() below does an efficient calculation of
 *
 *   load' = (1 - 1/2^i)^n * load
 *
 * Because x^(n+m) := x^n * x^m we can decompose any x^n in power-of-2 factors.
 * This allows us to precompute the above in said factors, thereby allowing the
 * reduction of an arbitrary n in O(log_2 n) steps. (See also
 * fixed_power_int())
 *
 * The calculation is approximated on a 128 point scale.
 */
#define DEGRADE_SHIFT		7

static const u8 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
static const u8 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
	{   0,   0,  0,  0,  0,  0, 0, 0 },
	{  64,  32,  8,  0,  0,  0, 0, 0 },
	{  96,  72, 40, 12,  1,  0, 0, 0 },
	{ 112,  98, 75, 43, 15,  1, 0, 0 },
	{ 120, 112, 98, 76, 45, 16, 2, 0 }
};

/*
 * Update cpu_load for any missed ticks, due to tickless idle. The backlog
 * would be when CPU is idle and so we just decay the old load without
 * adding any new load.
 */
static unsigned long
decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
{
	int j = 0;

	if (!missed_updates)
		return load;

	if (missed_updates >= degrade_zero_ticks[idx])
		return 0;

	if (idx == 1)
		return load >> missed_updates;

	while (missed_updates) {
		if (missed_updates % 2)
			load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;

		missed_updates >>= 1;
		j++;
	}
	return load;
}
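
/*
 * Worked example: load = 128, idx = 2, missed_updates = 3 (binary 11).
 * The loop applies degrade_factor[2][0] = 96 and degrade_factor[2][1] = 72:
 *
 *   128 * 96 >> 7 = 96,  96 * 72 >> 7 = 54
 *
 * which matches the exact (1 - 1/2^2)^3 * 128 = (3/4)^3 * 128 = 54.
 */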
#endif /* CONFIG_NO_HZ_COMMON */

/**
 * cpu_load_update - update the rq->cpu_load[] statistics
 * @this_rq: The rq to update statistics for
 * @this_load: The current load
 * @pending_updates: The number of missed updates
 *
 * Update rq->cpu_load[] statistics. This function is usually called every
 * scheduler tick (TICK_NSEC).
 *
 * This function computes a decaying average:
 *
 *   load[i]' = (1 - 1/2^i) * load[i] + (1/2^i) * load
 *
 * Because of NOHZ it might not get called on every tick, which is why the
 * @pending_updates argument exists.
 *
 *   load[i]_n = (1 - 1/2^i) * load[i]_n-1 + (1/2^i) * load_n-1
 *             = A * load[i]_n-1 + B ; A := (1 - 1/2^i), B := (1/2^i) * load
 *             = A * (A * load[i]_n-2 + B) + B
 *             = A * (A * (A * load[i]_n-3 + B) + B) + B
 *             = A^3 * load[i]_n-3 + (A^2 + A + 1) * B
 *             = A^n * load[i]_0 + (A^(n-1) + A^(n-2) + ... + 1) * B
 *             = A^n * load[i]_0 + ((1 - A^n) / (1 - A)) * B
 *             = (1 - 1/2^i)^n * (load[i]_0 - load) + load
 *
 * In the above we've assumed load_n := load, which is true for NOHZ_FULL as
 * any change in load would have resulted in the tick being turned back on.
 *
 * For regular NOHZ, this reduces to:
 *
 *   load[i]_n = (1 - 1/2^i)^n * load[i]_0
 *
 * see decay_load_missed(). For NOHZ_FULL we get to subtract and add the extra
 * term.
 */
static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
			    unsigned long pending_updates)
{
	unsigned long __maybe_unused tickless_load = this_rq->cpu_load[0];
	int i, scale;

	this_rq->nr_load_updates++;

	/* Update our load: */
	this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* scale is effectively 1 << i now, and >> i divides by scale */

		old_load = this_rq->cpu_load[i];
#ifdef CONFIG_NO_HZ_COMMON
		old_load = decay_load_missed(old_load, pending_updates - 1, i);
		if (tickless_load) {
			old_load -= decay_load_missed(tickless_load, pending_updates - 1, i);
			/*
			 * old_load can never be a negative value because a
			 * decayed tickless_load cannot be greater than the
			 * original tickless_load.
			 */
			old_load += tickless_load;
		}
#endif
		new_load = this_load;
		/*
		 * Round up the averaging division if load is increasing. This
		 * prevents us from getting stuck on 9 if the load is 10, for
		 * example.
		 */
		if (new_load > old_load)
			new_load += scale - 1;

		this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
	}

	sched_avg_update(this_rq);
}
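
/*
 * The round-up above in numbers, for i = 1 (scale = 2), old load 9,
 * current load 10: without it, (9 + 10) >> 1 = 9 forever; with it,
 * (9 * 1 + 11) >> 1 = 10, so the average can actually reach the target.
 */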

/* Used instead of source_load when we know the type == 0 */
static unsigned long weighted_cpuload(const int cpu)
{
	return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
}

#ifdef CONFIG_NO_HZ_COMMON
/*
 * There is no sane way to deal with nohz on smp when using jiffies because the
 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
 *
 * Therefore we need to avoid the delta approach from the regular tick when
 * possible since that would seriously skew the load calculation. This is why we
 * use cpu_load_update_periodic() for CPUs out of nohz. However we'll rely on
 * jiffies deltas for updates happening while in nohz mode (idle ticks, idle
 * loop exit, nohz_idle_balance, nohz full exit...)
 *
 * This means we might still be one tick off for nohz periods.
 */

static void cpu_load_update_nohz(struct rq *this_rq,
				 unsigned long curr_jiffies,
				 unsigned long load)
{
	unsigned long pending_updates;

	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
	if (pending_updates) {
		this_rq->last_load_update_tick = curr_jiffies;
		/*
		 * In the regular NOHZ case, we were idle, this means load 0.
		 * In the NOHZ_FULL case, we were non-idle, we should consider
		 * its weighted load.
		 */
		cpu_load_update(this_rq, load, pending_updates);
	}
}

/*
 * Called from nohz_idle_balance() to update the load ratings before doing the
 * idle balance.
 */
static void cpu_load_update_idle(struct rq *this_rq)
{
	/*
	 * bail if there's load or we're actually up-to-date.
	 */
	if (weighted_cpuload(cpu_of(this_rq)))
		return;

	cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), 0);
}

/*
 * Record CPU load on nohz entry so we know the tickless load to account
 * on nohz exit. cpu_load[0] happens then to be updated more frequently
 * than other cpu_load[idx] but it should be fine as cpu_load readers
 * shouldn't rely on synchronized cpu_load[*] updates.
 */
void cpu_load_update_nohz_start(void)
{
	struct rq *this_rq = this_rq();

	/*
	 * This is all lockless but should be fine. If weighted_cpuload changes
	 * concurrently we'll exit nohz. And the cpu_load write can race with
	 * cpu_load_update_idle() but both updaters would be writing the same
	 * value.
	 */
	this_rq->cpu_load[0] = weighted_cpuload(cpu_of(this_rq));
}

/*
 * Account the tickless load at the end of a nohz frame.
 */
void cpu_load_update_nohz_stop(void)
{
	unsigned long curr_jiffies = READ_ONCE(jiffies);
	struct rq *this_rq = this_rq();
	unsigned long load;

	if (curr_jiffies == this_rq->last_load_update_tick)
		return;

	load = weighted_cpuload(cpu_of(this_rq));
	raw_spin_lock(&this_rq->lock);
	update_rq_clock(this_rq);
	cpu_load_update_nohz(this_rq, curr_jiffies, load);
	raw_spin_unlock(&this_rq->lock);
}
#else /* !CONFIG_NO_HZ_COMMON */
static inline void cpu_load_update_nohz(struct rq *this_rq,
					unsigned long curr_jiffies,
					unsigned long load) { }
#endif /* CONFIG_NO_HZ_COMMON */

static void cpu_load_update_periodic(struct rq *this_rq, unsigned long load)
{
#ifdef CONFIG_NO_HZ_COMMON
	/* See the mess around cpu_load_update_nohz(). */
	this_rq->last_load_update_tick = READ_ONCE(jiffies);
#endif
	cpu_load_update(this_rq, load, 1);
}

/*
 * Called from scheduler_tick()
 */
void cpu_load_update_active(struct rq *this_rq)
{
	unsigned long load = weighted_cpuload(cpu_of(this_rq));

	if (tick_nohz_tick_stopped())
		cpu_load_update_nohz(this_rq, READ_ONCE(jiffies), load);
	else
		cpu_load_update_periodic(this_rq, load);
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return min(rq->cpu_load[type-1], total);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static unsigned long target_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long total = weighted_cpuload(cpu);

	if (type == 0 || !sched_feat(LB_BIAS))
		return total;

	return max(rq->cpu_load[type-1], total);
}

static unsigned long capacity_of(int cpu)
{
	return cpu_rq(cpu)->cpu_capacity;
}

static unsigned long capacity_orig_of(int cpu)
{
	return cpu_rq(cpu)->cpu_capacity_orig;
}

static unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
	unsigned long load_avg = weighted_cpuload(cpu);

	if (nr_running)
		return load_avg / nr_running;

	return 0;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 *
 * Calculate the effective load difference if @wl is added (subtracted) to @tg
 * on this @cpu and results in a total addition (subtraction) of @wg to the
 * total group weight.
 *
 * Given a runqueue weight distribution (rw_i) we can compute a shares
 * distribution (s_i) using:
 *
 *   s_i = rw_i / \Sum rw_j						(1)
 *
 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
 * shares distribution (s_i):
 *
 *   rw_i = {   2,   4,   1,   0 }
 *   s_i  = { 2/7, 4/7, 1/7,   0 }
 *
 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
 * task used to run on and the CPU the waker is running on), we need to
 * compute the effect of waking a task on either CPU and, in case of a sync
 * wakeup, compute the effect of the current task going to sleep.
 *
 * So for a change of @wl to the local @cpu with an overall group weight change
 * of @wg we can compute the new shares distribution (s'_i) using:
 *
 *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)				(2)
 *
 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
 * differences in waking a task to CPU 0. The additional task changes the
 * weight and shares distributions like:
 *
 *   rw'_i = {   3,   4,   1,   0 }
 *   s'_i  = { 3/8, 4/8, 1/8,   0 }
 *
 * We can then compute the difference in effective weight by using:
 *
 *   dw_i = S * (s'_i - s_i)						(3)
 *
 * Where 'S' is the group weight as seen by its parent.
 *
 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
 * 4/7) times the weight of the group.
 */
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)	/* the trivial, non-cgroup case */
		return wl;

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = se->my_q;
		long W, w = cfs_rq_load_avg(cfs_rq);

		tg = cfs_rq->tg;

		/*
		 * W = @wg + \Sum rw_j
		 */
		W = wg + atomic_long_read(&tg->load_avg);

		/* Ensure \Sum rw_j >= rw_i */
		W -= cfs_rq->tg_load_avg_contrib;
		W += w;

		/*
		 * w = rw_i + @wl
		 */
		w += wl;

		/*
		 * wl = S * s'_i; see (2)
		 */
		if (W > 0 && w < W)
			wl = (w * (long)tg->shares) / W;
		else
			wl = tg->shares;

		/*
		 * Per the above, wl is the new se->load.weight value; since
		 * those are clipped to [MIN_SHARES, ...) do so now. See
		 * calc_cfs_shares().
		 */
		if (wl < MIN_SHARES)
			wl = MIN_SHARES;

		/*
		 * wl = dw_i = S * (s'_i - s_i); see (3)
		 */
		wl -= se->avg.load_avg;

		/*
		 * Recursively apply this logic to all parent groups to compute
		 * the final effective load change on the root group. Since
		 * only the @tg group gets extra weight, all parent groups can
		 * only redistribute existing shares. @wl is the shift in shares
		 * resulting from this level per the above.
		 */
		wg = 0;
	}

	return wl;
}
#else

static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
	return wl;
}

#endif

static void record_wakee(struct task_struct *p)
{
	/*
	 * Only decay a single time; tasks that have less than 1 wakeup per
	 * jiffy will not have built up many flips.
	 */
	if (time_after(jiffies, current->wakee_flip_decay_ts + HZ)) {
		current->wakee_flips >>= 1;
		current->wakee_flip_decay_ts = jiffies;
	}

	if (current->last_wakee != p) {
		current->last_wakee = p;
		current->wakee_flips++;
	}
}

/*
 * Detect M:N waker/wakee relationships via a switching-frequency heuristic.
 *
 * A waker of many should wake a different task than the one last awakened
 * at a frequency roughly N times higher than one of its wakees.
 *
 * In order to determine whether we should let the load spread vs consolidating
 * to shared cache, we look for a minimum 'flip' frequency of llc_size in one
 * partner, and a factor of llc_size higher frequency in the other.
 *
 * With both conditions met, we can be relatively sure that the relationship is
 * non-monogamous, with partner count exceeding socket size.
 *
 * Waker/wakee being client/server, worker/dispatcher, interrupt source or
 * whatever is irrelevant; the spread criterion is simply that the apparent
 * partner count exceeds the socket size.
 */
static int wake_wide(struct task_struct *p)
{
	unsigned int master = current->wakee_flips;
	unsigned int slave = p->wakee_flips;
	int factor = this_cpu_read(sd_llc_size);

	if (master < slave)
		swap(master, slave);
	if (slave < factor || master < slave * factor)
		return 0;
	return 1;
}
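
/*
 * e.g. with an 8-cpu LLC (factor = 8): a dispatcher with wakee_flips = 70
 * waking a worker with wakee_flips = 8 passes both tests (8 >= 8 and
 * 70 >= 8 * 8), so wake_wide() returns 1 and the wakeup is spread rather
 * than pulled onto the waker's cache domain.
 */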
5049
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005050static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
Ingo Molnar098fb9d2008-03-16 20:36:10 +01005051{
Paul Turnere37b6a72011-01-21 20:44:59 -08005052 s64 this_load, load;
Vincent Guittotbd61c982014-08-26 13:06:50 +02005053 s64 this_eff_load, prev_eff_load;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005054 int idx, this_cpu, prev_cpu;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005055 struct task_group *tg;
Peter Zijlstra83378262008-06-27 13:41:37 +02005056 unsigned long weight;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02005057 int balanced;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01005058
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005059 idx = sd->wake_idx;
5060 this_cpu = smp_processor_id();
5061 prev_cpu = task_cpu(p);
5062 load = source_load(prev_cpu, idx);
5063 this_load = target_load(this_cpu, idx);
Ingo Molnar098fb9d2008-03-16 20:36:10 +01005064
5065 /*
Ingo Molnar098fb9d2008-03-16 20:36:10 +01005066 * If sync wakeup then subtract the (maximum possible)
5067 * effect of the currently running task from the load
5068 * of the current CPU:
5069 */
Peter Zijlstra83378262008-06-27 13:41:37 +02005070 if (sync) {
5071 tg = task_group(current);
Yuyang Du9d89c252015-07-15 08:04:37 +08005072 weight = current->se.avg.load_avg;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01005073
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005074 this_load += effective_load(tg, this_cpu, -weight, -weight);
Peter Zijlstra83378262008-06-27 13:41:37 +02005075 load += effective_load(tg, prev_cpu, 0, -weight);
5076 }
5077
5078 tg = task_group(p);
Yuyang Du9d89c252015-07-15 08:04:37 +08005079 weight = p->se.avg.load_avg;
Peter Zijlstra83378262008-06-27 13:41:37 +02005080
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02005081 /*
5082 * In low-load situations, where prev_cpu is idle and this_cpu is idle
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005083 * due to the sync cause above having dropped this_load to 0, we'll
5084 * always have an imbalance, but there's really nothing you can do
5085 * about that, so that's good too.
Peter Zijlstra71a29aa2009-09-07 18:28:05 +02005086 *
5087	 * Otherwise check if either cpu is near enough in load to allow this
5088 * task to be woken on this_cpu.
5089 */
Vincent Guittotbd61c982014-08-26 13:06:50 +02005090 this_eff_load = 100;
5091 this_eff_load *= capacity_of(prev_cpu);
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02005092
Vincent Guittotbd61c982014-08-26 13:06:50 +02005093 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
5094 prev_eff_load *= capacity_of(this_cpu);
5095
5096 if (this_load > 0) {
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02005097 this_eff_load *= this_load +
5098 effective_load(tg, this_cpu, weight, weight);
5099
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02005100 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
Vincent Guittotbd61c982014-08-26 13:06:50 +02005101 }
Peter Zijlstrae51fd5e2010-05-31 12:37:30 +02005102
Vincent Guittotbd61c982014-08-26 13:06:50 +02005103 balanced = this_eff_load <= prev_eff_load;
Mike Galbraithb3137bc2008-05-29 11:11:41 +02005104
Lucas De Marchi41acab82010-03-10 23:37:45 -03005105 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
Mike Galbraithb3137bc2008-05-29 11:11:41 +02005106
Vincent Guittot05bfb652014-08-26 13:06:45 +02005107 if (!balanced)
5108 return 0;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01005109
Vincent Guittot05bfb652014-08-26 13:06:45 +02005110 schedstat_inc(sd, ttwu_move_affine);
5111 schedstat_inc(p, se.statistics.nr_wakeups_affine);
5112
5113 return 1;
Ingo Molnar098fb9d2008-03-16 20:36:10 +01005114}
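/*
 * Worked example of the comparison above (made-up numbers; without group
 * scheduling, effective_load() collapses to its 'wl' argument): with
 * sd->imbalance_pct = 125, prev_eff_load starts at 100 + 25/2 = 112
 * versus 100 for this_eff_load, and equal capacities cancel out. Taking
 * this_load = 60, p's load_avg weight = 20 and load = 100 gives
 * 100 * (60 + 20) = 8000 <= 112 * 100 = 11200, so the pull is judged
 * balanced and wake_affine() returns 1: prev_cpu's load is effectively
 * inflated by ~12% before migration toward the waker is refused.
 */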
5115
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005116/*
5117 * find_idlest_group finds and returns the least busy CPU group within the
5118 * domain.
5119 */
5120static struct sched_group *
Peter Zijlstra78e7ed52009-09-03 13:16:51 +02005121find_idlest_group(struct sched_domain *sd, struct task_struct *p,
Vincent Guittotc44f2a02013-10-18 13:52:21 +02005122 int this_cpu, int sd_flag)
Gregory Haskinse7693a32008-01-25 21:08:09 +01005123{
Andi Kleenb3bd3de2010-08-10 14:17:51 -07005124 struct sched_group *idlest = NULL, *group = sd->groups;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005125 unsigned long min_load = ULONG_MAX, this_load = 0;
Vincent Guittotc44f2a02013-10-18 13:52:21 +02005126 int load_idx = sd->forkexec_idx;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005127 int imbalance = 100 + (sd->imbalance_pct-100)/2;
Gregory Haskinse7693a32008-01-25 21:08:09 +01005128
Vincent Guittotc44f2a02013-10-18 13:52:21 +02005129 if (sd_flag & SD_BALANCE_WAKE)
5130 load_idx = sd->wake_idx;
5131
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005132 do {
5133 unsigned long load, avg_load;
5134 int local_group;
5135 int i;
Gregory Haskinse7693a32008-01-25 21:08:09 +01005136
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005137 /* Skip over this group if it has no CPUs allowed */
5138 if (!cpumask_intersects(sched_group_cpus(group),
Peter Zijlstrafa17b502011-06-16 12:23:22 +02005139 tsk_cpus_allowed(p)))
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005140 continue;
5141
5142 local_group = cpumask_test_cpu(this_cpu,
5143 sched_group_cpus(group));
5144
5145 /* Tally up the load of all CPUs in the group */
5146 avg_load = 0;
5147
5148 for_each_cpu(i, sched_group_cpus(group)) {
5149 /* Bias balancing toward cpus of our domain */
5150 if (local_group)
5151 load = source_load(i, load_idx);
5152 else
5153 load = target_load(i, load_idx);
5154
5155 avg_load += load;
5156 }
5157
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04005158 /* Adjust by relative CPU capacity of the group */
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04005159 avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005160
5161 if (local_group) {
5162 this_load = avg_load;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005163 } else if (avg_load < min_load) {
5164 min_load = avg_load;
5165 idlest = group;
5166 }
5167 } while (group = group->next, group != sd->groups);
5168
5169 if (!idlest || 100*this_load < imbalance*min_load)
5170 return NULL;
5171 return idlest;
5172}
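/*
 * Concrete instance of the cutoff above (hypothetical numbers): with
 * imbalance_pct = 125 we get imbalance = 112, so a remote group is only
 * returned when 100 * this_load >= 112 * min_load, i.e. the local group
 * carries at least ~12% more load than the idlest candidate; smaller
 * gaps return NULL and keep the task local for cache locality.
 */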
5173
5174/*
5175 * find_idlest_cpu - find the idlest cpu among the cpus in group.
5176 */
5177static int
5178find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
5179{
5180 unsigned long load, min_load = ULONG_MAX;
Nicolas Pitre83a0a962014-09-04 11:32:10 -04005181 unsigned int min_exit_latency = UINT_MAX;
5182 u64 latest_idle_timestamp = 0;
5183 int least_loaded_cpu = this_cpu;
5184 int shallowest_idle_cpu = -1;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005185 int i;
5186
5187 /* Traverse only the allowed CPUs */
Peter Zijlstrafa17b502011-06-16 12:23:22 +02005188 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
Nicolas Pitre83a0a962014-09-04 11:32:10 -04005189 if (idle_cpu(i)) {
5190 struct rq *rq = cpu_rq(i);
5191 struct cpuidle_state *idle = idle_get_state(rq);
5192 if (idle && idle->exit_latency < min_exit_latency) {
5193 /*
5194 * We give priority to a CPU whose idle state
5195 * has the smallest exit latency irrespective
5196 * of any idle timestamp.
5197 */
5198 min_exit_latency = idle->exit_latency;
5199 latest_idle_timestamp = rq->idle_stamp;
5200 shallowest_idle_cpu = i;
5201 } else if ((!idle || idle->exit_latency == min_exit_latency) &&
5202 rq->idle_stamp > latest_idle_timestamp) {
5203 /*
5204 * If equal or no active idle state, then
5205 * the most recently idled CPU might have
5206 * a warmer cache.
5207 */
5208 latest_idle_timestamp = rq->idle_stamp;
5209 shallowest_idle_cpu = i;
5210 }
Yao Dongdong9f967422014-10-28 04:08:06 +00005211 } else if (shallowest_idle_cpu == -1) {
Nicolas Pitre83a0a962014-09-04 11:32:10 -04005212 load = weighted_cpuload(i);
5213 if (load < min_load || (load == min_load && i == this_cpu)) {
5214 min_load = load;
5215 least_loaded_cpu = i;
5216 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01005217 }
5218 }
5219
Nicolas Pitre83a0a962014-09-04 11:32:10 -04005220 return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005221}
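/*
 * Summary of the preference order implemented above: an idle cpu in the
 * shallowest C-state wins first (cheapest to wake up); ties between
 * equally shallow (or stateless) idle cpus go to the most recently
 * idled one (warmest cache); only when no allowed cpu is idle does
 * plain weighted load decide, preferring this_cpu on equal load.
 */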
Gregory Haskinse7693a32008-01-25 21:08:09 +01005222
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005223/*
Peter Zijlstraa50bde52009-11-12 15:55:28 +01005224 * Try and locate an idle CPU in the sched_domain.
5225 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07005226static int select_idle_sibling(struct task_struct *p, int target)
Peter Zijlstraa50bde52009-11-12 15:55:28 +01005227{
Suresh Siddha99bd5e22010-03-31 16:47:45 -07005228 struct sched_domain *sd;
Linus Torvalds37407ea2012-09-16 12:29:43 -07005229 struct sched_group *sg;
Mike Galbraithe0a79f52013-01-28 12:19:25 +01005230 int i = task_cpu(p);
5231
5232 if (idle_cpu(target))
5233 return target;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01005234
5235 /*
Mike Galbraithe0a79f52013-01-28 12:19:25 +01005236	 * If the previous cpu is cache affine and idle, don't be stupid.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01005237 */
Mike Galbraithe0a79f52013-01-28 12:19:25 +01005238 if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
5239 return i;
Peter Zijlstraa50bde52009-11-12 15:55:28 +01005240
5241 /*
Matt Flemingd4335582016-03-09 14:59:08 +00005242 * Otherwise, iterate the domains and find an eligible idle cpu.
5243 *
5244 * A completely idle sched group at higher domains is more
5245 * desirable than an idle group at a lower level, because lower
5246 * domains have smaller groups and usually share hardware
5247 * resources which causes tasks to contend on them, e.g. x86
5248 * hyperthread siblings in the lowest domain (SMT) can contend
5249 * on the shared cpu pipeline.
5250 *
5251 * However, while we prefer idle groups at higher domains
5252 * finding an idle cpu at the lowest domain is still better than
5253 * returning 'target', which we've already established, isn't
5254 * idle.
Peter Zijlstraa50bde52009-11-12 15:55:28 +01005255 */
Peter Zijlstra518cd622011-12-07 15:07:31 +01005256 sd = rcu_dereference(per_cpu(sd_llc, target));
Suresh Siddha77e81362011-11-17 11:08:23 -08005257 for_each_lower_domain(sd) {
Linus Torvalds37407ea2012-09-16 12:29:43 -07005258 sg = sd->groups;
5259 do {
5260 if (!cpumask_intersects(sched_group_cpus(sg),
5261 tsk_cpus_allowed(p)))
5262 goto next;
Mike Galbraith970e1782012-06-12 05:18:32 +02005263
Matt Flemingd4335582016-03-09 14:59:08 +00005264 /* Ensure the entire group is idle */
Linus Torvalds37407ea2012-09-16 12:29:43 -07005265 for_each_cpu(i, sched_group_cpus(sg)) {
Mike Galbraithe0a79f52013-01-28 12:19:25 +01005266 if (i == target || !idle_cpu(i))
Linus Torvalds37407ea2012-09-16 12:29:43 -07005267 goto next;
5268 }
5269
Matt Flemingd4335582016-03-09 14:59:08 +00005270 /*
5271 * It doesn't matter which cpu we pick, the
5272 * whole group is idle.
5273 */
Linus Torvalds37407ea2012-09-16 12:29:43 -07005274 target = cpumask_first_and(sched_group_cpus(sg),
5275 tsk_cpus_allowed(p));
5276 goto done;
5277next:
5278 sg = sg->next;
5279 } while (sg != sd->groups);
5280 }
5281done:
Peter Zijlstraa50bde52009-11-12 15:55:28 +01005282 return target;
5283}
Dietmar Eggemann231678b2015-08-14 17:23:13 +01005284
Vincent Guittot8bb5b002015-03-04 08:48:47 +01005285/*
Dietmar Eggemann9e91d612015-08-14 17:23:12 +01005286 * cpu_util returns the amount of capacity of a CPU that is used by CFS
Vincent Guittot8bb5b002015-03-04 08:48:47 +01005287 * tasks. The unit of the return value must be the one of capacity so we can
Dietmar Eggemann9e91d612015-08-14 17:23:12 +01005288 * compare the utilization with the capacity of the CPU that is available for
5289 * CFS task (ie cpu_capacity).
Dietmar Eggemann231678b2015-08-14 17:23:13 +01005290 *
5291 * cfs_rq.avg.util_avg is the sum of running time of runnable tasks plus the
5292 * recent utilization of currently non-runnable tasks on a CPU. It represents
5293 * the amount of utilization of a CPU in the range [0..capacity_orig] where
5294 * capacity_orig is the cpu_capacity available at the highest frequency
5295 * (arch_scale_freq_capacity()).
5296 * The utilization of a CPU converges towards a sum equal to or less than the
5297 * current capacity (capacity_curr <= capacity_orig) of the CPU because it is
5298 * the running time on this CPU scaled by capacity_curr.
5299 *
5300 * Nevertheless, cfs_rq.avg.util_avg can be higher than capacity_curr or even
5301 * higher than capacity_orig because of unfortunate rounding in
5302 * cfs.avg.util_avg or just after migrating tasks and new task wakeups until
5303 * the average stabilizes with the new running time. We need to check that the
5304 * utilization stays within the range of [0..capacity_orig] and cap it if
5305 * necessary. Without utilization capping, a group could be seen as overloaded
5306 * (CPU0 utilization at 121% + CPU1 utilization at 80%) whereas CPU1 has 20% of
5307 * available capacity. We allow utilization to overshoot capacity_curr (but not
5308	 * capacity_orig) as it is useful for predicting the capacity required after task
5309 * migrations (scheduler-driven DVFS).
Vincent Guittot8bb5b002015-03-04 08:48:47 +01005310 */
Dietmar Eggemann9e91d612015-08-14 17:23:12 +01005311static int cpu_util(int cpu)
Vincent Guittot8bb5b002015-03-04 08:48:47 +01005312{
Dietmar Eggemann9e91d612015-08-14 17:23:12 +01005313 unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
Vincent Guittot8bb5b002015-03-04 08:48:47 +01005314 unsigned long capacity = capacity_orig_of(cpu);
5315
Dietmar Eggemann231678b2015-08-14 17:23:13 +01005316 return (util >= capacity) ? capacity : util;
Vincent Guittot8bb5b002015-03-04 08:48:47 +01005317}
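/*
 * Capping example (hypothetical values): right after a burst of
 * migrations onto a cpu with capacity_orig = 1024, cfs.avg.util_avg may
 * transiently read 1280; cpu_util() reports 1024 so that group-level
 * sums never count utilization the cpu cannot actually deliver.
 */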
Peter Zijlstraa50bde52009-11-12 15:55:28 +01005318
5319/*
Morten Rasmussende91b9c2014-02-18 14:14:24 +00005320 * select_task_rq_fair: Select target runqueue for the waking task in domains
5321 * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
5322 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005323 *
Morten Rasmussende91b9c2014-02-18 14:14:24 +00005324 * Balances load by selecting the idlest cpu in the idlest group, or under
5325 * certain conditions an idle sibling cpu if the domain has SD_WAKE_AFFINE set.
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005326 *
Morten Rasmussende91b9c2014-02-18 14:14:24 +00005327 * Returns the target cpu number.
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005328 *
5329 * preempt must be disabled.
5330 */
Peter Zijlstra0017d732010-03-24 18:34:10 +01005331static int
Peter Zijlstraac66f542013-10-07 11:29:16 +01005332select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005333{
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02005334 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005335 int cpu = smp_processor_id();
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02005336 int new_cpu = prev_cpu;
Suresh Siddha99bd5e22010-03-31 16:47:45 -07005337 int want_affine = 0;
Peter Zijlstra5158f4e2009-09-16 13:46:59 +02005338 int sync = wake_flags & WF_SYNC;
Gregory Haskinse7693a32008-01-25 21:08:09 +01005339
Peter Zijlstrac58d25f2016-05-12 09:19:59 +02005340 if (sd_flag & SD_BALANCE_WAKE) {
5341 record_wakee(p);
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02005342 want_affine = !wake_wide(p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p));
Peter Zijlstrac58d25f2016-05-12 09:19:59 +02005343 }
Gregory Haskinse7693a32008-01-25 21:08:09 +01005344
Peter Zijlstradce840a2011-04-07 14:09:50 +02005345 rcu_read_lock();
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005346 for_each_domain(cpu, tmp) {
Peter Zijlstrae4f42882009-12-16 18:04:34 +01005347 if (!(tmp->flags & SD_LOAD_BALANCE))
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02005348 break;
Peter Zijlstrae4f42882009-12-16 18:04:34 +01005349
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005350 /*
Suresh Siddha99bd5e22010-03-31 16:47:45 -07005351 * If both cpu and prev_cpu are part of this domain,
5352 * cpu is a valid SD_WAKE_AFFINE target.
Peter Zijlstrafe3bcfe2009-11-12 15:55:29 +01005353 */
Suresh Siddha99bd5e22010-03-31 16:47:45 -07005354 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
5355 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
5356 affine_sd = tmp;
Alex Shif03542a2012-07-26 08:55:34 +08005357 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005358 }
5359
Alex Shif03542a2012-07-26 08:55:34 +08005360 if (tmp->flags & sd_flag)
Peter Zijlstra29cd8ba2009-09-17 09:01:14 +02005361 sd = tmp;
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02005362 else if (!want_affine)
5363 break;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005364 }
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005365
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02005366 if (affine_sd) {
5367 sd = NULL; /* Prefer wake_affine over balance flags */
5368 if (cpu != prev_cpu && wake_affine(affine_sd, p, sync))
5369 new_cpu = cpu;
Mike Galbraith8b911ac2010-03-11 17:17:16 +01005370 }
Peter Zijlstra3b640892009-09-16 13:44:33 +02005371
Mike Galbraith63b0e9e2015-07-14 17:39:50 +02005372 if (!sd) {
5373 if (sd_flag & SD_BALANCE_WAKE) /* XXX always ? */
5374 new_cpu = select_idle_sibling(p, new_cpu);
5375
5376 } else while (sd) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005377 struct sched_group *group;
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005378 int weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005379
Peter Zijlstra0763a662009-09-14 19:37:39 +02005380 if (!(sd->flags & sd_flag)) {
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005381 sd = sd->child;
5382 continue;
5383 }
5384
Vincent Guittotc44f2a02013-10-18 13:52:21 +02005385 group = find_idlest_group(sd, p, cpu, sd_flag);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005386 if (!group) {
5387 sd = sd->child;
5388 continue;
5389 }
5390
Peter Zijlstrad7c33c42009-09-11 12:45:38 +02005391 new_cpu = find_idlest_cpu(group, p, cpu);
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005392 if (new_cpu == -1 || new_cpu == cpu) {
5393 /* Now try balancing at a lower domain level of cpu */
5394 sd = sd->child;
5395 continue;
5396 }
5397
5398 /* Now try balancing at a lower domain level of new_cpu */
5399 cpu = new_cpu;
Peter Zijlstra669c55e2010-04-16 14:59:29 +02005400 weight = sd->span_weight;
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005401 sd = NULL;
5402 for_each_domain(cpu, tmp) {
Peter Zijlstra669c55e2010-04-16 14:59:29 +02005403 if (weight <= tmp->span_weight)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005404 break;
Peter Zijlstra0763a662009-09-14 19:37:39 +02005405 if (tmp->flags & sd_flag)
Peter Zijlstraaaee1202009-09-10 13:36:25 +02005406 sd = tmp;
5407 }
5408 /* while loop will break here if sd == NULL */
Gregory Haskinse7693a32008-01-25 21:08:09 +01005409 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02005410 rcu_read_unlock();
Gregory Haskinse7693a32008-01-25 21:08:09 +01005411
Peter Zijlstrac88d5912009-09-10 13:50:02 +02005412 return new_cpu;
Gregory Haskinse7693a32008-01-25 21:08:09 +01005413}
Paul Turner0a74bef2012-10-04 13:18:30 +02005414
5415/*
5416 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
5417 * cfs_rq_of(p) references at time of call are still valid and identify the
Byungchul Park525628c2015-11-18 09:34:59 +09005418 * previous cpu. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
Paul Turner0a74bef2012-10-04 13:18:30 +02005419 */
xiaofeng.yan5a4fd032015-09-23 14:55:59 +08005420static void migrate_task_rq_fair(struct task_struct *p)
Paul Turner0a74bef2012-10-04 13:18:30 +02005421{
Paul Turneraff3e492012-10-04 13:18:30 +02005422 /*
Peter Zijlstra59efa0b2016-05-10 18:24:37 +02005423 * As blocked tasks retain absolute vruntime the migration needs to
5424 * deal with this by subtracting the old and adding the new
5425 * min_vruntime -- the latter is done by enqueue_entity() when placing
5426 * the task on the new runqueue.
5427 */
5428 if (p->state == TASK_WAKING) {
5429 struct sched_entity *se = &p->se;
5430 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5431 u64 min_vruntime;
5432
5433#ifndef CONFIG_64BIT
5434 u64 min_vruntime_copy;
5435
5436 do {
5437 min_vruntime_copy = cfs_rq->min_vruntime_copy;
5438 smp_rmb();
5439 min_vruntime = cfs_rq->min_vruntime;
5440 } while (min_vruntime != min_vruntime_copy);
5441#else
5442 min_vruntime = cfs_rq->min_vruntime;
5443#endif
5444
5445 se->vruntime -= min_vruntime;
5446 }
5447
5448 /*
Yuyang Du9d89c252015-07-15 08:04:37 +08005449	 * We are supposed to update the task to "current" time, then it's up to date
5450 * and ready to go to new CPU/cfs_rq. But we have difficulty in getting
5451 * what current time is, so simply throw away the out-of-date time. This
5452	 * will result in the wakee task being less decayed, but giving the wakee more
5453	 * load doesn't sound bad.
Paul Turneraff3e492012-10-04 13:18:30 +02005454 */
Yuyang Du9d89c252015-07-15 08:04:37 +08005455 remove_entity_load_avg(&p->se);
5456
5457 /* Tell new CPU we are migrated */
5458 p->se.avg.last_update_time = 0;
Ben Segall3944a922014-05-15 15:59:20 -07005459
5460 /* We have migrated, no longer consider this task hot */
Yuyang Du9d89c252015-07-15 08:04:37 +08005461 p->se.exec_start = 0;
Paul Turner0a74bef2012-10-04 13:18:30 +02005462}
Yuyang Du12695572015-07-15 08:04:40 +08005463
5464static void task_dead_fair(struct task_struct *p)
5465{
5466 remove_entity_load_avg(&p->se);
5467}
Gregory Haskinse7693a32008-01-25 21:08:09 +01005468#endif /* CONFIG_SMP */
5469
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01005470static unsigned long
5471wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02005472{
5473 unsigned long gran = sysctl_sched_wakeup_granularity;
5474
5475 /*
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01005476	 * Since it's curr running now, convert the gran from real-time
5477	 * to virtual-time in its units.
Mike Galbraith13814d42010-03-11 17:17:04 +01005478 *
5479 * By using 'se' instead of 'curr' we penalize light tasks, so
5480 * they get preempted easier. That is, if 'se' < 'curr' then
5481 * the resulting gran will be larger, therefore penalizing the
5482 * lighter, if otoh 'se' > 'curr' then the resulting gran will
5483 * be smaller, again penalizing the lighter task.
5484 *
5485 * This is especially important for buddies when the leftmost
5486 * task is higher priority than the buddy.
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02005487 */
Shaohua Lif4ad9bd2011-04-08 12:53:09 +08005488 return calc_delta_fair(gran, se);
Peter Zijlstra0bbd3332008-04-19 19:44:57 +02005489}
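/*
 * Numeric sketch (assuming a 1ms wakeup granularity and the standard
 * nice-to-weight table): a nice-0 'se' (weight 1024 == NICE_0_LOAD)
 * keeps a 1ms virtual gran, while a nice+5 'se' (weight 335) gets
 * 1ms * 1024/335 ~= 3ms, so the lighter wakee needs roughly a 3x larger
 * vruntime lead before wakeup_preempt_entity() below lets it preempt.
 */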
5490
5491/*
Peter Zijlstra464b7522008-10-24 11:06:15 +02005492 * Should 'se' preempt 'curr'.
5493 *
5494 * |s1
5495 * |s2
5496 * |s3
5497 * g
5498 * |<--->|c
5499 *
5500 * w(c, s1) = -1
5501 * w(c, s2) = 0
5502 * w(c, s3) = 1
5503 *
5504 */
5505static int
5506wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
5507{
5508 s64 gran, vdiff = curr->vruntime - se->vruntime;
5509
5510 if (vdiff <= 0)
5511 return -1;
5512
Peter Zijlstrae52fb7c2009-01-14 12:39:19 +01005513 gran = wakeup_gran(curr, se);
Peter Zijlstra464b7522008-10-24 11:06:15 +02005514 if (vdiff > gran)
5515 return 1;
5516
5517 return 0;
5518}
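/*
 * Reading the picture above against the code: s1's vruntime is at or
 * beyond curr's (vdiff <= 0) -> -1, no preemption; s2 trails curr by no
 * more than the virtual gran g -> 0; s3 trails by more than g
 * (vdiff > gran) -> 1, preempt.
 */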
5519
Peter Zijlstra02479092008-11-04 21:25:10 +01005520static void set_last_buddy(struct sched_entity *se)
5521{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07005522 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5523 return;
5524
5525 for_each_sched_entity(se)
5526 cfs_rq_of(se)->last = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01005527}
5528
5529static void set_next_buddy(struct sched_entity *se)
5530{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07005531 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
5532 return;
5533
5534 for_each_sched_entity(se)
5535 cfs_rq_of(se)->next = se;
Peter Zijlstra02479092008-11-04 21:25:10 +01005536}
5537
Rik van Rielac53db52011-02-01 09:51:03 -05005538static void set_skip_buddy(struct sched_entity *se)
5539{
Venkatesh Pallipadi69c80f32011-04-13 18:21:09 -07005540 for_each_sched_entity(se)
5541 cfs_rq_of(se)->skip = se;
Rik van Rielac53db52011-02-01 09:51:03 -05005542}
5543
Peter Zijlstra464b7522008-10-24 11:06:15 +02005544/*
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005545 * Preempt the current task with a newly woken task if needed:
5546 */
Peter Zijlstra5a9b86f2009-09-16 13:47:58 +02005547static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005548{
5549 struct task_struct *curr = rq->curr;
Srivatsa Vaddagiri8651a862007-10-15 17:00:12 +02005550 struct sched_entity *se = &curr->se, *pse = &p->se;
Mike Galbraith03e89e42008-12-16 08:45:30 +01005551 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
Mike Galbraithf685cea2009-10-23 23:09:22 +02005552 int scale = cfs_rq->nr_running >= sched_nr_latency;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07005553 int next_buddy_marked = 0;
Mike Galbraith03e89e42008-12-16 08:45:30 +01005554
Ingo Molnar4ae7d5c2008-03-19 01:42:00 +01005555 if (unlikely(se == pse))
5556 return;
5557
Paul Turner5238cdd2011-07-21 09:43:37 -07005558 /*
Kirill Tkhai163122b2014-08-20 13:48:29 +04005559 * This is possible from callers such as attach_tasks(), in which we
Paul Turner5238cdd2011-07-21 09:43:37 -07005560	 * unconditionally check_preempt_curr() after an enqueue (which may have
5561	 * led to a throttle). This both saves work and prevents false
5562 * next-buddy nomination below.
5563 */
5564 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
5565 return;
5566
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07005567 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
Mike Galbraith3cb63d52009-09-11 12:01:17 +02005568 set_next_buddy(pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07005569 next_buddy_marked = 1;
5570 }
Peter Zijlstra57fdc262008-09-23 15:33:45 +02005571
Bharata B Raoaec0a512008-08-28 14:42:49 +05305572 /*
5573 * We can come here with TIF_NEED_RESCHED already set from new task
5574 * wake up path.
Paul Turner5238cdd2011-07-21 09:43:37 -07005575 *
5576 * Note: this also catches the edge-case of curr being in a throttled
5577 * group (e.g. via set_curr_task), since update_curr() (in the
5578 * enqueue of curr) will have resulted in resched being set. This
5579 * prevents us from potentially nominating it as a false LAST_BUDDY
5580 * below.
Bharata B Raoaec0a512008-08-28 14:42:49 +05305581 */
5582 if (test_tsk_need_resched(curr))
5583 return;
5584
Darren Harta2f5c9a2011-02-22 13:04:33 -08005585 /* Idle tasks are by definition preempted by non-idle tasks. */
5586 if (unlikely(curr->policy == SCHED_IDLE) &&
5587 likely(p->policy != SCHED_IDLE))
5588 goto preempt;
5589
Ingo Molnar91c234b2007-10-15 17:00:18 +02005590 /*
Darren Harta2f5c9a2011-02-22 13:04:33 -08005591 * Batch and idle tasks do not preempt non-idle tasks (their preemption
5592 * is driven by the tick):
Ingo Molnar91c234b2007-10-15 17:00:18 +02005593 */
Ingo Molnar8ed92e52012-10-14 14:28:50 +02005594 if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION))
Ingo Molnar91c234b2007-10-15 17:00:18 +02005595 return;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005596
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01005597 find_matching_se(&se, &pse);
Paul Turner9bbd7372011-07-05 19:07:21 -07005598 update_curr(cfs_rq_of(se));
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01005599 BUG_ON(!pse);
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07005600 if (wakeup_preempt_entity(se, pse) == 1) {
5601 /*
5602 * Bias pick_next to pick the sched entity that is
5603 * triggering this preemption.
5604 */
5605 if (!next_buddy_marked)
5606 set_next_buddy(pse);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01005607 goto preempt;
Venkatesh Pallipadi2f368252011-04-14 10:30:53 -07005608 }
Jupyung Leea65ac742009-11-17 18:51:40 +09005609
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01005610 return;
5611
5612preempt:
Kirill Tkhai88751252014-06-29 00:03:57 +04005613 resched_curr(rq);
Peter Zijlstra3a7e73a2009-11-28 18:51:02 +01005614 /*
5615 * Only set the backward buddy when the current task is still
5616 * on the rq. This can happen when a wakeup gets interleaved
5617 * with schedule on the ->pre_schedule() or idle_balance()
5618	 * point, either of which can drop the rq lock.
5619 *
5620 * Also, during early boot the idle thread is in the fair class,
5621	 * for obvious reasons it's a bad idea to schedule back to it.
5622 */
5623 if (unlikely(!se->on_rq || curr == rq->idle))
5624 return;
5625
5626 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
5627 set_last_buddy(se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005628}
5629
Peter Zijlstra606dba22012-02-11 06:05:00 +01005630static struct task_struct *
Peter Zijlstrae7904a22015-08-01 19:25:08 +02005631pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005632{
5633 struct cfs_rq *cfs_rq = &rq->cfs;
5634 struct sched_entity *se;
Peter Zijlstra678d5712012-02-11 06:05:00 +01005635 struct task_struct *p;
Peter Zijlstra37e117c2014-02-14 12:25:08 +01005636 int new_tasks;
Peter Zijlstra678d5712012-02-11 06:05:00 +01005637
Peter Zijlstra6e831252014-02-11 16:11:48 +01005638again:
Peter Zijlstra678d5712012-02-11 06:05:00 +01005639#ifdef CONFIG_FAIR_GROUP_SCHED
5640 if (!cfs_rq->nr_running)
Peter Zijlstra38033c32014-01-23 20:32:21 +01005641 goto idle;
Peter Zijlstra678d5712012-02-11 06:05:00 +01005642
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +01005643 if (prev->sched_class != &fair_sched_class)
Peter Zijlstra678d5712012-02-11 06:05:00 +01005644 goto simple;
5645
5646 /*
5647 * Because of the set_next_buddy() in dequeue_task_fair() it is rather
5648 * likely that a next task is from the same cgroup as the current.
5649 *
5650 * Therefore attempt to avoid putting and setting the entire cgroup
5651 * hierarchy, only change the part that actually changes.
5652 */
5653
5654 do {
5655 struct sched_entity *curr = cfs_rq->curr;
5656
5657 /*
5658 * Since we got here without doing put_prev_entity() we also
5659 * have to consider cfs_rq->curr. If it is still a runnable
5660 * entity, update_curr() will update its vruntime, otherwise
5661 * forget we've ever seen it.
5662 */
Ben Segall54d27362015-04-06 15:28:10 -07005663 if (curr) {
5664 if (curr->on_rq)
5665 update_curr(cfs_rq);
5666 else
5667 curr = NULL;
Peter Zijlstra678d5712012-02-11 06:05:00 +01005668
Ben Segall54d27362015-04-06 15:28:10 -07005669 /*
5670 * This call to check_cfs_rq_runtime() will do the
5671 * throttle and dequeue its entity in the parent(s).
5672 * Therefore the 'simple' nr_running test will indeed
5673 * be correct.
5674 */
5675 if (unlikely(check_cfs_rq_runtime(cfs_rq)))
5676 goto simple;
5677 }
Peter Zijlstra678d5712012-02-11 06:05:00 +01005678
5679 se = pick_next_entity(cfs_rq, curr);
5680 cfs_rq = group_cfs_rq(se);
5681 } while (cfs_rq);
5682
5683 p = task_of(se);
5684
5685 /*
5686 * Since we haven't yet done put_prev_entity and if the selected task
5687 * is a different task than we started out with, try and touch the
5688 * least amount of cfs_rqs.
5689 */
5690 if (prev != p) {
5691 struct sched_entity *pse = &prev->se;
5692
5693 while (!(cfs_rq = is_same_group(se, pse))) {
5694 int se_depth = se->depth;
5695 int pse_depth = pse->depth;
5696
5697 if (se_depth <= pse_depth) {
5698 put_prev_entity(cfs_rq_of(pse), pse);
5699 pse = parent_entity(pse);
5700 }
5701 if (se_depth >= pse_depth) {
5702 set_next_entity(cfs_rq_of(se), se);
5703 se = parent_entity(se);
5704 }
5705 }
5706
5707 put_prev_entity(cfs_rq, pse);
5708 set_next_entity(cfs_rq, se);
5709 }
5710
5711 if (hrtick_enabled(rq))
5712 hrtick_start_fair(rq, p);
5713
5714 return p;
5715simple:
5716 cfs_rq = &rq->cfs;
5717#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005718
Tim Blechmann36ace272009-11-24 11:55:45 +01005719 if (!cfs_rq->nr_running)
Peter Zijlstra38033c32014-01-23 20:32:21 +01005720 goto idle;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005721
Peter Zijlstra3f1d2a32014-02-12 10:49:30 +01005722 put_prev_task(rq, prev);
Peter Zijlstra606dba22012-02-11 06:05:00 +01005723
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005724 do {
Peter Zijlstra678d5712012-02-11 06:05:00 +01005725 se = pick_next_entity(cfs_rq, NULL);
Peter Zijlstraf4b67552008-11-04 21:25:07 +01005726 set_next_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005727 cfs_rq = group_cfs_rq(se);
5728 } while (cfs_rq);
5729
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005730 p = task_of(se);
Peter Zijlstra678d5712012-02-11 06:05:00 +01005731
Mike Galbraithb39e66e2011-11-22 15:20:07 +01005732 if (hrtick_enabled(rq))
5733 hrtick_start_fair(rq, p);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01005734
5735 return p;
Peter Zijlstra38033c32014-01-23 20:32:21 +01005736
5737idle:
Peter Zijlstracbce1a62015-06-11 14:46:54 +02005738 /*
5739 * This is OK, because current is on_cpu, which avoids it being picked
5740 * for load-balance and preemption/IRQs are still disabled avoiding
5741 * further scheduler activity on it and we're being very careful to
5742 * re-start the picking loop.
5743 */
Peter Zijlstrae7904a22015-08-01 19:25:08 +02005744 lockdep_unpin_lock(&rq->lock, cookie);
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04005745 new_tasks = idle_balance(rq);
Peter Zijlstrae7904a22015-08-01 19:25:08 +02005746 lockdep_repin_lock(&rq->lock, cookie);
Peter Zijlstra37e117c2014-02-14 12:25:08 +01005747 /*
5748 * Because idle_balance() releases (and re-acquires) rq->lock, it is
5749 * possible for any higher priority task to appear. In that case we
5750 * must re-start the pick_next_entity() loop.
5751 */
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04005752 if (new_tasks < 0)
Peter Zijlstra37e117c2014-02-14 12:25:08 +01005753 return RETRY_TASK;
5754
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04005755 if (new_tasks > 0)
Peter Zijlstra38033c32014-01-23 20:32:21 +01005756 goto again;
Peter Zijlstra38033c32014-01-23 20:32:21 +01005757
5758 return NULL;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005759}
5760
5761/*
5762 * Account for a descheduled task:
5763 */
Ingo Molnar31ee5292007-08-09 11:16:49 +02005764static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005765{
5766 struct sched_entity *se = &prev->se;
5767 struct cfs_rq *cfs_rq;
5768
5769 for_each_sched_entity(se) {
5770 cfs_rq = cfs_rq_of(se);
Ingo Molnarab6cde22007-08-09 11:16:48 +02005771 put_prev_entity(cfs_rq, se);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005772 }
5773}
5774
Rik van Rielac53db52011-02-01 09:51:03 -05005775/*
5776 * sched_yield() is very simple
5777 *
5778 * The magic of dealing with the ->skip buddy is in pick_next_entity.
5779 */
5780static void yield_task_fair(struct rq *rq)
5781{
5782 struct task_struct *curr = rq->curr;
5783 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
5784 struct sched_entity *se = &curr->se;
5785
5786 /*
5787 * Are we the only task in the tree?
5788 */
5789 if (unlikely(rq->nr_running == 1))
5790 return;
5791
5792 clear_buddies(cfs_rq, se);
5793
5794 if (curr->policy != SCHED_BATCH) {
5795 update_rq_clock(rq);
5796 /*
5797 * Update run-time statistics of the 'current'.
5798 */
5799 update_curr(cfs_rq);
Mike Galbraith916671c2011-11-22 15:21:26 +01005800 /*
5801 * Tell update_rq_clock() that we've just updated,
5802 * so we don't do microscopic update in schedule()
5803 * and double the fastpath cost.
5804 */
Peter Zijlstra9edfbfe2015-01-05 11:18:11 +01005805 rq_clock_skip_update(rq, true);
Rik van Rielac53db52011-02-01 09:51:03 -05005806 }
5807
5808 set_skip_buddy(se);
5809}
5810
Mike Galbraithd95f4122011-02-01 09:50:51 -05005811static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
5812{
5813 struct sched_entity *se = &p->se;
5814
Paul Turner5238cdd2011-07-21 09:43:37 -07005815 /* throttled hierarchies are not runnable */
5816 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
Mike Galbraithd95f4122011-02-01 09:50:51 -05005817 return false;
5818
5819 /* Tell the scheduler that we'd really like pse to run next. */
5820 set_next_buddy(se);
5821
Mike Galbraithd95f4122011-02-01 09:50:51 -05005822 yield_task_fair(rq);
5823
5824 return true;
5825}
5826
Peter Williams681f3e62007-10-24 18:23:51 +02005827#ifdef CONFIG_SMP
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005828/**************************************************
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02005829 * Fair scheduling class load-balancing methods.
5830 *
5831 * BASICS
5832 *
5833 * The purpose of load-balancing is to achieve the same basic fairness the
5834 * per-cpu scheduler provides, namely provide a proportional amount of compute
5835 * time to each task. This is expressed in the following equation:
5836 *
5837 * W_i,n/P_i == W_j,n/P_j for all i,j (1)
5838 *
5839 * Where W_i,n is the n-th weight average for cpu i. The instantaneous weight
5840 * W_i,0 is defined as:
5841 *
5842 * W_i,0 = \Sum_j w_i,j (2)
5843 *
5844 * Where w_i,j is the weight of the j-th runnable task on cpu i. This weight
Yuyang Du1c3de5e2016-03-30 07:07:51 +08005845 * is derived from the nice value as per sched_prio_to_weight[].
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02005846 *
5847 * The weight average is an exponential decay average of the instantaneous
5848 * weight:
5849 *
5850 * W'_i,n = (2^n - 1) / 2^n * W_i,n + 1 / 2^n * W_i,0 (3)
5851 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04005852 * C_i is the compute capacity of cpu i, typically it is the
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02005853 * fraction of 'recent' time available for SCHED_OTHER task execution. But it
5854 * can also include other factors [XXX].
5855 *
5856 * To achieve this balance we define a measure of imbalance which follows
5857 * directly from (1):
5858 *
Nicolas Pitreced549f2014-05-26 18:19:38 -04005859 * imb_i,j = max{ avg(W/C), W_i/C_i } - min{ avg(W/C), W_j/C_j } (4)
Peter Zijlstrae9c84cb2012-07-03 13:53:26 +02005860 *
5861	 * We then move tasks around to minimize the imbalance. In the continuous
5862	 * function space it is obvious this converges; in the discrete case we get
5863 * a few fun cases generally called infeasible weight scenarios.
5864 *
5865 * [XXX expand on:
5866 * - infeasible weights;
5867 * - local vs global optima in the discrete case. ]
5868 *
5869 *
5870 * SCHED DOMAINS
5871 *
5872 * In order to solve the imbalance equation (4), and avoid the obvious O(n^2)
5873 * for all i,j solution, we create a tree of cpus that follows the hardware
5874 * topology where each level pairs two lower groups (or better). This results
5875 * in O(log n) layers. Furthermore we reduce the number of cpus going up the
5876 * tree to only the first of the previous level and we decrease the frequency
5877 * of load-balance at each level inv. proportional to the number of cpus in
5878 * the groups.
5879 *
5880 * This yields:
5881 *
5882 * log_2 n 1 n
5883 * \Sum { --- * --- * 2^i } = O(n) (5)
5884 * i = 0 2^i 2^i
5885 * `- size of each group
5886 * | | `- number of cpus doing load-balance
5887 * | `- freq
5888 * `- sum over all levels
5889 *
5890 * Coupled with a limit on how many tasks we can migrate every balance pass,
5891 * this makes (5) the runtime complexity of the balancer.
5892 *
5893 * An important property here is that each CPU is still (indirectly) connected
5894 * to every other cpu in at most O(log n) steps:
5895 *
5896 * The adjacency matrix of the resulting graph is given by:
5897 *
5898 * log_2 n
5899 * A_i,j = \Union (i % 2^k == 0) && i / 2^(k+1) == j / 2^(k+1) (6)
5900 * k = 0
5901 *
5902 * And you'll find that:
5903 *
5904 * A^(log_2 n)_i,j != 0 for all i,j (7)
5905 *
5906 * Showing there's indeed a path between every cpu in at most O(log n) steps.
5907 * The task movement gives a factor of O(m), giving a convergence complexity
5908 * of:
5909 *
5910 * O(nm log n), n := nr_cpus, m := nr_tasks (8)
5911 *
5912 *
5913 * WORK CONSERVING
5914 *
5915 * In order to avoid CPUs going idle while there's still work to do, new idle
5916 * balancing is more aggressive and has the newly idle cpu iterate up the domain
5917 * tree itself instead of relying on other CPUs to bring it work.
5918 *
5919 * This adds some complexity to both (5) and (8) but it reduces the total idle
5920 * time.
5921 *
5922 * [XXX more?]
5923 *
5924 *
5925 * CGROUPS
5926 *
5927 * Cgroups make a horror show out of (2), instead of a simple sum we get:
5928 *
5929 * s_k,i
5930 * W_i,0 = \Sum_j \Prod_k w_k * ----- (9)
5931 * S_k
5932 *
5933 * Where
5934 *
5935 * s_k,i = \Sum_j w_i,j,k and S_k = \Sum_i s_k,i (10)
5936 *
5937 * w_i,j,k is the weight of the j-th runnable task in the k-th cgroup on cpu i.
5938 *
5939	 * The big problem is S_k: it's a global sum needed to compute a local (W_i)
5940 * property.
5941 *
5942 * [XXX write more on how we solve this.. _after_ merging pjt's patches that
5943 * rewrite all of this once again.]
5944 */
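/*
 * Worked instance of (3) with made-up numbers: for n = 2 the average is
 * W' = 3/4 * W + 1/4 * W_0, so an instantaneous weight W_i,0 = 2048
 * pulls a previous average of 1024 up to 3/4 * 1024 + 1/4 * 2048 = 1280;
 * each step folds the instantaneous weight in with factor 1/2^n, so old
 * history fades geometrically.
 */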
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02005945
Hiroshi Shimamotoed387b72012-01-31 11:40:32 +09005946static unsigned long __read_mostly max_load_balance_interval = HZ/10;
5947
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005948enum fbq_type { regular, remote, all };
5949
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005950#define LBF_ALL_PINNED 0x01
Peter Zijlstra367456c2012-02-20 21:49:09 +01005951#define LBF_NEED_BREAK 0x02
Peter Zijlstra62633222013-08-19 12:41:09 +02005952#define LBF_DST_PINNED 0x04
5953#define LBF_SOME_PINNED 0x08
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005954
5955struct lb_env {
5956 struct sched_domain *sd;
5957
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005958 struct rq *src_rq;
Prashanth Nageshappa85c1e7d2012-06-19 17:47:34 +05305959 int src_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005960
5961 int dst_cpu;
5962 struct rq *dst_rq;
5963
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05305964 struct cpumask *dst_grpmask;
5965 int new_dst_cpu;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005966 enum cpu_idle_type idle;
Peter Zijlstrabd939f42012-05-02 14:20:37 +02005967 long imbalance;
Michael Wangb94031302012-07-12 16:10:13 +08005968 /* The set of CPUs under consideration for load-balancing */
5969 struct cpumask *cpus;
5970
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005971 unsigned int flags;
Peter Zijlstra367456c2012-02-20 21:49:09 +01005972
5973 unsigned int loop;
5974 unsigned int loop_break;
5975 unsigned int loop_max;
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01005976
5977 enum fbq_type fbq_type;
Kirill Tkhai163122b2014-08-20 13:48:29 +04005978 struct list_head tasks;
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01005979};
5980
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01005981/*
Peter Zijlstra029632f2011-10-25 10:00:11 +02005982 * Is this task likely cache-hot:
5983 */
Hillf Danton5d5e2b12014-06-10 10:58:43 +02005984static int task_hot(struct task_struct *p, struct lb_env *env)
Peter Zijlstra029632f2011-10-25 10:00:11 +02005985{
5986 s64 delta;
5987
Kirill Tkhaie5673f22014-08-20 13:48:01 +04005988 lockdep_assert_held(&env->src_rq->lock);
5989
Peter Zijlstra029632f2011-10-25 10:00:11 +02005990 if (p->sched_class != &fair_sched_class)
5991 return 0;
5992
5993 if (unlikely(p->policy == SCHED_IDLE))
5994 return 0;
5995
5996 /*
5997 * Buddy candidates are cache hot:
5998 */
Hillf Danton5d5e2b12014-06-10 10:58:43 +02005999 if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
Peter Zijlstra029632f2011-10-25 10:00:11 +02006000 (&p->se == cfs_rq_of(&p->se)->next ||
6001 &p->se == cfs_rq_of(&p->se)->last))
6002 return 1;
6003
6004 if (sysctl_sched_migration_cost == -1)
6005 return 1;
6006 if (sysctl_sched_migration_cost == 0)
6007 return 0;
6008
Hillf Danton5d5e2b12014-06-10 10:58:43 +02006009 delta = rq_clock_task(env->src_rq) - p->se.exec_start;
Peter Zijlstra029632f2011-10-25 10:00:11 +02006010
6011 return delta < (s64)sysctl_sched_migration_cost;
6012}
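/*
 * Illustrative thresholds: with the default sysctl_sched_migration_cost
 * of 500000ns, a task that last ran 0.3ms ago on src_rq is still cache
 * hot (delta < 0.5ms) while one idle for 2ms is not; per the early
 * returns above, setting the sysctl to -1 marks every task hot and 0
 * marks none.
 */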
6013
Mel Gorman3a7053b2013-10-07 11:29:00 +01006014#ifdef CONFIG_NUMA_BALANCING
Rik van Rielc1ceac62015-05-14 22:59:36 -04006015/*
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306016 * Returns 1, if task migration degrades locality
6017	 * Returns 0, if task migration improves locality, i.e. migration is preferred.
6018 * Returns -1, if task migration is not affected by locality.
Rik van Rielc1ceac62015-05-14 22:59:36 -04006019 */
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306020static int migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
Mel Gorman3a7053b2013-10-07 11:29:00 +01006021{
Rik van Rielb1ad0652014-05-15 13:03:06 -04006022 struct numa_group *numa_group = rcu_dereference(p->numa_group);
Rik van Rielc1ceac62015-05-14 22:59:36 -04006023 unsigned long src_faults, dst_faults;
Mel Gorman3a7053b2013-10-07 11:29:00 +01006024 int src_nid, dst_nid;
6025
Srikar Dronamraju2a595722015-08-11 21:54:21 +05306026 if (!static_branch_likely(&sched_numa_balancing))
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306027 return -1;
6028
Srikar Dronamrajuc3b9bc52015-08-11 16:30:12 +05306029 if (!p->numa_faults || !(env->sd->flags & SD_NUMA))
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306030 return -1;
Mel Gorman7a0f3082013-10-07 11:29:01 +01006031
6032 src_nid = cpu_to_node(env->src_cpu);
6033 dst_nid = cpu_to_node(env->dst_cpu);
6034
Mel Gorman83e1d2c2013-10-07 11:29:27 +01006035 if (src_nid == dst_nid)
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306036 return -1;
Mel Gorman7a0f3082013-10-07 11:29:01 +01006037
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306038 /* Migrating away from the preferred node is always bad. */
6039 if (src_nid == p->numa_preferred_nid) {
6040 if (env->src_rq->nr_running > env->src_rq->nr_preferred_running)
6041 return 1;
6042 else
6043 return -1;
6044 }
Mel Gorman83e1d2c2013-10-07 11:29:27 +01006045
Rik van Rielc1ceac62015-05-14 22:59:36 -04006046 /* Encourage migration to the preferred node. */
6047 if (dst_nid == p->numa_preferred_nid)
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306048 return 0;
Rik van Rielc1ceac62015-05-14 22:59:36 -04006049
6050 if (numa_group) {
6051 src_faults = group_faults(p, src_nid);
6052 dst_faults = group_faults(p, dst_nid);
6053 } else {
6054 src_faults = task_faults(p, src_nid);
6055 dst_faults = task_faults(p, dst_nid);
6056 }
6057
6058 return dst_faults < src_faults;
Mel Gorman7a0f3082013-10-07 11:29:01 +01006059}
6060
Mel Gorman3a7053b2013-10-07 11:29:00 +01006061#else
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306062static inline int migrate_degrades_locality(struct task_struct *p,
Mel Gorman3a7053b2013-10-07 11:29:00 +01006063 struct lb_env *env)
6064{
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306065 return -1;
Mel Gorman7a0f3082013-10-07 11:29:01 +01006066}
Mel Gorman3a7053b2013-10-07 11:29:00 +01006067#endif
6068
Peter Zijlstra029632f2011-10-25 10:00:11 +02006069/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006070 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
6071 */
6072static
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006073int can_migrate_task(struct task_struct *p, struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006074{
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306075 int tsk_cache_hot;
Kirill Tkhaie5673f22014-08-20 13:48:01 +04006076
6077 lockdep_assert_held(&env->src_rq->lock);
6078
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006079 /*
6080 * We do not migrate tasks that are:
Joonsoo Kimd3198082013-04-23 17:27:40 +09006081 * 1) throttled_lb_pair, or
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006082 * 2) cannot be migrated to this CPU due to cpus_allowed, or
Joonsoo Kimd3198082013-04-23 17:27:40 +09006083 * 3) running (obviously), or
6084 * 4) are cache-hot on their current CPU.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006085 */
Joonsoo Kimd3198082013-04-23 17:27:40 +09006086 if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
6087 return 0;
6088
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01006089 if (!cpumask_test_cpu(env->dst_cpu, tsk_cpus_allowed(p))) {
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006090 int cpu;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306091
Lucas De Marchi41acab82010-03-10 23:37:45 -03006092 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306093
Peter Zijlstra62633222013-08-19 12:41:09 +02006094 env->flags |= LBF_SOME_PINNED;
6095
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306096 /*
6097 * Remember if this task can be migrated to any other cpu in
6098 * our sched_group. We may want to revisit it if we couldn't
6099 * meet load balance goals by pulling other tasks on src_cpu.
6100 *
6101 * Also avoid computing new_dst_cpu if we have already computed
6102 * one in current iteration.
6103 */
Peter Zijlstra62633222013-08-19 12:41:09 +02006104 if (!env->dst_grpmask || (env->flags & LBF_DST_PINNED))
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306105 return 0;
6106
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006107		/* Prevent re-selecting dst_cpu via env's cpus */
6108 for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
6109 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
Peter Zijlstra62633222013-08-19 12:41:09 +02006110 env->flags |= LBF_DST_PINNED;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006111 env->new_dst_cpu = cpu;
6112 break;
6113 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306114 }
Joonsoo Kime02e60c2013-04-23 17:27:42 +09006115
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006116 return 0;
6117 }
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05306118
6119	/* Record that we found at least one task that could run on dst_cpu */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01006120 env->flags &= ~LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006121
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01006122 if (task_running(env->src_rq, p)) {
Lucas De Marchi41acab82010-03-10 23:37:45 -03006123 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006124 return 0;
6125 }
6126
6127 /*
6128 * Aggressive migration if:
Mel Gorman3a7053b2013-10-07 11:29:00 +01006129 * 1) destination numa is preferred
6130 * 2) task is cache cold, or
6131 * 3) too many balance attempts have failed.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006132 */
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306133 tsk_cache_hot = migrate_degrades_locality(p, env);
6134 if (tsk_cache_hot == -1)
6135 tsk_cache_hot = task_hot(p, env);
Mel Gorman3a7053b2013-10-07 11:29:00 +01006136
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306137 if (tsk_cache_hot <= 0 ||
Kirill Tkhai7a96c232014-09-22 22:36:12 +04006138 env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
Srikar Dronamraju2a1ed242015-06-16 17:25:59 +05306139 if (tsk_cache_hot == 1) {
Mel Gorman3a7053b2013-10-07 11:29:00 +01006140 schedstat_inc(env->sd, lb_hot_gained[env->idle]);
6141 schedstat_inc(p, se.statistics.nr_forced_migrations);
6142 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006143 return 1;
6144 }
6145
Zhang Hang4e2dcb72013-04-10 14:04:55 +08006146 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
6147 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006148}
6149
Peter Zijlstra897c3952009-12-17 17:45:42 +01006150/*
Kirill Tkhai163122b2014-08-20 13:48:29 +04006151 * detach_task() -- detach the task for the migration specified in env
Peter Zijlstra897c3952009-12-17 17:45:42 +01006152 */
Kirill Tkhai163122b2014-08-20 13:48:29 +04006153static void detach_task(struct task_struct *p, struct lb_env *env)
6154{
6155 lockdep_assert_held(&env->src_rq->lock);
6156
Kirill Tkhai163122b2014-08-20 13:48:29 +04006157 p->on_rq = TASK_ON_RQ_MIGRATING;
Joonwoo Park3ea94de2015-11-12 19:38:54 -08006158 deactivate_task(env->src_rq, p, 0);
Kirill Tkhai163122b2014-08-20 13:48:29 +04006159 set_task_cpu(p, env->dst_cpu);
6160}
6161
6162/*
Kirill Tkhaie5673f22014-08-20 13:48:01 +04006163 * detach_one_task() -- tries to dequeue exactly one task from env->src_rq, as
Peter Zijlstra897c3952009-12-17 17:45:42 +01006164 * part of active balancing operations within "domain".
Peter Zijlstra897c3952009-12-17 17:45:42 +01006165 *
Kirill Tkhaie5673f22014-08-20 13:48:01 +04006166 * Returns a task if successful and NULL otherwise.
Peter Zijlstra897c3952009-12-17 17:45:42 +01006167 */
Kirill Tkhaie5673f22014-08-20 13:48:01 +04006168static struct task_struct *detach_one_task(struct lb_env *env)
Peter Zijlstra897c3952009-12-17 17:45:42 +01006169{
6170 struct task_struct *p, *n;
Peter Zijlstra897c3952009-12-17 17:45:42 +01006171
Kirill Tkhaie5673f22014-08-20 13:48:01 +04006172 lockdep_assert_held(&env->src_rq->lock);
6173
Peter Zijlstra367456c2012-02-20 21:49:09 +01006174 list_for_each_entry_safe(p, n, &env->src_rq->cfs_tasks, se.group_node) {
Peter Zijlstra367456c2012-02-20 21:49:09 +01006175 if (!can_migrate_task(p, env))
6176 continue;
Peter Zijlstra897c3952009-12-17 17:45:42 +01006177
Kirill Tkhai163122b2014-08-20 13:48:29 +04006178 detach_task(p, env);
Kirill Tkhaie5673f22014-08-20 13:48:01 +04006179
Peter Zijlstra367456c2012-02-20 21:49:09 +01006180 /*
Kirill Tkhaie5673f22014-08-20 13:48:01 +04006181 * Right now, this is only the second place where
Kirill Tkhai163122b2014-08-20 13:48:29 +04006182 * lb_gained[env->idle] is updated (other is detach_tasks)
Kirill Tkhaie5673f22014-08-20 13:48:01 +04006183 * so we can safely collect stats here rather than
Kirill Tkhai163122b2014-08-20 13:48:29 +04006184 * inside detach_tasks().
Peter Zijlstra367456c2012-02-20 21:49:09 +01006185 */
6186 schedstat_inc(env->sd, lb_gained[env->idle]);
Kirill Tkhaie5673f22014-08-20 13:48:01 +04006187 return p;
Peter Zijlstra897c3952009-12-17 17:45:42 +01006188 }
Kirill Tkhaie5673f22014-08-20 13:48:01 +04006189 return NULL;
Peter Zijlstra897c3952009-12-17 17:45:42 +01006190}
6191
Peter Zijlstraeb953082012-04-17 13:38:40 +02006192static const unsigned int sched_nr_migrate_break = 32;
6193
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01006194/*
Kirill Tkhai163122b2014-08-20 13:48:29 +04006195 * detach_tasks() -- tries to detach up to imbalance weighted load from
6196 * busiest_rq, as part of a balancing operation within domain "sd".
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01006197 *
Kirill Tkhai163122b2014-08-20 13:48:29 +04006198 * Returns number of detached tasks if successful and 0 otherwise.
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01006199 */
Kirill Tkhai163122b2014-08-20 13:48:29 +04006200static int detach_tasks(struct lb_env *env)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006201{
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01006202 struct list_head *tasks = &env->src_rq->cfs_tasks;
6203 struct task_struct *p;
Peter Zijlstra367456c2012-02-20 21:49:09 +01006204 unsigned long load;
Kirill Tkhai163122b2014-08-20 13:48:29 +04006205 int detached = 0;
6206
6207 lockdep_assert_held(&env->src_rq->lock);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006208
Peter Zijlstrabd939f42012-05-02 14:20:37 +02006209 if (env->imbalance <= 0)
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01006210 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006211
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01006212 while (!list_empty(tasks)) {
Yuyang Du985d3a42015-07-06 06:11:51 +08006213 /*
6214 * We don't want to steal all, otherwise we may be treated likewise,
6215 * which could at worst lead to a livelock crash.
6216 */
                if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1)
                        break;

                p = list_first_entry(tasks, struct task_struct, se.group_node);

                env->loop++;
                /* We've more or less seen every task there is, call it quits */
                if (env->loop > env->loop_max)
                        break;

                /* take a breather every nr_migrate tasks */
                if (env->loop > env->loop_break) {
                        env->loop_break += sched_nr_migrate_break;
                        env->flags |= LBF_NEED_BREAK;
                        break;
                }

                if (!can_migrate_task(p, env))
                        goto next;

                load = task_h_load(p);

                if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
                        goto next;

                if ((load / 2) > env->imbalance)
                        goto next;

                detach_task(p, env);
                list_add(&p->se.group_node, &env->tasks);

                detached++;
                env->imbalance -= load;

#ifdef CONFIG_PREEMPT
                /*
                 * NEWIDLE balancing is a source of latency, so preemptible
                 * kernels will stop after the first task is detached to minimize
                 * the critical section.
                 */
                if (env->idle == CPU_NEWLY_IDLE)
                        break;
#endif

                /*
                 * We only want to steal up to the prescribed amount of
                 * weighted load.
                 */
                if (env->imbalance <= 0)
                        break;

                continue;
next:
                list_move_tail(&p->se.group_node, tasks);
        }

        /*
         * Right now this is one of only two places that collect this stat
         * (the other being detach_one_task()), so we can safely account all
         * the tasks detached by the loop in a single schedstat_add() here
         * rather than updating the stat once per task.
         */
        schedstat_add(env->sd, lb_gained[env->idle], detached);

        return detached;
}

/*
 * attach_task() -- attach the task detached by detach_task() to its new rq.
 */
static void attach_task(struct rq *rq, struct task_struct *p)
{
        lockdep_assert_held(&rq->lock);

        BUG_ON(task_rq(p) != rq);
        activate_task(rq, p, 0);
        p->on_rq = TASK_ON_RQ_QUEUED;
        check_preempt_curr(rq, p, 0);
}

/*
 * attach_one_task() -- attaches the task returned from detach_one_task() to
 * its new rq.
 */
static void attach_one_task(struct rq *rq, struct task_struct *p)
{
        raw_spin_lock(&rq->lock);
        attach_task(rq, p);
        raw_spin_unlock(&rq->lock);
}

/*
 * attach_tasks() -- attaches all tasks detached by detach_tasks() to their
 * new rq.
 */
static void attach_tasks(struct lb_env *env)
{
        struct list_head *tasks = &env->tasks;
        struct task_struct *p;

        raw_spin_lock(&env->dst_rq->lock);

        while (!list_empty(tasks)) {
                p = list_first_entry(tasks, struct task_struct, se.group_node);
                list_del_init(&p->se.group_node);

                attach_task(env->dst_rq, p);
        }

        raw_spin_unlock(&env->dst_rq->lock);
}
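
/*
 * A sketch of the locking protocol formed by the functions above
 * (summarizing the code, not adding semantics): detach_tasks() runs with
 * only env->src_rq->lock held and parks detached tasks on the env->tasks
 * list; attach_tasks() later runs with only env->dst_rq->lock held. The
 * two runqueue locks are never held at the same time, and a task in
 * flight stays marked TASK_ON_RQ_MIGRATING (set by detach_task()) until
 * attach_task() sets TASK_ON_RQ_QUEUED again.
 */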

#ifdef CONFIG_FAIR_GROUP_SCHED
static void update_blocked_averages(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        struct cfs_rq *cfs_rq;
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        update_rq_clock(rq);

        /*
         * Iterates the task_group tree in a bottom-up fashion, see
         * list_add_leaf_cfs_rq() for details.
         */
        for_each_leaf_cfs_rq(rq, cfs_rq) {
                /* throttled entities do not contribute to load */
                if (throttled_hierarchy(cfs_rq))
                        continue;

                if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
                        update_tg_load_avg(cfs_rq, 0);
        }
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * Compute the hierarchical load factor for cfs_rq and all its ancestors.
 * This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parent's load.
 */
static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
{
        struct rq *rq = rq_of(cfs_rq);
        struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
        unsigned long now = jiffies;
        unsigned long load;

        if (cfs_rq->last_h_load_update == now)
                return;

        cfs_rq->h_load_next = NULL;
        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                cfs_rq->h_load_next = se;
                if (cfs_rq->last_h_load_update == now)
                        break;
        }

        if (!se) {
                cfs_rq->h_load = cfs_rq_load_avg(cfs_rq);
                cfs_rq->last_h_load_update = now;
        }

        while ((se = cfs_rq->h_load_next) != NULL) {
                load = cfs_rq->h_load;
                load = div64_ul(load * se->avg.load_avg,
                        cfs_rq_load_avg(cfs_rq) + 1);
                cfs_rq = group_cfs_rq(se);
                cfs_rq->h_load = load;
                cfs_rq->last_h_load_update = now;
        }
}

static unsigned long task_h_load(struct task_struct *p)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(p);

        update_cfs_rq_h_load(cfs_rq);
        return div64_ul(p->se.avg.load_avg * cfs_rq->h_load,
                        cfs_rq_load_avg(cfs_rq) + 1);
}
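
/*
 * Worked example with illustrative numbers: a task with
 * se.avg.load_avg == 512 sits on a group cfs_rq whose h_load was computed
 * as 256 and whose cfs_rq_load_avg() is 1023. Its contribution to
 * rq-level load is then
 *
 *      task_h_load(p) = 512 * 256 / (1023 + 1) = 128,
 *
 * i.e. the task's own load scaled by the fraction its group contributes
 * at the root. The "+ 1" only guards against division by zero.
 */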
#else
static inline void update_blocked_averages(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        struct cfs_rq *cfs_rq = &rq->cfs;
        unsigned long flags;

        raw_spin_lock_irqsave(&rq->lock, flags);
        update_rq_clock(rq);
        update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
}

static unsigned long task_h_load(struct task_struct *p)
{
        return p->se.avg.load_avg;
}
#endif

/********** Helpers for find_busiest_group ************************/

enum group_type {
        group_other = 0,
        group_imbalanced,
        group_overloaded,
};

/*
 * sg_lb_stats - stats of a sched_group required for load_balancing
 */
struct sg_lb_stats {
        unsigned long avg_load; /* Avg load across the CPUs of the group */
        unsigned long group_load; /* Total load over the CPUs of the group */
        unsigned long sum_weighted_load; /* Weighted load of group's tasks */
        unsigned long load_per_task;
        unsigned long group_capacity;
        unsigned long group_util; /* Total utilization of the group */
        unsigned int sum_nr_running; /* Nr tasks running in the group */
        unsigned int idle_cpus;
        unsigned int group_weight;
        enum group_type group_type;
        int group_no_capacity;
#ifdef CONFIG_NUMA_BALANCING
        unsigned int nr_numa_running;
        unsigned int nr_preferred_running;
#endif
};

/*
 * sd_lb_stats - Structure to store the statistics of a sched_domain
 *               during load balancing.
 */
struct sd_lb_stats {
        struct sched_group *busiest;    /* Busiest group in this sd */
        struct sched_group *local;      /* Local group in this sd */
        unsigned long total_load;       /* Total load of all groups in sd */
        unsigned long total_capacity;   /* Total capacity of all groups in sd */
        unsigned long avg_load;         /* Average load across all groups in sd */

        struct sg_lb_stats busiest_stat; /* Statistics of the busiest group */
        struct sg_lb_stats local_stat;   /* Statistics of the local group */
};

static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
{
        /*
         * Skimp on the clearing to avoid duplicate work. We can avoid clearing
         * local_stat because update_sg_lb_stats() does a full clear/assignment.
         * We must however clear busiest_stat::avg_load because
         * update_sd_pick_busiest() reads this before assignment.
         */
        *sds = (struct sd_lb_stats){
                .busiest = NULL,
                .local = NULL,
                .total_load = 0UL,
                .total_capacity = 0UL,
                .busiest_stat = {
                        .avg_load = 0UL,
                        .sum_nr_running = 0,
                        .group_type = group_other,
                },
        };
}

/**
 * get_sd_load_idx - Obtain the load index for a given sched domain.
 * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The idle status of the CPU whose sd's load_idx is obtained.
 *
 * Return: The load index.
 */
static inline int get_sd_load_idx(struct sched_domain *sd,
                                  enum cpu_idle_type idle)
{
        int load_idx;

        switch (idle) {
        case CPU_NOT_IDLE:
                load_idx = sd->busy_idx;
                break;

        case CPU_NEWLY_IDLE:
                load_idx = sd->newidle_idx;
                break;
        default:
                load_idx = sd->idle_idx;
                break;
        }

        return load_idx;
}

static unsigned long scale_rt_capacity(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        u64 total, used, age_stamp, avg;
        s64 delta;

        /*
         * Since we're reading these variables without serialization make sure
         * we read them once before doing sanity checks on them.
         */
        age_stamp = READ_ONCE(rq->age_stamp);
        avg = READ_ONCE(rq->rt_avg);
        delta = __rq_clock_broken(rq) - age_stamp;

        if (unlikely(delta < 0))
                delta = 0;

        total = sched_avg_period() + delta;

        used = div_u64(avg, total);

        if (likely(used < SCHED_CAPACITY_SCALE))
                return SCHED_CAPACITY_SCALE - used;

        return 1;
}
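
/*
 * Illustrative numbers for the above: rq->rt_avg is accumulated
 * pre-scaled by arch_scale_freq_capacity() (SCHED_CAPACITY_SCALE by
 * default), so used = avg / total is already a capacity fraction. If
 * RT/DL/IRQ activity consumed 25% of the averaging window, used == 256
 * and scale_rt_capacity() returns 1024 - 256 = 768, i.e. 75% of the
 * CPU's capacity is left for CFS tasks.
 */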

static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
        unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
        struct sched_group *sdg = sd->groups;

        cpu_rq(cpu)->cpu_capacity_orig = capacity;

        capacity *= scale_rt_capacity(cpu);
        capacity >>= SCHED_CAPACITY_SHIFT;

        if (!capacity)
                capacity = 1;

        cpu_rq(cpu)->cpu_capacity = capacity;
        sdg->sgc->capacity = capacity;
}
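
/*
 * Example with illustrative values: on an asymmetric system a small CPU
 * might have arch_scale_cpu_capacity() == 600. With scale_rt_capacity()
 * == 768, as in the example above, the capacity left for CFS is
 *
 *      600 * 768 >> SCHED_CAPACITY_SHIFT = 450.
 *
 * cpu_capacity_orig keeps the unreduced 600 so that check_cpu_capacity()
 * can later compare the two.
 */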

void update_group_capacity(struct sched_domain *sd, int cpu)
{
        struct sched_domain *child = sd->child;
        struct sched_group *group, *sdg = sd->groups;
        unsigned long capacity;
        unsigned long interval;

        interval = msecs_to_jiffies(sd->balance_interval);
        interval = clamp(interval, 1UL, max_load_balance_interval);
        sdg->sgc->next_update = jiffies + interval;

        if (!child) {
                update_cpu_capacity(sd, cpu);
                return;
        }

        capacity = 0;

        if (child->flags & SD_OVERLAP) {
                /*
                 * SD_OVERLAP domains cannot assume that child groups
                 * span the current group.
                 */

                for_each_cpu(cpu, sched_group_cpus(sdg)) {
                        struct sched_group_capacity *sgc;
                        struct rq *rq = cpu_rq(cpu);

                        /*
                         * build_sched_domains() -> init_sched_groups_capacity()
                         * gets here before we've attached the domains to the
                         * runqueues.
                         *
                         * Use capacity_of(), which is set irrespective of domains
                         * in update_cpu_capacity().
                         *
                         * This avoids capacity from being 0 and
                         * causing divide-by-zero issues on boot.
                         */
                        if (unlikely(!rq->sd)) {
                                capacity += capacity_of(cpu);
                                continue;
                        }

                        sgc = rq->sd->groups->sgc;
                        capacity += sgc->capacity;
                }
        } else {
                /*
                 * !SD_OVERLAP domains can assume that child groups
                 * span the current group.
                 */

                group = child->groups;
                do {
                        capacity += group->sgc->capacity;
                        group = group->next;
                } while (group != child->groups);
        }

        sdg->sgc->capacity = capacity;
}

/*
 * Check whether the capacity of the rq has been noticeably reduced by side
 * activity. The imbalance_pct is used as the threshold.
 * Return true if the capacity is reduced.
 */
static inline int
check_cpu_capacity(struct rq *rq, struct sched_domain *sd)
{
        return ((rq->cpu_capacity * sd->imbalance_pct) <
                                (rq->cpu_capacity_orig * 100));
}
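
/*
 * Numeric sketch, assuming a typical sd->imbalance_pct of 117: the test
 * fires once cpu_capacity drops below cpu_capacity_orig * 100 / 117,
 * i.e. below ~85% of the original capacity. For cpu_capacity == 450
 * against cpu_capacity_orig == 600, 450 * 117 = 52650 < 600 * 100 =
 * 60000, so the rq is flagged as capacity-reduced.
 */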

/*
 * Group imbalance indicates (and tries to solve) the problem where balancing
 * groups is inadequate due to tsk_cpus_allowed() constraints.
 *
 * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
 * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
 * Something like:
 *
 *      { 0 1 2 3 } { 4 5 6 7 }
 *              *     * * *
 *
 * If we were to balance group-wise we'd place two tasks in the first group and
 * two tasks in the second group. Clearly this is undesired as it will overload
 * cpu 3 and leave one of the cpus in the second group unused.
 *
 * The current solution to this issue is detecting the skew in the first group
 * by noticing the lower domain failed to reach balance and had difficulty
 * moving tasks due to affinity constraints.
 *
 * When this is so detected; this group becomes a candidate for busiest; see
 * update_sd_pick_busiest(). And calculate_imbalance() and
 * find_busiest_group() avoid some of the usual balance conditions to allow it
 * to create an effective group imbalance.
 *
 * This is a somewhat tricky proposition since the next run might not find the
 * group imbalance and decide the groups need to be balanced again. A most
 * subtle and fragile situation.
 */

static inline int sg_imbalanced(struct sched_group *group)
{
        return group->sgc->imbalance;
}

/*
 * group_has_capacity returns true if the group has spare capacity that could
 * be used by some tasks.
 * We consider that a group has spare capacity if the number of tasks is
 * smaller than the number of CPUs or if the utilization is lower than the
 * available capacity for CFS tasks.
 * For the latter, we use a threshold to stabilize the state, to take into
 * account the variance of the tasks' load and to return true only if the
 * available capacity is meaningful for the load balancer.
 * As an example, an available capacity of 1% can appear but brings no
 * benefit for load balancing.
 */
static inline bool
group_has_capacity(struct lb_env *env, struct sg_lb_stats *sgs)
{
        if (sgs->sum_nr_running < sgs->group_weight)
                return true;

        if ((sgs->group_capacity * 100) >
                        (sgs->group_util * env->sd->imbalance_pct))
                return true;

        return false;
}

/*
 * group_is_overloaded returns true if the group has more tasks than it can
 * handle.
 * group_is_overloaded is not equal to !group_has_capacity: a group with
 * exactly the right number of tasks has no spare capacity left but is not
 * overloaded either, so group_has_capacity and group_is_overloaded both
 * return false for it.
 */
static inline bool
group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
{
        if (sgs->sum_nr_running <= sgs->group_weight)
                return false;

        if ((sgs->group_capacity * 100) <
                        (sgs->group_util * env->sd->imbalance_pct))
                return true;

        return false;
}
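
/*
 * Numeric illustration, assuming imbalance_pct == 117: a 4-CPU group with
 * group_capacity == 4096 is overloaded only once it runs more than 4
 * tasks *and* group_util * 117 > 4096 * 100, i.e. group_util above ~3501
 * (~85% of capacity). Conversely it has capacity with fewer than 4 tasks
 * or group_util below that threshold. A group with exactly 4 tasks
 * sitting right at the threshold makes both helpers return false, which
 * is the in-between state described above.
 */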

static inline enum group_type
group_classify(struct sched_group *group, struct sg_lb_stats *sgs)
{
        if (sgs->group_no_capacity)
                return group_overloaded;

        if (sg_imbalanced(group))
                return group_imbalanced;

        return group_other;
}

/**
 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
 * @env: The load balancing environment.
 * @group: sched_group whose statistics are to be updated.
 * @load_idx: Load index of sched_domain of this_cpu for load calc.
 * @local_group: Does group contain this_cpu.
 * @sgs: variable to hold the statistics for this group.
 * @overload: Indicate more than one runnable task for any CPU.
 */
static inline void update_sg_lb_stats(struct lb_env *env,
                        struct sched_group *group, int load_idx,
                        int local_group, struct sg_lb_stats *sgs,
                        bool *overload)
{
        unsigned long load;
        int i, nr_running;

        memset(sgs, 0, sizeof(*sgs));

        for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
                struct rq *rq = cpu_rq(i);

                /* Bias balancing toward cpus of our domain */
                if (local_group)
                        load = target_load(i, load_idx);
                else
                        load = source_load(i, load_idx);

                sgs->group_load += load;
                sgs->group_util += cpu_util(i);
                sgs->sum_nr_running += rq->cfs.h_nr_running;

                nr_running = rq->nr_running;
                if (nr_running > 1)
                        *overload = true;

#ifdef CONFIG_NUMA_BALANCING
                sgs->nr_numa_running += rq->nr_numa_running;
                sgs->nr_preferred_running += rq->nr_preferred_running;
#endif
                sgs->sum_weighted_load += weighted_cpuload(i);
                /*
                 * No need to call idle_cpu() if nr_running is not 0
                 */
                if (!nr_running && idle_cpu(i))
                        sgs->idle_cpus++;
        }

        /* Adjust by relative CPU capacity of the group */
        sgs->group_capacity = group->sgc->capacity;
        sgs->avg_load = (sgs->group_load * SCHED_CAPACITY_SCALE) / sgs->group_capacity;

        if (sgs->sum_nr_running)
                sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;

        sgs->group_weight = group->group_weight;

        sgs->group_no_capacity = group_is_overloaded(env, sgs);
        sgs->group_type = group_classify(group, sgs);
}
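
/*
 * avg_load is load normalized by capacity. With illustrative numbers,
 * group_load == 2048 on a group_capacity of 1024 gives avg_load == 2048,
 * while the same load spread over a 4096-capacity group gives only 512.
 * This normalization is what lets groups of different sizes and
 * microarchitectures be compared in update_sd_pick_busiest().
 */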

/**
 * update_sd_pick_busiest - return 1 on busiest group
 * @env: The load balancing environment.
 * @sds: sched_domain statistics
 * @sg: sched_group candidate to be checked for being the busiest
 * @sgs: sched_group statistics
 *
 * Determine if @sg is a busier group than the previously selected
 * busiest group.
 *
 * Return: %true if @sg is a busier group than the previously selected
 * busiest group. %false otherwise.
 */
static bool update_sd_pick_busiest(struct lb_env *env,
                                   struct sd_lb_stats *sds,
                                   struct sched_group *sg,
                                   struct sg_lb_stats *sgs)
{
        struct sg_lb_stats *busiest = &sds->busiest_stat;

        if (sgs->group_type > busiest->group_type)
                return true;

        if (sgs->group_type < busiest->group_type)
                return false;

        if (sgs->avg_load <= busiest->avg_load)
                return false;

        /* This is the busiest node in its class. */
        if (!(env->sd->flags & SD_ASYM_PACKING))
                return true;

        /* No ASYM_PACKING if target cpu is already busy */
        if (env->idle == CPU_NOT_IDLE)
                return true;
        /*
         * ASYM_PACKING needs to move all the work to the lowest
         * numbered CPUs in the group, therefore mark all groups
         * higher than ourself as busy.
         */
        if (sgs->sum_nr_running && env->dst_cpu < group_first_cpu(sg)) {
                if (!sds->busiest)
                        return true;

                /* Prefer to move work away from the highest-numbered CPUs */
                if (group_first_cpu(sds->busiest) < group_first_cpu(sg))
                        return true;
        }

        return false;
}

#ifdef CONFIG_NUMA_BALANCING
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
        if (sgs->sum_nr_running > sgs->nr_numa_running)
                return regular;
        if (sgs->sum_nr_running > sgs->nr_preferred_running)
                return remote;
        return all;
}

static inline enum fbq_type fbq_classify_rq(struct rq *rq)
{
        if (rq->nr_running > rq->nr_numa_running)
                return regular;
        if (rq->nr_running > rq->nr_preferred_running)
                return remote;
        return all;
}
#else
static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
{
        return all;
}

static inline enum fbq_type fbq_classify_rq(struct rq *rq)
{
        return regular;
}
#endif /* CONFIG_NUMA_BALANCING */

/**
 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
 * @env: The load balancing environment.
 * @sds: variable to hold the statistics for this sched_domain.
 */
static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
{
        struct sched_domain *child = env->sd->child;
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats tmp_sgs;
        int load_idx, prefer_sibling = 0;
        bool overload = false;

        if (child && child->flags & SD_PREFER_SIBLING)
                prefer_sibling = 1;

        load_idx = get_sd_load_idx(env->sd, env->idle);

        do {
                struct sg_lb_stats *sgs = &tmp_sgs;
                int local_group;

                local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
                if (local_group) {
                        sds->local = sg;
                        sgs = &sds->local_stat;

                        if (env->idle != CPU_NEWLY_IDLE ||
                            time_after_eq(jiffies, sg->sgc->next_update))
                                update_group_capacity(env->sd, env->dst_cpu);
                }

                update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
                                   &overload);

                if (local_group)
                        goto next_group;

                /*
                 * In case the child domain prefers tasks to go to siblings
                 * first, lower the sg capacity so that we'll try to move
                 * all the excess tasks away. We lower the capacity of a
                 * group only if the local group has the capacity to fit
                 * these excess tasks. The extra check prevents the case
                 * where you always pull from the heaviest group when it is
                 * already under-utilized (possible when a single
                 * large-weight task outweighs the rest of the tasks on the
                 * system).
                 */
                if (prefer_sibling && sds->local &&
                    group_has_capacity(env, &sds->local_stat) &&
                    (sgs->sum_nr_running > 1)) {
                        sgs->group_no_capacity = 1;
                        sgs->group_type = group_classify(sg, sgs);
                }

                if (update_sd_pick_busiest(env, sds, sg, sgs)) {
                        sds->busiest = sg;
                        sds->busiest_stat = *sgs;
                }

next_group:
                /* Now, start updating sd_lb_stats */
                sds->total_load += sgs->group_load;
                sds->total_capacity += sgs->group_capacity;

                sg = sg->next;
        } while (sg != env->sd->groups);

        if (env->sd->flags & SD_NUMA)
                env->fbq_type = fbq_classify_group(&sds->busiest_stat);

        if (!env->sd->parent) {
                /* update overload indicator if we are at root domain */
                if (env->dst_rq->rd->overload != overload)
                        env->dst_rq->rd->overload = overload;
        }
}

/**
 * check_asym_packing - Check to see if the group is packed into the
 *                      sched domain.
 *
 * This is primarily intended to be used at the sibling level. Some
 * cores like POWER7 prefer to use lower numbered SMT threads. In the
 * case of POWER7, it can move to lower SMT modes only when higher
 * threads are idle. When in lower SMT modes, the threads will
 * perform better since they share less core resources. Hence when we
 * have idle threads, we want them to be the higher ones.
 *
 * This packing function is run on idle threads. It checks to see if
 * the busiest CPU in this domain (core in the P7 case) has a higher
 * CPU number than the packing function is being run on. Here we are
 * assuming a lower CPU number is equivalent to a lower SMT thread
 * number.
 *
 * Return: 1 when packing is required and a task should be moved to
 * this CPU. The amount of the imbalance is returned in env->imbalance.
 *
 * @env: The load balancing environment.
 * @sds: Statistics of the sched_domain which is to be packed
 */
static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
{
        int busiest_cpu;

        if (!(env->sd->flags & SD_ASYM_PACKING))
                return 0;

        if (env->idle == CPU_NOT_IDLE)
                return 0;

        if (!sds->busiest)
                return 0;

        busiest_cpu = group_first_cpu(sds->busiest);
        if (env->dst_cpu > busiest_cpu)
                return 0;

        env->imbalance = DIV_ROUND_CLOSEST(
                sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity,
                SCHED_CAPACITY_SCALE);

        return 1;
}
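
/*
 * The imbalance set above converts the busiest group's
 * capacity-normalized avg_load back into plain load. With illustrative
 * numbers avg_load == 700 and group_capacity == 1024:
 *
 *      env->imbalance = DIV_ROUND_CLOSEST(700 * 1024, 1024) = 700,
 *
 * i.e. try to pull everything the busiest group is running.
 */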
6992
6993/**
6994 * fix_small_imbalance - Calculate the minor imbalance that exists
6995 * amongst the groups of a sched_domain, during
6996 * load balancing.
Randy Dunlapcd968912012-06-08 13:18:33 -07006997 * @env: The load balancing environment.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006998 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01006999 */
Peter Zijlstrabd939f42012-05-02 14:20:37 +02007000static inline
7001void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007002{
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007003 unsigned long tmp, capa_now = 0, capa_move = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007004 unsigned int imbn = 2;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08007005 unsigned long scaled_busy_load_per_task;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007006 struct sg_lb_stats *local, *busiest;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007007
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007008 local = &sds->local_stat;
7009 busiest = &sds->busiest_stat;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007010
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007011 if (!local->sum_nr_running)
7012 local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
7013 else if (busiest->load_per_task > local->load_per_task)
7014 imbn = 1;
Suresh Siddhadd5feea2010-02-23 16:13:52 -08007015
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007016 scaled_busy_load_per_task =
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04007017 (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007018 busiest->group_capacity;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007019
Vladimir Davydov3029ede2013-09-15 17:49:14 +04007020 if (busiest->avg_load + scaled_busy_load_per_task >=
7021 local->avg_load + (scaled_busy_load_per_task * imbn)) {
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007022 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007023 return;
7024 }
7025
7026 /*
7027 * OK, we don't have enough imbalance to justify moving tasks,
Nicolas Pitreced549f2014-05-26 18:19:38 -04007028 * however we may be able to increase total CPU capacity used by
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007029 * moving them.
7030 */
7031
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007032 capa_now += busiest->group_capacity *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007033 min(busiest->load_per_task, busiest->avg_load);
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007034 capa_now += local->group_capacity *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007035 min(local->load_per_task, local->avg_load);
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04007036 capa_now /= SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007037
7038 /* Amount of load we'd subtract */
Vincent Guittota2cd4262014-03-11 17:26:06 +01007039 if (busiest->avg_load > scaled_busy_load_per_task) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007040 capa_move += busiest->group_capacity *
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007041 min(busiest->load_per_task,
Vincent Guittota2cd4262014-03-11 17:26:06 +01007042 busiest->avg_load - scaled_busy_load_per_task);
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007043 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007044
7045 /* Amount of load we'd add */
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007046 if (busiest->avg_load * busiest->group_capacity <
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04007047 busiest->load_per_task * SCHED_CAPACITY_SCALE) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007048 tmp = (busiest->avg_load * busiest->group_capacity) /
7049 local->group_capacity;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007050 } else {
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04007051 tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) /
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007052 local->group_capacity;
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007053 }
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007054 capa_move += local->group_capacity *
Peter Zijlstra3ae11c92013-08-15 20:37:48 +02007055 min(local->load_per_task, local->avg_load + tmp);
Nicolas Pitreca8ce3d2014-05-26 18:19:39 -04007056 capa_move /= SCHED_CAPACITY_SCALE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007057
7058 /* Move if we gain throughput */
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007059 if (capa_move > capa_now)
Joonsoo Kim56cf5152013-08-06 17:36:43 +09007060 env->imbalance = busiest->load_per_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007061}
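
/*
 * Illustration of the first test above, with illustrative numbers:
 * busiest->avg_load == 1200, local->avg_load == 1000, a scaled
 * load_per_task of 300 and imbn == 2 give 1200 + 300 < 1000 + 600, so
 * moving a whole task would overshoot the balance point and we fall
 * through to the throughput estimate: capa_now sums what both groups
 * currently contribute, capa_move what they would contribute after
 * moving one task, and one task's load is only requested when
 * capa_move > capa_now.
 */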

/**
 * calculate_imbalance - Calculate the amount of imbalance present within the
 *                       groups of a given sched_domain during load balance.
 * @env: load balance environment
 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
 */
static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
{
        unsigned long max_pull, load_above_capacity = ~0UL;
        struct sg_lb_stats *local, *busiest;

        local = &sds->local_stat;
        busiest = &sds->busiest_stat;

        if (busiest->group_type == group_imbalanced) {
                /*
                 * In the group_imb case we cannot rely on group-wide averages
                 * to ensure cpu-load equilibrium, look at wider averages. XXX
                 */
                busiest->load_per_task =
                        min(busiest->load_per_task, sds->avg_load);
        }

        /*
         * Avg load of busiest sg can be less and avg load of local sg can
         * be greater than avg load across all sgs of sd because avg load
         * factors in sg capacity and sgs with smaller group_type are
         * skipped when updating the busiest sg:
         */
        if (busiest->avg_load <= sds->avg_load ||
            local->avg_load >= sds->avg_load) {
                env->imbalance = 0;
                return fix_small_imbalance(env, sds);
        }

        /*
         * If there aren't any idle cpus, avoid creating some.
         */
        if (busiest->group_type == group_overloaded &&
            local->group_type == group_overloaded) {
                load_above_capacity = busiest->sum_nr_running * SCHED_CAPACITY_SCALE;
                if (load_above_capacity > busiest->group_capacity) {
                        load_above_capacity -= busiest->group_capacity;
                        load_above_capacity *= NICE_0_LOAD;
                        load_above_capacity /= busiest->group_capacity;
                } else
                        load_above_capacity = ~0UL;
        }

        /*
         * We're trying to get all the cpus to the average_load, so we don't
         * want to push ourselves above the average load, nor do we wish to
         * reduce the max loaded cpu below the average load. At the same time,
         * we also don't want to reduce the group load below the group
         * capacity. Thus we look for the minimum possible imbalance.
         */
        max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);

        /* How much load to actually move to equalise the imbalance */
        env->imbalance = min(
                max_pull * busiest->group_capacity,
                (sds->avg_load - local->avg_load) * local->group_capacity
        ) / SCHED_CAPACITY_SCALE;

7127 /*
7128 * if *imbalance is less than the average load per runnable task
Lucas De Marchi25985ed2011-03-30 22:57:33 -03007129 * there is no guarantee that any tasks will be moved so we'll have
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007130 * a think about bumping its value to force at least one task to be
7131 * moved
7132 */
        if (env->imbalance < busiest->load_per_task)
                return fix_small_imbalance(env, sds);
}

/******* find_busiest_group() helpers end here *********************/

/**
 * find_busiest_group - Returns the busiest group within the sched_domain
 * if there is an imbalance.
 *
 * Also calculates the amount of weighted load which should be moved
 * to restore balance.
 *
 * @env: The load balancing environment.
 *
 * Return: The busiest group if an imbalance exists.
 */
static struct sched_group *find_busiest_group(struct lb_env *env)
{
        struct sg_lb_stats *local, *busiest;
        struct sd_lb_stats sds;

        init_sd_lb_stats(&sds);

7157 /*
7158 * Compute the various statistics relavent for load balancing at
7159 * this level.
7160 */
        update_sd_lb_stats(env, &sds);
        local = &sds.local_stat;
        busiest = &sds.busiest_stat;

        /* ASYM feature bypasses nice load balance check */
        if (check_asym_packing(env, &sds))
                return sds.busiest;

        /* There is no busy sibling group to pull tasks from */
        if (!sds.busiest || busiest->sum_nr_running == 0)
                goto out_balanced;

        sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
                                                / sds.total_capacity;

        /*
         * If the busiest group is imbalanced the below checks don't
         * work because they assume all things are equal, which typically
         * isn't true due to cpus_allowed constraints and the like.
         */
        if (busiest->group_type == group_imbalanced)
                goto force_balance;

        /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
        if (env->idle == CPU_NEWLY_IDLE && group_has_capacity(env, local) &&
            busiest->group_no_capacity)
                goto force_balance;

        /*
         * If the local group is busier than the selected busiest group
         * don't try and pull any tasks.
         */
        if (local->avg_load >= busiest->avg_load)
                goto out_balanced;

        /*
         * Don't pull any tasks if this group is already above the domain
         * average load.
         */
        if (local->avg_load >= sds.avg_load)
                goto out_balanced;

        if (env->idle == CPU_IDLE) {
                /*
                 * This cpu is idle. If the busiest group is not overloaded
                 * and there is no imbalance between this and the busiest
                 * group wrt idle cpus, it is balanced. The imbalance only
                 * becomes significant if the diff is greater than 1,
                 * otherwise we might just end up moving the imbalance to
                 * another group.
                 */
                if ((busiest->group_type != group_overloaded) &&
                                (local->idle_cpus <= (busiest->idle_cpus + 1)))
                        goto out_balanced;
        } else {
                /*
                 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
                 * imbalance_pct to be conservative.
                 */
                if (100 * busiest->avg_load <=
                                env->sd->imbalance_pct * local->avg_load)
                        goto out_balanced;
        }

force_balance:
        /* Looks like there is an imbalance. Compute it */
        calculate_imbalance(env, &sds);
        return sds.busiest;

out_balanced:
        env->imbalance = 0;
        return NULL;
}

/*
 * find_busiest_queue - find the busiest runqueue among the cpus in group.
 */
static struct rq *find_busiest_queue(struct lb_env *env,
                                     struct sched_group *group)
{
        struct rq *busiest = NULL, *rq;
        unsigned long busiest_load = 0, busiest_capacity = 1;
        int i;

        for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
                unsigned long capacity, wl;
                enum fbq_type rt;

                rq = cpu_rq(i);
                rt = fbq_classify_rq(rq);

                /*
                 * We classify groups/runqueues into three groups:
                 *  - regular: there are !numa tasks
                 *  - remote:  there are numa tasks that run on the 'wrong' node
                 *  - all:     there is no distinction
                 *
                 * In order to avoid migrating ideally placed numa tasks,
                 * ignore those when there's better options.
                 *
                 * If we ignore the actual busiest queue to migrate another
                 * task, the next balance pass can still reduce the busiest
                 * queue by moving tasks around inside the node.
                 *
                 * If we cannot move enough load due to this classification
                 * the next pass will adjust the group classification and
                 * allow migration of more tasks.
                 *
                 * Both cases only affect the total convergence complexity.
                 */
                if (rt > env->fbq_type)
                        continue;

                capacity = capacity_of(i);

                wl = weighted_cpuload(i);

                /*
                 * When comparing with imbalance, use weighted_cpuload()
                 * which is not scaled with the cpu capacity.
                 */

                if (rq->nr_running == 1 && wl > env->imbalance &&
                    !check_cpu_capacity(rq, env->sd))
                        continue;

                /*
                 * For the load comparisons with the other CPUs, consider
                 * the weighted_cpuload() scaled with the cpu capacity, so
                 * that the load can be moved away from the cpu that is
                 * potentially running at a lower capacity.
                 *
                 * Thus we're looking for max(wl_i / capacity_i); crosswise
                 * multiplication to rid ourselves of the division works out
                 * to: wl_i * capacity_j > wl_j * capacity_i, where j is
                 * our previous maximum.
                 */
                if (wl * busiest_capacity > busiest_load * capacity) {
                        busiest_load = wl;
                        busiest_capacity = capacity;
                        busiest = rq;
                }
        }

        return busiest;
}
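
/*
 * The crosswise multiplication above is just a division-free comparison
 * of load/capacity ratios. Illustrative check: wl_i == 600 on
 * capacity_i == 512 (ratio ~1.17) against a current maximum wl_j == 800
 * on capacity_j == 1024 (ratio ~0.78): 600 * 1024 = 614400 >
 * 800 * 512 = 409600, so the smaller but relatively busier CPU i becomes
 * the new busiest.
 */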

/*
 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
 * so long as it is large enough.
 */
#define MAX_PINNED_INTERVAL     512

/* Working cpumask for load_balance and load_balance_newidle. */
DEFINE_PER_CPU(cpumask_var_t, load_balance_mask);

static int need_active_balance(struct lb_env *env)
{
        struct sched_domain *sd = env->sd;

        if (env->idle == CPU_NEWLY_IDLE) {

                /*
                 * ASYM_PACKING needs to force migrate tasks from busy but
                 * higher numbered CPUs in order to pack all tasks in the
                 * lowest numbered CPUs.
                 */
                if ((sd->flags & SD_ASYM_PACKING) && env->src_cpu > env->dst_cpu)
                        return 1;
        }

Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007331 /*
7332	 * The dst_cpu is idle and the src_cpu has only 1 CFS task.
7333	 * It's worth migrating the task if the src_cpu's capacity is reduced
7334	 * because of other sched_class tasks or IRQs, and more capacity stays
7335	 * available on dst_cpu.
7336 */
7337 if ((env->idle != CPU_NOT_IDLE) &&
7338 (env->src_rq->cfs.h_nr_running == 1)) {
7339 if ((check_cpu_capacity(env->src_rq, sd)) &&
7340 (capacity_of(env->src_cpu)*sd->imbalance_pct < capacity_of(env->dst_cpu)*100))
7341 return 1;
7342 }
7343
Peter Zijlstra1af3ed32009-12-23 15:10:31 +01007344 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
7345}
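/*
 * Illustrative sketch, not kernel code: the capacity test above fires when
 * the destination keeps meaningfully more capacity than the source, with
 * sd->imbalance_pct acting as a percentage threshold. Worked example with
 * hypothetical numbers:
 *
 *   src_cap = 600, dst_cap = 800, imbalance_pct = 117
 *   600 * 117 = 70200  <  800 * 100 = 80000  ->  active balance is worth it
 */
static inline int capacity_favors_dst(unsigned long src_cap,
				      unsigned long dst_cap,
				      unsigned int imbalance_pct)
{
	return src_cap * imbalance_pct < dst_cap * 100;
}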
7346
Tejun Heo969c7922010-05-06 18:49:21 +02007347static int active_load_balance_cpu_stop(void *data);
7348
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007349static int should_we_balance(struct lb_env *env)
7350{
7351 struct sched_group *sg = env->sd->groups;
7352 struct cpumask *sg_cpus, *sg_mask;
7353 int cpu, balance_cpu = -1;
7354
7355 /*
7356	 * In the newly idle case, we will allow all the cpus
7357 * to do the newly idle load balance.
7358 */
7359 if (env->idle == CPU_NEWLY_IDLE)
7360 return 1;
7361
7362 sg_cpus = sched_group_cpus(sg);
7363 sg_mask = sched_group_mask(sg);
7364 /* Try to find first idle cpu */
7365 for_each_cpu_and(cpu, sg_cpus, env->cpus) {
7366 if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
7367 continue;
7368
7369 balance_cpu = cpu;
7370 break;
7371 }
7372
7373 if (balance_cpu == -1)
7374 balance_cpu = group_balance_cpu(sg);
7375
7376 /*
7377	 * First idle cpu or the first cpu (busiest) in this sched group
7378 * is eligible for doing load balancing at this and above domains.
7379 */
Joonsoo Kimb0cff9d2013-09-10 15:54:49 +09007380 return balance_cpu == env->dst_cpu;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007381}
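/*
 * Illustrative sketch, not kernel code: should_we_balance() elects exactly
 * one cpu per group to run a balance pass, preferring the first idle cpu.
 * A userspace analog over plain 64-bit masks (all names hypothetical):
 */
#include <stdint.h>

static int elect_balance_cpu(uint64_t group_mask, uint64_t idle_mask,
			     int fallback_cpu)
{
	uint64_t idle_in_group = group_mask & idle_mask;

	if (idle_in_group)
		return __builtin_ctzll(idle_in_group);	/* first idle cpu */

	return fallback_cpu;	/* the group_balance_cpu() analog */
}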
7382
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007383/*
7384 * Check this_cpu to ensure it is balanced within domain. Attempt to move
7385 * tasks if there is an imbalance.
7386 */
7387static int load_balance(int this_cpu, struct rq *this_rq,
7388 struct sched_domain *sd, enum cpu_idle_type idle,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007389 int *continue_balancing)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007390{
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307391 int ld_moved, cur_ld_moved, active_balance = 0;
Peter Zijlstra62633222013-08-19 12:41:09 +02007392 struct sched_domain *sd_parent = sd->parent;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007393 struct sched_group *group;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007394 struct rq *busiest;
7395 unsigned long flags;
Christoph Lameter4ba29682014-08-26 19:12:21 -05007396 struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007397
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007398 struct lb_env env = {
7399 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01007400 .dst_cpu = this_cpu,
7401 .dst_rq = this_rq,
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307402 .dst_grpmask = sched_group_cpus(sd->groups),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007403 .idle = idle,
Peter Zijlstraeb953082012-04-17 13:38:40 +02007404 .loop_break = sched_nr_migrate_break,
Michael Wangb94031302012-07-12 16:10:13 +08007405 .cpus = cpus,
Peter Zijlstra0ec8aa02013-10-07 11:29:33 +01007406 .fbq_type = all,
Kirill Tkhai163122b2014-08-20 13:48:29 +04007407 .tasks = LIST_HEAD_INIT(env.tasks),
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007408 };
7409
Joonsoo Kimcfc03112013-04-23 17:27:39 +09007410 /*
7411 * For NEWLY_IDLE load_balancing, we don't need to consider
7412 * other cpus in our group
7413 */
Joonsoo Kime02e60c2013-04-23 17:27:42 +09007414 if (idle == CPU_NEWLY_IDLE)
Joonsoo Kimcfc03112013-04-23 17:27:39 +09007415 env.dst_grpmask = NULL;
Joonsoo Kimcfc03112013-04-23 17:27:39 +09007416
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007417 cpumask_copy(cpus, cpu_active_mask);
7418
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007419 schedstat_inc(sd, lb_count[idle]);
7420
7421redo:
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007422 if (!should_we_balance(&env)) {
7423 *continue_balancing = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007424 goto out_balanced;
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007425 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007426
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007427 group = find_busiest_group(&env);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007428 if (!group) {
7429 schedstat_inc(sd, lb_nobusyg[idle]);
7430 goto out_balanced;
7431 }
7432
Michael Wangb94031302012-07-12 16:10:13 +08007433 busiest = find_busiest_queue(&env, group);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007434 if (!busiest) {
7435 schedstat_inc(sd, lb_nobusyq[idle]);
7436 goto out_balanced;
7437 }
7438
Michael Wang78feefc2012-08-06 16:41:59 +08007439 BUG_ON(busiest == env.dst_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007440
Peter Zijlstrabd939f42012-05-02 14:20:37 +02007441 schedstat_add(sd, lb_imbalance[idle], env.imbalance);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007442
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01007443 env.src_cpu = busiest->cpu;
7444 env.src_rq = busiest;
7445
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007446 ld_moved = 0;
7447 if (busiest->nr_running > 1) {
7448 /*
7449 * Attempt to move tasks. If find_busiest_group has found
7450 * an imbalance but busiest->nr_running <= 1, the group is
7451 * still unbalanced. ld_moved simply stays zero, so it is
7452 * correctly treated as an imbalance.
7453 */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007454 env.flags |= LBF_ALL_PINNED;
Peter Zijlstrac82513e2012-04-26 13:12:27 +02007455 env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007456
Peter Zijlstra5d6523e2012-03-10 00:07:36 +01007457more_balance:
Kirill Tkhai163122b2014-08-20 13:48:29 +04007458 raw_spin_lock_irqsave(&busiest->lock, flags);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307459
7460 /*
7461 * cur_ld_moved - load moved in current iteration
7462 * ld_moved - cumulative load moved across iterations
7463 */
Kirill Tkhai163122b2014-08-20 13:48:29 +04007464 cur_ld_moved = detach_tasks(&env);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007465
7466 /*
Kirill Tkhai163122b2014-08-20 13:48:29 +04007467		 * We've detached some tasks from busiest_rq. Every
7468		 * task is marked "TASK_ON_RQ_MIGRATING", so we can safely
7469		 * unlock busiest->lock, and we can be sure
7470		 * that nobody can manipulate the tasks in parallel.
7471		 * See the task_rq_lock() family for the details.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007472 */
Kirill Tkhai163122b2014-08-20 13:48:29 +04007473
7474 raw_spin_unlock(&busiest->lock);
7475
7476 if (cur_ld_moved) {
7477 attach_tasks(&env);
7478 ld_moved += cur_ld_moved;
7479 }
7480
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007481 local_irq_restore(flags);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307482
Joonsoo Kimf1cd0852013-04-23 17:27:37 +09007483 if (env.flags & LBF_NEED_BREAK) {
7484 env.flags &= ~LBF_NEED_BREAK;
7485 goto more_balance;
7486 }
7487
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307488 /*
7489 * Revisit (affine) tasks on src_cpu that couldn't be moved to
7490 * us and move them to an alternate dst_cpu in our sched_group
7491 * where they can run. The upper limit on how many times we
7492		 * iterate on the same src_cpu is dependent on the number of cpus
7493		 * in our sched_group.
7494		 *
7495		 * This changes load balance semantics a bit on who can move
7496		 * load to a given_cpu. In addition to the given_cpu itself
7497		 * (or an ilb_cpu acting on its behalf where given_cpu is
7498		 * nohz-idle), we now have balance_cpu in a position to move
7499		 * load to given_cpu. In rare situations, this may cause
7500		 * conflicts (balance_cpu and given_cpu/ilb_cpu deciding
7501		 * _independently_ and at the _same_ time to move some load to
7502		 * given_cpu), causing excess load to be moved to given_cpu.
7503		 * This however should not happen often in practice, and
7504		 * moreover subsequent load balance cycles should correct the
7505		 * excess load moved.
7506 */
Peter Zijlstra62633222013-08-19 12:41:09 +02007507 if ((env.flags & LBF_DST_PINNED) && env.imbalance > 0) {
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307508
Vladimir Davydov7aff2e32013-09-15 21:30:13 +04007509			/* Prevent re-selecting dst_cpu via env's cpus */
7510 cpumask_clear_cpu(env.dst_cpu, env.cpus);
7511
Michael Wang78feefc2012-08-06 16:41:59 +08007512 env.dst_rq = cpu_rq(env.new_dst_cpu);
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307513 env.dst_cpu = env.new_dst_cpu;
Peter Zijlstra62633222013-08-19 12:41:09 +02007514 env.flags &= ~LBF_DST_PINNED;
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307515 env.loop = 0;
7516 env.loop_break = sched_nr_migrate_break;
Joonsoo Kime02e60c2013-04-23 17:27:42 +09007517
Srivatsa Vaddagiri88b8dac2012-06-19 17:43:15 +05307518 /*
7519 * Go back to "more_balance" rather than "redo" since we
7520 * need to continue with same src_cpu.
7521 */
7522 goto more_balance;
7523 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007524
Peter Zijlstra62633222013-08-19 12:41:09 +02007525 /*
7526 * We failed to reach balance because of affinity.
7527 */
7528 if (sd_parent) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007529 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
Peter Zijlstra62633222013-08-19 12:41:09 +02007530
Vincent Guittotafdeee02014-08-26 13:06:44 +02007531 if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0)
Peter Zijlstra62633222013-08-19 12:41:09 +02007532 *group_imbalance = 1;
Peter Zijlstra62633222013-08-19 12:41:09 +02007533 }
7534
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007535 /* All tasks on this runqueue were pinned by CPU affinity */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007536 if (unlikely(env.flags & LBF_ALL_PINNED)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007537 cpumask_clear_cpu(cpu_of(busiest), cpus);
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05307538 if (!cpumask_empty(cpus)) {
7539 env.loop = 0;
7540 env.loop_break = sched_nr_migrate_break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007541 goto redo;
Prashanth Nageshappabbf18b12012-06-19 17:52:07 +05307542 }
Vincent Guittotafdeee02014-08-26 13:06:44 +02007543 goto out_all_pinned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007544 }
7545 }
7546
7547 if (!ld_moved) {
7548 schedstat_inc(sd, lb_failed[idle]);
Venkatesh Pallipadi58b26c42010-09-10 18:19:17 -07007549 /*
7550 * Increment the failure counter only on periodic balance.
7551		 * We do not want newidle balance, which can be very
7552		 * frequent, to pollute the failure counter, causing
7553		 * excessive cache_hot migrations and active balances.
7554 */
7555 if (idle != CPU_NEWLY_IDLE)
7556 sd->nr_balance_failed++;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007557
Peter Zijlstrabd939f42012-05-02 14:20:37 +02007558 if (need_active_balance(&env)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007559 raw_spin_lock_irqsave(&busiest->lock, flags);
7560
Tejun Heo969c7922010-05-06 18:49:21 +02007561			/* don't kick the active_load_balance_cpu_stop
7562			 * if the curr task on the busiest cpu can't be
7563			 * moved to this_cpu
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007564 */
7565 if (!cpumask_test_cpu(this_cpu,
Peter Zijlstrafa17b502011-06-16 12:23:22 +02007566 tsk_cpus_allowed(busiest->curr))) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007567 raw_spin_unlock_irqrestore(&busiest->lock,
7568 flags);
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007569 env.flags |= LBF_ALL_PINNED;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007570 goto out_one_pinned;
7571 }
7572
Tejun Heo969c7922010-05-06 18:49:21 +02007573 /*
7574 * ->active_balance synchronizes accesses to
7575 * ->active_balance_work. Once set, it's cleared
7576 * only after active load balance is finished.
7577 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007578 if (!busiest->active_balance) {
7579 busiest->active_balance = 1;
7580 busiest->push_cpu = this_cpu;
7581 active_balance = 1;
7582 }
7583 raw_spin_unlock_irqrestore(&busiest->lock, flags);
Tejun Heo969c7922010-05-06 18:49:21 +02007584
Peter Zijlstrabd939f42012-05-02 14:20:37 +02007585 if (active_balance) {
Tejun Heo969c7922010-05-06 18:49:21 +02007586 stop_one_cpu_nowait(cpu_of(busiest),
7587 active_load_balance_cpu_stop, busiest,
7588 &busiest->active_balance_work);
Peter Zijlstrabd939f42012-05-02 14:20:37 +02007589 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007590
Srikar Dronamrajud02c071182016-03-23 17:54:44 +05307591 /* We've kicked active balancing, force task migration. */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007592 sd->nr_balance_failed = sd->cache_nice_tries+1;
7593 }
7594 } else
7595 sd->nr_balance_failed = 0;
7596
7597 if (likely(!active_balance)) {
7598 /* We were unbalanced, so reset the balancing interval */
7599 sd->balance_interval = sd->min_interval;
7600 } else {
7601 /*
7602 * If we've begun active balancing, start to back off. This
7603 * case may not be covered by the all_pinned logic if there
7604 * is only 1 task on the busy runqueue (because we don't call
Kirill Tkhai163122b2014-08-20 13:48:29 +04007605 * detach_tasks).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007606 */
7607 if (sd->balance_interval < sd->max_interval)
7608 sd->balance_interval *= 2;
7609 }
7610
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007611 goto out;
7612
7613out_balanced:
Vincent Guittotafdeee02014-08-26 13:06:44 +02007614 /*
7615 * We reach balance although we may have faced some affinity
7616 * constraints. Clear the imbalance flag if it was set.
7617 */
7618 if (sd_parent) {
7619 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
7620
7621 if (*group_imbalance)
7622 *group_imbalance = 0;
7623 }
7624
7625out_all_pinned:
7626 /*
7627 * We reach balance because all tasks are pinned at this level so
7628	 * we can't migrate them. Leave the imbalance flag set so the parent
7629	 * level can try to migrate them.
7630 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007631 schedstat_inc(sd, lb_balanced[idle]);
7632
7633 sd->nr_balance_failed = 0;
7634
7635out_one_pinned:
7636 /* tune up the balancing interval */
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007637 if (((env.flags & LBF_ALL_PINNED) &&
Peter Zijlstra5b54b562011-09-22 15:23:13 +02007638 sd->balance_interval < MAX_PINNED_INTERVAL) ||
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007639 (sd->balance_interval < sd->max_interval))
7640 sd->balance_interval *= 2;
7641
Venkatesh Pallipadi46e49b32011-02-14 14:38:50 -08007642 ld_moved = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007643out:
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007644 return ld_moved;
7645}
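/*
 * Illustrative sketch, not kernel code: on failed or pinned balance passes
 * the code above doubles sd->balance_interval while it is below the domain
 * maximum, backing off the next attempt. A minimal model of that step:
 */
static unsigned long backoff_interval(unsigned long interval,
				      unsigned long max_interval)
{
	if (interval < max_interval)
		interval *= 2;	/* e.g. 8 -> 16 -> 32 ... ms */

	return interval;
}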
7646
Jason Low52a08ef2014-05-08 17:49:22 -07007647static inline unsigned long
7648get_sd_balance_interval(struct sched_domain *sd, int cpu_busy)
7649{
7650 unsigned long interval = sd->balance_interval;
7651
7652 if (cpu_busy)
7653 interval *= sd->busy_factor;
7654
7655 /* scale ms to jiffies */
7656 interval = msecs_to_jiffies(interval);
7657 interval = clamp(interval, 1UL, max_load_balance_interval);
7658
7659 return interval;
7660}
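/*
 * Illustrative sketch, not kernel code: a busy cpu stretches its balance
 * interval by sd->busy_factor before the jiffies clamp. Assuming HZ=1000
 * (so msecs map 1:1 to jiffies), balance_interval = 8ms and busy_factor =
 * 32 give a 256-jiffy period on a busy cpu:
 */
static unsigned long model_balance_interval(unsigned long interval_ms,
					    int cpu_busy,
					    unsigned int busy_factor,
					    unsigned long max_jiffies)
{
	unsigned long j;

	if (cpu_busy)
		interval_ms *= busy_factor;

	j = interval_ms;	/* msecs_to_jiffies() is identity at HZ=1000 */
	if (j < 1)
		j = 1;
	if (j > max_jiffies)
		j = max_jiffies;

	return j;
}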
7661
7662static inline void
7663update_next_balance(struct sched_domain *sd, int cpu_busy, unsigned long *next_balance)
7664{
7665 unsigned long interval, next;
7666
7667 interval = get_sd_balance_interval(sd, cpu_busy);
7668 next = sd->last_balance + interval;
7669
7670 if (time_after(*next_balance, next))
7671 *next_balance = next;
7672}
7673
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007674/*
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007675 * idle_balance is called by schedule() if this_cpu is about to become
7676 * idle. Attempts to pull tasks from other CPUs.
7677 */
Peter Zijlstra6e831252014-02-11 16:11:48 +01007678static int idle_balance(struct rq *this_rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007679{
Jason Low52a08ef2014-05-08 17:49:22 -07007680 unsigned long next_balance = jiffies + HZ;
7681 int this_cpu = this_rq->cpu;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007682 struct sched_domain *sd;
7683 int pulled_task = 0;
Jason Low9bd721c2013-09-13 11:26:52 -07007684 u64 curr_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007685
Peter Zijlstra6e831252014-02-11 16:11:48 +01007686 /*
7687 * We must set idle_stamp _before_ calling idle_balance(), such that we
7688 * measure the duration of idle_balance() as idle time.
7689 */
7690 this_rq->idle_stamp = rq_clock(this_rq);
7691
Tim Chen4486edd2014-06-23 12:16:49 -07007692 if (this_rq->avg_idle < sysctl_sched_migration_cost ||
7693 !this_rq->rd->overload) {
Jason Low52a08ef2014-05-08 17:49:22 -07007694 rcu_read_lock();
7695 sd = rcu_dereference_check_sched_domain(this_rq->sd);
7696 if (sd)
7697 update_next_balance(sd, 0, &next_balance);
7698 rcu_read_unlock();
7699
Peter Zijlstra6e831252014-02-11 16:11:48 +01007700 goto out;
Jason Low52a08ef2014-05-08 17:49:22 -07007701 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007702
Peter Zijlstraf492e122009-12-23 15:29:42 +01007703 raw_spin_unlock(&this_rq->lock);
7704
Paul Turner48a16752012-10-04 13:18:31 +02007705 update_blocked_averages(this_cpu);
Peter Zijlstradce840a2011-04-07 14:09:50 +02007706 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007707 for_each_domain(this_cpu, sd) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007708 int continue_balancing = 1;
Jason Low9bd721c2013-09-13 11:26:52 -07007709 u64 t0, domain_cost;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007710
7711 if (!(sd->flags & SD_LOAD_BALANCE))
7712 continue;
7713
Jason Low52a08ef2014-05-08 17:49:22 -07007714 if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
7715 update_next_balance(sd, 0, &next_balance);
Jason Low9bd721c2013-09-13 11:26:52 -07007716 break;
Jason Low52a08ef2014-05-08 17:49:22 -07007717 }
Jason Low9bd721c2013-09-13 11:26:52 -07007718
Peter Zijlstraf492e122009-12-23 15:29:42 +01007719 if (sd->flags & SD_BALANCE_NEWIDLE) {
Jason Low9bd721c2013-09-13 11:26:52 -07007720 t0 = sched_clock_cpu(this_cpu);
7721
Peter Zijlstraf492e122009-12-23 15:29:42 +01007722 pulled_task = load_balance(this_cpu, this_rq,
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007723 sd, CPU_NEWLY_IDLE,
7724 &continue_balancing);
Jason Low9bd721c2013-09-13 11:26:52 -07007725
7726 domain_cost = sched_clock_cpu(this_cpu) - t0;
7727 if (domain_cost > sd->max_newidle_lb_cost)
7728 sd->max_newidle_lb_cost = domain_cost;
7729
7730 curr_cost += domain_cost;
Peter Zijlstraf492e122009-12-23 15:29:42 +01007731 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007732
Jason Low52a08ef2014-05-08 17:49:22 -07007733 update_next_balance(sd, 0, &next_balance);
Jason Low39a4d9c2014-04-23 18:30:35 -07007734
7735 /*
7736 * Stop searching for tasks to pull if there are
7737 * now runnable tasks on this rq.
7738 */
7739 if (pulled_task || this_rq->nr_running > 0)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007740 break;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007741 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02007742 rcu_read_unlock();
Peter Zijlstraf492e122009-12-23 15:29:42 +01007743
7744 raw_spin_lock(&this_rq->lock);
7745
Jason Low0e5b5332014-04-28 15:45:54 -07007746 if (curr_cost > this_rq->max_idle_balance_cost)
7747 this_rq->max_idle_balance_cost = curr_cost;
7748
Daniel Lezcanoe5fc6612014-01-17 10:04:02 +01007749 /*
Jason Low0e5b5332014-04-28 15:45:54 -07007750	 * While browsing the domains, we released the rq lock; a task could
7751 * have been enqueued in the meantime. Since we're not going idle,
7752 * pretend we pulled a task.
Daniel Lezcanoe5fc6612014-01-17 10:04:02 +01007753 */
Jason Low0e5b5332014-04-28 15:45:54 -07007754 if (this_rq->cfs.h_nr_running && !pulled_task)
Peter Zijlstra6e831252014-02-11 16:11:48 +01007755 pulled_task = 1;
Daniel Lezcanoe5fc6612014-01-17 10:04:02 +01007756
Peter Zijlstra6e831252014-02-11 16:11:48 +01007757out:
Jason Low52a08ef2014-05-08 17:49:22 -07007758 /* Move the next balance forward */
7759 if (time_after(this_rq->next_balance, next_balance))
7760 this_rq->next_balance = next_balance;
7761
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04007762 /* Is there a task of a high priority class? */
Kirill Tkhai46383642014-03-15 02:15:07 +04007763 if (this_rq->nr_running != this_rq->cfs.h_nr_running)
Kirill Tkhaie4aa3582014-03-06 13:31:55 +04007764 pulled_task = -1;
7765
Dietmar Eggemann38c6ade2015-10-20 13:04:41 +01007766 if (pulled_task)
Peter Zijlstra6e831252014-02-11 16:11:48 +01007767 this_rq->idle_stamp = 0;
7768
Daniel Lezcano3c4017c2014-01-17 10:04:03 +01007769 return pulled_task;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007770}
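/*
 * Illustrative sketch, not kernel code: idle_balance() only walks a domain
 * while the expected cost still fits into the observed idle time. Worked
 * example in nanoseconds: avg_idle = 500000 with per-domain max costs
 * 120000, 300000 and 250000 balances the first two domains (cumulative
 * cost 420000, assuming each balance costs its max) and skips the third,
 * since 420000 + 250000 > 500000.
 */
static int worth_newidle_balance(unsigned long long avg_idle,
				 unsigned long long curr_cost,
				 unsigned long long domain_max_cost)
{
	return avg_idle >= curr_cost + domain_max_cost;
}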
7771
7772/*
Tejun Heo969c7922010-05-06 18:49:21 +02007773 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
7774 * running tasks off the busiest CPU onto idle CPUs. It requires at
7775 * least 1 task to be running on each physical CPU where possible, and
7776 * avoids physical / logical imbalances.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007777 */
Tejun Heo969c7922010-05-06 18:49:21 +02007778static int active_load_balance_cpu_stop(void *data)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007779{
Tejun Heo969c7922010-05-06 18:49:21 +02007780 struct rq *busiest_rq = data;
7781 int busiest_cpu = cpu_of(busiest_rq);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007782 int target_cpu = busiest_rq->push_cpu;
Tejun Heo969c7922010-05-06 18:49:21 +02007783 struct rq *target_rq = cpu_rq(target_cpu);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007784 struct sched_domain *sd;
Kirill Tkhaie5673f22014-08-20 13:48:01 +04007785 struct task_struct *p = NULL;
Tejun Heo969c7922010-05-06 18:49:21 +02007786
7787 raw_spin_lock_irq(&busiest_rq->lock);
7788
7789 /* make sure the requested cpu hasn't gone down in the meantime */
7790 if (unlikely(busiest_cpu != smp_processor_id() ||
7791 !busiest_rq->active_balance))
7792 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007793
7794 /* Is there any task to move? */
7795 if (busiest_rq->nr_running <= 1)
Tejun Heo969c7922010-05-06 18:49:21 +02007796 goto out_unlock;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007797
7798 /*
7799	 * This condition is "impossible"; if it occurs
7800 * we need to fix it. Originally reported by
7801 * Bjorn Helgaas on a 128-cpu setup.
7802 */
7803 BUG_ON(busiest_rq == target_rq);
7804
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007805 /* Search for an sd spanning us and the target CPU. */
Peter Zijlstradce840a2011-04-07 14:09:50 +02007806 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007807 for_each_domain(target_cpu, sd) {
7808 if ((sd->flags & SD_LOAD_BALANCE) &&
7809 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
7810 break;
7811 }
7812
7813 if (likely(sd)) {
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007814 struct lb_env env = {
7815 .sd = sd,
Peter Zijlstraddcdf6e2012-02-22 19:27:40 +01007816 .dst_cpu = target_cpu,
7817 .dst_rq = target_rq,
7818 .src_cpu = busiest_rq->cpu,
7819 .src_rq = busiest_rq,
Peter Zijlstra8e45cb52012-02-22 12:47:19 +01007820 .idle = CPU_IDLE,
7821 };
7822
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007823 schedstat_inc(sd, alb_count);
7824
Kirill Tkhaie5673f22014-08-20 13:48:01 +04007825 p = detach_one_task(&env);
Srikar Dronamrajud02c071182016-03-23 17:54:44 +05307826 if (p) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007827 schedstat_inc(sd, alb_pushed);
Srikar Dronamrajud02c071182016-03-23 17:54:44 +05307828 /* Active balancing done, reset the failure counter. */
7829 sd->nr_balance_failed = 0;
7830 } else {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007831 schedstat_inc(sd, alb_failed);
Srikar Dronamrajud02c071182016-03-23 17:54:44 +05307832 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007833 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02007834 rcu_read_unlock();
Tejun Heo969c7922010-05-06 18:49:21 +02007835out_unlock:
7836 busiest_rq->active_balance = 0;
Kirill Tkhaie5673f22014-08-20 13:48:01 +04007837 raw_spin_unlock(&busiest_rq->lock);
7838
7839 if (p)
7840 attach_one_task(target_rq, p);
7841
7842 local_irq_enable();
7843
Tejun Heo969c7922010-05-06 18:49:21 +02007844 return 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007845}
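/*
 * Illustrative sketch, not kernel code: the stopper callback detaches at
 * most one task while holding the busiest rq's lock and attaches it to the
 * target only after dropping that lock, so both locks are never held at
 * once. A pthread analog (all names hypothetical):
 */
#include <pthread.h>

struct toy_queue { pthread_mutex_t lock; int nr_running; };

static void push_one_task(struct toy_queue *busiest, struct toy_queue *target)
{
	int detached = 0;

	pthread_mutex_lock(&busiest->lock);
	if (busiest->nr_running > 1) {	/* keep the currently running task */
		busiest->nr_running--;	/* detach_one_task() analog */
		detached = 1;
	}
	pthread_mutex_unlock(&busiest->lock);

	if (detached) {
		pthread_mutex_lock(&target->lock);
		target->nr_running++;	/* attach_one_task() analog */
		pthread_mutex_unlock(&target->lock);
	}
}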
7846
Mike Galbraithd987fc72011-12-05 10:01:47 +01007847static inline int on_null_domain(struct rq *rq)
7848{
7849 return unlikely(!rcu_dereference_sched(rq->sd));
7850}
7851
Frederic Weisbecker3451d022011-08-10 23:21:01 +02007852#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007853/*
7854 * idle load balancing details
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007855 * - When one of the busy CPUs notices that idle rebalancing may be
7856 *   needed, it will kick the idle load balancer, which then does idle
7857 * load balancing for all the idle CPUs.
7858 */
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007859static struct {
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007860 cpumask_var_t idle_cpus_mask;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007861 atomic_t nr_cpus;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007862 unsigned long next_balance; /* in jiffy units */
7863} nohz ____cacheline_aligned;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007864
Daniel Lezcano3dd03372014-01-06 12:34:41 +01007865static inline int find_new_ilb(void)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007866{
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007867 int ilb = cpumask_first(nohz.idle_cpus_mask);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007868
Suresh Siddha786d6dc2011-12-01 17:07:35 -08007869 if (ilb < nr_cpu_ids && idle_cpu(ilb))
7870 return ilb;
7871
7872 return nr_cpu_ids;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007873}
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007874
7875/*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007876 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
7877 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
7878 * CPU (if there is one).
7879 */
Daniel Lezcano0aeeeeb2014-01-06 12:34:42 +01007880static void nohz_balancer_kick(void)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007881{
7882 int ilb_cpu;
7883
7884 nohz.next_balance++;
7885
Daniel Lezcano3dd03372014-01-06 12:34:41 +01007886 ilb_cpu = find_new_ilb();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007887
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007888 if (ilb_cpu >= nr_cpu_ids)
7889 return;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007890
Suresh Siddhacd490c52011-12-06 11:26:34 -08007891 if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
Suresh Siddha1c792db2011-12-01 17:07:32 -08007892 return;
7893 /*
7894 * Use smp_send_reschedule() instead of resched_cpu().
7895	 * This way we generate a sched IPI on the target cpu, which
7896	 * is idle. And the softirq performing the nohz idle load balance
7897 * will be run before returning from the IPI.
7898 */
7899 smp_send_reschedule(ilb_cpu);
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007900 return;
7901}
7902
Thomas Gleixner20a5c8c2016-03-10 12:54:20 +01007903void nohz_balance_exit_idle(unsigned int cpu)
Suresh Siddha71325962012-01-19 18:28:57 -08007904{
7905 if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
Mike Galbraithd987fc72011-12-05 10:01:47 +01007906 /*
7907		 * Completely isolated CPUs never set themselves in
		 * nohz.idle_cpus_mask, so we must test.
7908 */
7909 if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
7910 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
7911 atomic_dec(&nohz.nr_cpus);
7912 }
Suresh Siddha71325962012-01-19 18:28:57 -08007913 clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
7914 }
7915}
7916
Suresh Siddha69e1e812011-12-01 17:07:33 -08007917static inline void set_cpu_sd_state_busy(void)
7918{
7919 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307920 int cpu = smp_processor_id();
Suresh Siddha69e1e812011-12-01 17:07:33 -08007921
Suresh Siddha69e1e812011-12-01 17:07:33 -08007922 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307923 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Vincent Guittot25f55d92013-04-23 16:59:02 +02007924
7925 if (!sd || !sd->nohz_idle)
7926 goto unlock;
7927 sd->nohz_idle = 0;
7928
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007929 atomic_inc(&sd->groups->sgc->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02007930unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08007931 rcu_read_unlock();
7932}
7933
7934void set_cpu_sd_state_idle(void)
7935{
7936 struct sched_domain *sd;
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307937 int cpu = smp_processor_id();
Suresh Siddha69e1e812011-12-01 17:07:33 -08007938
Suresh Siddha69e1e812011-12-01 17:07:33 -08007939 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05307940 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Vincent Guittot25f55d92013-04-23 16:59:02 +02007941
7942 if (!sd || sd->nohz_idle)
7943 goto unlock;
7944 sd->nohz_idle = 1;
7945
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04007946 atomic_dec(&sd->groups->sgc->nr_busy_cpus);
Vincent Guittot25f55d92013-04-23 16:59:02 +02007947unlock:
Suresh Siddha69e1e812011-12-01 17:07:33 -08007948 rcu_read_unlock();
7949}
7950
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07007951/*
Alex Shic1cc0172012-09-10 15:10:58 +08007952 * This routine will record that the cpu is going idle with tick stopped.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08007953 * This info will be used in performing idle load balancing in the future.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007954 */
Alex Shic1cc0172012-09-10 15:10:58 +08007955void nohz_balance_enter_idle(int cpu)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007956{
Suresh Siddha71325962012-01-19 18:28:57 -08007957 /*
7958 * If this cpu is going down, then nothing needs to be done.
7959 */
7960 if (!cpu_active(cpu))
7961 return;
7962
Alex Shic1cc0172012-09-10 15:10:58 +08007963 if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
7964 return;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007965
Mike Galbraithd987fc72011-12-05 10:01:47 +01007966 /*
7967 * If we're a completely isolated CPU, we don't play.
7968 */
7969 if (on_null_domain(cpu_rq(cpu)))
7970 return;
7971
Alex Shic1cc0172012-09-10 15:10:58 +08007972 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
7973 atomic_inc(&nohz.nr_cpus);
7974 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007975}
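/*
 * Illustrative sketch, not kernel code: enter/exit keep nohz.idle_cpus_mask
 * and nohz.nr_cpus consistent so nohz_kick_needed() can bail out with one
 * atomic read when nothing is tickless. Userspace analog with C11 atomics
 * for up to 64 cpus (all names hypothetical):
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t tickless_mask;
static atomic_int nr_tickless;

static void toy_enter_idle(int cpu)
{
	uint64_t bit = 1ull << cpu;

	if (!(atomic_fetch_or(&tickless_mask, bit) & bit))
		atomic_fetch_add(&nr_tickless, 1);	/* newly tickless */
}

static void toy_exit_idle(int cpu)
{
	uint64_t bit = 1ull << cpu;

	if (atomic_fetch_and(&tickless_mask, ~bit) & bit)
		atomic_fetch_sub(&nr_tickless, 1);	/* was tickless */
}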
7976#endif
7977
7978static DEFINE_SPINLOCK(balancing);
7979
Peter Zijlstra49c022e2011-04-05 10:14:25 +02007980/*
7981 * Scale the max load_balance interval with the number of CPUs in the system.
7982 * This trades load-balance latency on larger machines for less cross talk.
7983 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02007984void update_max_interval(void)
Peter Zijlstra49c022e2011-04-05 10:14:25 +02007985{
7986 max_load_balance_interval = HZ*num_online_cpus()/10;
7987}
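/*
 * Illustrative example, not kernel code: with HZ = 1000, 8 online cpus give
 * max_load_balance_interval = 1000 * 8 / 10 = 800 jiffies (800ms), while 64
 * cpus give 6400 jiffies, trading balance latency for less cross talk.
 */
static unsigned long model_max_interval(unsigned long hz, unsigned int ncpus)
{
	return hz * ncpus / 10;
}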
7988
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007989/*
7990 * It checks each scheduling domain to see if it is due to be balanced,
7991 * and initiates a balancing operation if so.
7992 *
Libinb9b08532013-04-01 19:14:01 +08007993 * Balancing parameters are set up in init_sched_domains.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007994 */
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01007995static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007996{
Joonsoo Kim23f0d202013-08-06 17:36:42 +09007997 int continue_balancing = 1;
Daniel Lezcanof7ed0a82014-01-06 12:34:43 +01007998 int cpu = rq->cpu;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01007999 unsigned long interval;
Peter Zijlstra04f733b2012-05-11 00:12:02 +02008000 struct sched_domain *sd;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008001 /* Earliest time when we have to do rebalance again */
8002 unsigned long next_balance = jiffies + 60*HZ;
8003 int update_next_balance = 0;
Jason Lowf48627e2013-09-13 11:26:53 -07008004 int need_serialize, need_decay = 0;
8005 u64 max_cost = 0;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008006
Paul Turner48a16752012-10-04 13:18:31 +02008007 update_blocked_averages(cpu);
Peter Zijlstra2069dd72010-11-15 15:47:00 -08008008
Peter Zijlstradce840a2011-04-07 14:09:50 +02008009 rcu_read_lock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008010 for_each_domain(cpu, sd) {
Jason Lowf48627e2013-09-13 11:26:53 -07008011 /*
8012 * Decay the newidle max times here because this is a regular
8013 * visit to all the domains. Decay ~1% per second.
8014 */
8015 if (time_after(jiffies, sd->next_decay_max_lb_cost)) {
8016 sd->max_newidle_lb_cost =
8017 (sd->max_newidle_lb_cost * 253) / 256;
8018 sd->next_decay_max_lb_cost = jiffies + HZ;
8019 need_decay = 1;
8020 }
8021 max_cost += sd->max_newidle_lb_cost;
8022
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008023 if (!(sd->flags & SD_LOAD_BALANCE))
8024 continue;
8025
Jason Lowf48627e2013-09-13 11:26:53 -07008026 /*
8027 * Stop the load balance at this level. There is another
8028 * CPU in our sched group which is doing load balancing more
8029 * actively.
8030 */
8031 if (!continue_balancing) {
8032 if (need_decay)
8033 continue;
8034 break;
8035 }
8036
Jason Low52a08ef2014-05-08 17:49:22 -07008037 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008038
8039 need_serialize = sd->flags & SD_SERIALIZE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008040 if (need_serialize) {
8041 if (!spin_trylock(&balancing))
8042 goto out;
8043 }
8044
8045 if (time_after_eq(jiffies, sd->last_balance + interval)) {
Joonsoo Kim23f0d202013-08-06 17:36:42 +09008046 if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008047 /*
Peter Zijlstra62633222013-08-19 12:41:09 +02008048 * The LBF_DST_PINNED logic could have changed
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09008049 * env->dst_cpu, so we can't know our idle
8050 * state even if we migrated tasks. Update it.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008051 */
Joonsoo Kimde5eb2d2013-04-23 17:27:38 +09008052 idle = idle_cpu(cpu) ? CPU_IDLE : CPU_NOT_IDLE;
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008053 }
8054 sd->last_balance = jiffies;
Jason Low52a08ef2014-05-08 17:49:22 -07008055 interval = get_sd_balance_interval(sd, idle != CPU_IDLE);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008056 }
8057 if (need_serialize)
8058 spin_unlock(&balancing);
8059out:
8060 if (time_after(next_balance, sd->last_balance + interval)) {
8061 next_balance = sd->last_balance + interval;
8062 update_next_balance = 1;
8063 }
Jason Lowf48627e2013-09-13 11:26:53 -07008064 }
8065 if (need_decay) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008066 /*
Jason Lowf48627e2013-09-13 11:26:53 -07008067 * Ensure the rq-wide value also decays but keep it at a
8068 * reasonable floor to avoid funnies with rq->avg_idle.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008069 */
Jason Lowf48627e2013-09-13 11:26:53 -07008070 rq->max_idle_balance_cost =
8071 max((u64)sysctl_sched_migration_cost, max_cost);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008072 }
Peter Zijlstradce840a2011-04-07 14:09:50 +02008073 rcu_read_unlock();
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008074
8075 /*
8076 * next_balance will be updated only when there is a need.
8077 * When the cpu is attached to null domain for ex, it will not be
8078 * updated.
8079 */
Vincent Guittotc5afb6a2015-08-03 11:55:50 +02008080 if (likely(update_next_balance)) {
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008081 rq->next_balance = next_balance;
Vincent Guittotc5afb6a2015-08-03 11:55:50 +02008082
8083#ifdef CONFIG_NO_HZ_COMMON
8084 /*
8085		 * If this CPU has been elected to perform the nohz idle
8086		 * balance, the other idle CPUs have already rebalanced with
8087		 * nohz_idle_balance() and nohz.next_balance has been
8088		 * updated accordingly. This CPU is now running the idle load
8089		 * balance for itself, and we need to update
8090		 * nohz.next_balance accordingly.
8091 */
8092 if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
8093 nohz.next_balance = rq->next_balance;
8094#endif
8095 }
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008096}
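/*
 * Illustrative example, not kernel code: the "~1% per second" decay above
 * multiplies by 253/256 each step, so 100000ns becomes 98828ns after one
 * second and halves after roughly a minute (0.98828^59 is about 0.5).
 */
static unsigned long long decay_newidle_cost(unsigned long long cost)
{
	return cost * 253 / 256;
}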
8097
Frederic Weisbecker3451d022011-08-10 23:21:01 +02008098#ifdef CONFIG_NO_HZ_COMMON
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008099/*
Frederic Weisbecker3451d022011-08-10 23:21:01 +02008100 * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008101 * rebalancing for all the cpus for whom scheduler ticks are stopped.
8102 */
Daniel Lezcano208cb162014-01-06 12:34:44 +01008103static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008104{
Daniel Lezcano208cb162014-01-06 12:34:44 +01008105 int this_cpu = this_rq->cpu;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008106 struct rq *rq;
8107 int balance_cpu;
Vincent Guittotc5afb6a2015-08-03 11:55:50 +02008108 /* Earliest time when we have to do rebalance again */
8109 unsigned long next_balance = jiffies + 60*HZ;
8110 int update_next_balance = 0;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008111
Suresh Siddha1c792db2011-12-01 17:07:32 -08008112 if (idle != CPU_IDLE ||
8113 !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
8114 goto end;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008115
8116 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
Suresh Siddha8a6d42d2011-12-06 11:19:37 -08008117 if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008118 continue;
8119
8120 /*
8121 * If this cpu gets work to do, stop the load balancing
8122		 * work being done for other cpus. The next load
8123		 * balancing owner will pick it up.
8124 */
Suresh Siddha1c792db2011-12-01 17:07:32 -08008125 if (need_resched())
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008126 break;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008127
Vincent Guittot5ed4f1d2012-09-13 06:11:26 +02008128 rq = cpu_rq(balance_cpu);
8129
Tim Chened61bbc2014-05-20 14:39:27 -07008130 /*
8131		 * If the time for the next balance is due,
8132 * do the balance.
8133 */
8134 if (time_after_eq(jiffies, rq->next_balance)) {
8135 raw_spin_lock_irq(&rq->lock);
8136 update_rq_clock(rq);
Frederic Weisbeckercee1afc2016-04-13 15:56:50 +02008137 cpu_load_update_idle(rq);
Tim Chened61bbc2014-05-20 14:39:27 -07008138 raw_spin_unlock_irq(&rq->lock);
8139 rebalance_domains(rq, CPU_IDLE);
8140 }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008141
Vincent Guittotc5afb6a2015-08-03 11:55:50 +02008142 if (time_after(next_balance, rq->next_balance)) {
8143 next_balance = rq->next_balance;
8144 update_next_balance = 1;
8145 }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008146 }
Vincent Guittotc5afb6a2015-08-03 11:55:50 +02008147
8148 /*
8149 * next_balance will be updated only when there is a need.
8150	 * When the CPU is attached to a null domain, for example, it will not be
8151 * updated.
8152 */
8153 if (likely(update_next_balance))
8154 nohz.next_balance = next_balance;
Suresh Siddha1c792db2011-12-01 17:07:32 -08008155end:
8156 clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008157}
8158
8159/*
Suresh Siddha0b005cf2011-12-01 17:07:34 -08008160 * Current heuristic for kicking the idle load balancer in the presence
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008161 * of an idle cpu in the system.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08008162 * - This rq has more than one task.
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008163 * - This rq has at least one CFS task and the capacity of the CPU is
8164 * significantly reduced because of RT tasks or IRQs.
8165 *   - At the parent of the LLC scheduler domain level, this cpu's scheduler
8166 *     group has multiple busy cpus.
Suresh Siddha0b005cf2011-12-01 17:07:34 -08008167 *   - For SD_ASYM_PACKING, if the lower numbered cpus in the scheduler
8168 * domain span are idle.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008169 */
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008170static inline bool nohz_kick_needed(struct rq *rq)
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008171{
8172 unsigned long now = jiffies;
Suresh Siddha0b005cf2011-12-01 17:07:34 -08008173 struct sched_domain *sd;
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04008174 struct sched_group_capacity *sgc;
Daniel Lezcano4a725622014-01-06 12:34:39 +01008175 int nr_busy, cpu = rq->cpu;
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008176 bool kick = false;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008177
Daniel Lezcano4a725622014-01-06 12:34:39 +01008178 if (unlikely(rq->idle_balance))
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008179 return false;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008180
Suresh Siddha1c792db2011-12-01 17:07:32 -08008181 /*
8182	 * We may have recently been in ticked or tickless idle mode. At the first
8183 * busy tick after returning from idle, we will update the busy stats.
8184 */
Suresh Siddha69e1e812011-12-01 17:07:33 -08008185 set_cpu_sd_state_busy();
Alex Shic1cc0172012-09-10 15:10:58 +08008186 nohz_balance_exit_idle(cpu);
Suresh Siddha0b005cf2011-12-01 17:07:34 -08008187
8188 /*
8189 * None are in tickless mode and hence no need for NOHZ idle load
8190 * balancing.
8191 */
8192 if (likely(!atomic_read(&nohz.nr_cpus)))
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008193 return false;
Suresh Siddha1c792db2011-12-01 17:07:32 -08008194
8195 if (time_before(now, nohz.next_balance))
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008196 return false;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008197
Suresh Siddha0b005cf2011-12-01 17:07:34 -08008198 if (rq->nr_running >= 2)
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008199 return true;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008200
Peter Zijlstra067491b2011-12-07 14:32:08 +01008201 rcu_read_lock();
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05308202 sd = rcu_dereference(per_cpu(sd_busy, cpu));
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05308203 if (sd) {
Nicolas Pitre63b2ca32014-05-26 18:19:37 -04008204 sgc = sd->groups->sgc;
8205 nr_busy = atomic_read(&sgc->nr_busy_cpus);
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05308206
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008207 if (nr_busy > 1) {
8208 kick = true;
8209 goto unlock;
8210 }
8211
8212 }
8213
8214 sd = rcu_dereference(rq->sd);
8215 if (sd) {
8216 if ((rq->cfs.h_nr_running >= 1) &&
8217 check_cpu_capacity(rq, sd)) {
8218 kick = true;
8219 goto unlock;
8220 }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008221 }
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05308222
8223 sd = rcu_dereference(per_cpu(sd_asym, cpu));
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05308224 if (sd && (cpumask_first_and(nohz.idle_cpus_mask,
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008225 sched_domain_span(sd)) < cpu)) {
8226 kick = true;
8227 goto unlock;
8228 }
Preeti U Murthy37dc6b52013-10-30 08:42:52 +05308229
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008230unlock:
Peter Zijlstra067491b2011-12-07 14:32:08 +01008231 rcu_read_unlock();
Vincent Guittot1aaf90a2015-02-27 16:54:14 +01008232 return kick;
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008233}
8234#else
Daniel Lezcano208cb162014-01-06 12:34:44 +01008235static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle) { }
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008236#endif
8237
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008238/*
8239 * run_rebalance_domains is triggered when needed from the scheduler tick.
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008240 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008241 */
8242static void run_rebalance_domains(struct softirq_action *h)
8243{
Daniel Lezcano208cb162014-01-06 12:34:44 +01008244 struct rq *this_rq = this_rq();
Suresh Siddha6eb57e02011-10-03 15:09:01 -07008245 enum cpu_idle_type idle = this_rq->idle_balance ?
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008246 CPU_IDLE : CPU_NOT_IDLE;
8247
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008248 /*
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008249 * If this cpu has a pending nohz_balance_kick, then do the
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008250 * balancing on behalf of the other idle cpus whose ticks are
Preeti U Murthyd4573c32015-03-26 18:32:44 +05308251 * stopped. Do nohz_idle_balance *before* rebalance_domains to
8252 * give the idle cpus a chance to load balance. Else we may
8253 * load balance only within the local sched_domain hierarchy
8254 * and abort nohz_idle_balance altogether if we pull some load.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008255 */
Daniel Lezcano208cb162014-01-06 12:34:44 +01008256 nohz_idle_balance(this_rq, idle);
Preeti U Murthyd4573c32015-03-26 18:32:44 +05308257 rebalance_domains(this_rq, idle);
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008258}
8259
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008260/*
8261 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008262 */
Daniel Lezcano7caff662014-01-06 12:34:38 +01008263void trigger_load_balance(struct rq *rq)
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008264{
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008265 /* Don't need to rebalance while attached to NULL domain */
Daniel Lezcanoc7260992014-01-06 12:34:45 +01008266 if (unlikely(on_null_domain(rq)))
8267 return;
8268
8269 if (time_after_eq(jiffies, rq->next_balance))
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008270 raise_softirq(SCHED_SOFTIRQ);
Frederic Weisbecker3451d022011-08-10 23:21:01 +02008271#ifdef CONFIG_NO_HZ_COMMON
Daniel Lezcanoc7260992014-01-06 12:34:45 +01008272 if (nohz_kick_needed(rq))
Daniel Lezcano0aeeeeb2014-01-06 12:34:42 +01008273 nohz_balancer_kick();
Venkatesh Pallipadi83cd4fe2010-05-21 17:09:41 -07008274#endif
Peter Zijlstra1e3c88b2009-12-17 17:00:43 +01008275}
8276
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01008277static void rq_online_fair(struct rq *rq)
8278{
8279 update_sysctl();
Kirill Tkhai0e59bda2014-06-25 12:19:42 +04008280
8281 update_runtime_enabled(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01008282}
8283
8284static void rq_offline_fair(struct rq *rq)
8285{
8286 update_sysctl();
Peter Boonstoppela4c96ae2012-08-09 15:34:47 -07008287
8288 /* Ensure any throttled groups are reachable by pick_next_task */
8289 unthrottle_offline_cfs_rqs(rq);
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01008290}
8291
Dhaval Giani55e12e52008-06-24 23:39:43 +05308292#endif /* CONFIG_SMP */
Peter Williamse1d14842007-10-24 18:23:51 +02008293
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008294/*
8295 * scheduler tick hitting a task of our scheduling class:
8296 */
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01008297static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008298{
8299 struct cfs_rq *cfs_rq;
8300 struct sched_entity *se = &curr->se;
8301
8302 for_each_sched_entity(se) {
8303 cfs_rq = cfs_rq_of(se);
Peter Zijlstra8f4d37e2008-01-25 21:08:29 +01008304 entity_tick(cfs_rq, se, queued);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008305 }
Ben Segall18bf2802012-10-04 12:51:20 +02008306
Srikar Dronamrajub52da862015-10-02 07:48:25 +05308307 if (static_branch_unlikely(&sched_numa_balancing))
Peter Zijlstracbee9f82012-10-25 14:16:43 +02008308 task_tick_numa(rq, curr);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008309}
8310
8311/*
Peter Zijlstracd29fe62009-11-27 17:32:46 +01008312 * called on fork with the child task as argument from the parent's context
8313 * - child not yet on the tasklist
8314 * - preemption disabled
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008315 */
Peter Zijlstracd29fe62009-11-27 17:32:46 +01008316static void task_fork_fair(struct task_struct *p)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008317{
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09008318 struct cfs_rq *cfs_rq;
8319 struct sched_entity *se = &p->se, *curr;
Ingo Molnar00bf7bf2007-10-15 17:00:14 +02008320 int this_cpu = smp_processor_id();
Peter Zijlstracd29fe62009-11-27 17:32:46 +01008321 struct rq *rq = this_rq();
8322 unsigned long flags;
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008323
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008324 raw_spin_lock_irqsave(&rq->lock, flags);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01008325
Peter Zijlstra861d0342010-08-19 13:31:43 +02008326 update_rq_clock(rq);
8327
Daisuke Nishimura4fc420c2011-12-15 14:36:55 +09008328 cfs_rq = task_cfs_rq(current);
8329 curr = cfs_rq->curr;
8330
Daisuke Nishimura6c9a27f2013-09-10 18:16:36 +09008331 /*
8332 * Not only the cpu but also the task_group of the parent might have
8333 * been changed after parent->se.parent,cfs_rq were copied to
8334 * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
8335	 * of the child point to valid ones.
8336 */
8337 rcu_read_lock();
8338 __set_task_cpu(p, this_cpu);
8339 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008340
Ting Yang7109c4422007-08-28 12:53:24 +02008341 update_curr(cfs_rq);
Peter Zijlstracd29fe62009-11-27 17:32:46 +01008342
Mike Galbraithb5d9d732009-09-08 11:12:28 +02008343 if (curr)
8344 se->vruntime = curr->vruntime;
Peter Zijlstraaeb73b02007-10-15 17:00:05 +02008345 place_entity(cfs_rq, se, 1);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02008346
Peter Zijlstracd29fe62009-11-27 17:32:46 +01008347 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
Dmitry Adamushko87fefa32007-10-15 17:00:08 +02008348 /*
Ingo Molnaredcb60a2007-10-15 17:00:08 +02008349 * Upon rescheduling, sched_class::put_prev_task() will place
8350 * 'current' within the tree based on its new key value.
8351 */
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02008352 swap(curr->vruntime, se->vruntime);
Kirill Tkhai88751252014-06-29 00:03:57 +04008353 resched_curr(rq);
Peter Zijlstra4d78e7b2007-10-15 17:00:04 +02008354 }
8355
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01008356 se->vruntime -= cfs_rq->min_vruntime;
8357
Thomas Gleixner05fa7852009-11-17 14:28:38 +01008358 raw_spin_unlock_irqrestore(&rq->lock, flags);
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008359}
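/*
 * Illustrative sketch, not kernel code: the child's vruntime is stored
 * relative to the parent's cfs_rq min_vruntime above and re-based against
 * the destination cfs_rq when the child is finally enqueued, so absolute
 * vruntimes from different runqueues never mix:
 */
static unsigned long long rebase_vruntime(unsigned long long child_vruntime,
					  unsigned long long src_min_vruntime,
					  unsigned long long dst_min_vruntime)
{
	/* normalize at fork ... */
	child_vruntime -= src_min_vruntime;
	/* ... re-base at enqueue on the destination rq */
	return child_vruntime + dst_min_vruntime;
}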
8360
Steven Rostedtcb469842008-01-25 21:08:22 +01008361/*
8362 * Priority of the task has changed. Check to see if we preempt
8363 * the current task.
8364 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008365static void
8366prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
Steven Rostedtcb469842008-01-25 21:08:22 +01008367{
Kirill Tkhaida0c1e62014-08-20 13:47:32 +04008368 if (!task_on_rq_queued(p))
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008369 return;
8370
Steven Rostedtcb469842008-01-25 21:08:22 +01008371 /*
8372 * Reschedule if we are currently running on this runqueue and
8373 * our priority decreased, or if we are not currently running on
8374 * this runqueue and our priority is higher than the current's
8375 */
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008376 if (rq->curr == p) {
Steven Rostedtcb469842008-01-25 21:08:22 +01008377 if (p->prio > oldprio)
Kirill Tkhai88751252014-06-29 00:03:57 +04008378 resched_curr(rq);
Steven Rostedtcb469842008-01-25 21:08:22 +01008379 } else
Peter Zijlstra15afe092008-09-20 23:38:02 +02008380 check_preempt_curr(rq, p, 0);
Steven Rostedtcb469842008-01-25 21:08:22 +01008381}
8382
Byungchul Parkdaa59402015-08-20 20:22:00 +09008383static inline bool vruntime_normalized(struct task_struct *p)
8384{
8385 struct sched_entity *se = &p->se;
8386
8387 /*
8388 * In both the TASK_ON_RQ_QUEUED and TASK_ON_RQ_MIGRATING cases,
8389 * the dequeue_entity(.flags=0) will already have normalized the
8390 * vruntime.
8391 */
8392 if (p->on_rq)
8393 return true;
8394
8395 /*
8396 * When !on_rq, vruntime of the task has usually NOT been normalized.
8397 * But there are some cases where it has already been normalized:
8398 *
8399	 * - A forked child which is waiting to be woken up by
8400	 *   wake_up_new_task().
8401	 * - A task which has been woken up by try_to_wake_up() and is
8402	 *   waiting to actually be woken up by sched_ttwu_pending().
8403 */
8404 if (!se->sum_exec_runtime || p->state == TASK_WAKING)
8405 return true;
8406
8407 return false;
8408}
8409
8410static void detach_task_cfs_rq(struct task_struct *p)
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008411{
8412 struct sched_entity *se = &p->se;
8413 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8414
Byungchul Parkdaa59402015-08-20 20:22:00 +09008415 if (!vruntime_normalized(p)) {
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008416 /*
8417 * Fix up our vruntime so that the current sleep doesn't
8418 * cause 'unlimited' sleep bonus.
8419 */
8420 place_entity(cfs_rq, se, 0);
8421 se->vruntime -= cfs_rq->min_vruntime;
8422 }
Paul Turner9ee474f2012-10-04 13:18:30 +02008423
Yuyang Du9d89c252015-07-15 08:04:37 +08008424 /* Catch up with the cfs_rq and remove our load when we leave */
Byungchul Parka05e8c52015-08-20 20:21:56 +09008425 detach_entity_load_avg(cfs_rq, se);
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008426}
8427
Byungchul Parkdaa59402015-08-20 20:22:00 +09008428static void attach_task_cfs_rq(struct task_struct *p)
Steven Rostedtcb469842008-01-25 21:08:22 +01008429{
Kirill Tkhaif36c0192014-08-06 12:06:01 +04008430 struct sched_entity *se = &p->se;
Byungchul Parkdaa59402015-08-20 20:22:00 +09008431 struct cfs_rq *cfs_rq = cfs_rq_of(se);
Byungchul Park7855a352015-08-10 18:02:55 +09008432
8433#ifdef CONFIG_FAIR_GROUP_SCHED
Michael wangeb7a59b2014-02-20 11:14:53 +08008434 /*
8435	 * Since the real depth could have been changed (only the FAIR
8436	 * class maintains a depth value), reset the depth properly.
8437 */
8438 se->depth = se->parent ? se->parent->depth + 1 : 0;
8439#endif
Byungchul Park7855a352015-08-10 18:02:55 +09008440
Byungchul Park6efdb102015-08-20 20:21:59 +09008441 /* Synchronize task with its cfs_rq */
Byungchul Parkdaa59402015-08-20 20:22:00 +09008442 attach_entity_load_avg(cfs_rq, se);
Byungchul Park6efdb102015-08-20 20:21:59 +09008443
Byungchul Parkdaa59402015-08-20 20:22:00 +09008444 if (!vruntime_normalized(p))
8445 se->vruntime += cfs_rq->min_vruntime;
8446}
Byungchul Park7855a352015-08-10 18:02:55 +09008447
Byungchul Parkdaa59402015-08-20 20:22:00 +09008448static void switched_from_fair(struct rq *rq, struct task_struct *p)
8449{
8450 detach_task_cfs_rq(p);
8451}
8452
8453static void switched_to_fair(struct rq *rq, struct task_struct *p)
8454{
8455 attach_task_cfs_rq(p);
8456
8457 if (task_on_rq_queued(p)) {
Byungchul Park7855a352015-08-10 18:02:55 +09008458 /*
Byungchul Parkdaa59402015-08-20 20:22:00 +09008459 * We were most likely switched from sched_rt, so
8460 * kick off the schedule if running, otherwise just see
8461 * if we can still preempt the current task.
Byungchul Park7855a352015-08-10 18:02:55 +09008462 */
Byungchul Parkdaa59402015-08-20 20:22:00 +09008463 if (rq->curr == p)
8464 resched_curr(rq);
8465 else
8466 check_preempt_curr(rq, p, 0);
Byungchul Park7855a352015-08-10 18:02:55 +09008467 }
Steven Rostedtcb469842008-01-25 21:08:22 +01008468}
8469
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02008470/* Account for a task changing its policy or group.
8471 *
8472 * This routine is mostly called to set cfs_rq->curr field when a task
8473 * migrates between groups/classes.
8474 */
8475static void set_curr_task_fair(struct rq *rq)
8476{
8477 struct sched_entity *se = &rq->curr->se;
8478
Paul Turnerec12cb72011-07-21 09:43:30 -07008479 for_each_sched_entity(se) {
8480 struct cfs_rq *cfs_rq = cfs_rq_of(se);
8481
8482 set_next_entity(cfs_rq, se);
8483 /* ensure bandwidth has been allocated on our new cfs_rq */
8484 account_cfs_rq_runtime(cfs_rq, 0);
8485 }
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02008486}
8487
Peter Zijlstra029632f2011-10-25 10:00:11 +02008488void init_cfs_rq(struct cfs_rq *cfs_rq)
8489{
8490 cfs_rq->tasks_timeline = RB_ROOT;
Peter Zijlstra029632f2011-10-25 10:00:11 +02008491 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
8492#ifndef CONFIG_64BIT
8493 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
8494#endif
Alex Shi141965c2013-06-26 13:05:39 +08008495#ifdef CONFIG_SMP
Yuyang Du9d89c252015-07-15 08:04:37 +08008496 atomic_long_set(&cfs_rq->removed_load_avg, 0);
8497 atomic_long_set(&cfs_rq->removed_util_avg, 0);
Paul Turner9ee474f2012-10-04 13:18:30 +02008498#endif
Peter Zijlstra029632f2011-10-25 10:00:11 +02008499}
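/*
 * Illustrative sketch, not kernel code: starting min_vruntime at
 * (u64)(-(1LL << 20)) puts it about 1ms (in ns) before u64 wrap-around, so
 * broken vruntime comparisons surface immediately. Orderings must therefore
 * use the signed difference:
 */
static inline int toy_vruntime_after(unsigned long long a,
				     unsigned long long b)
{
	return (long long)(a - b) > 0;	/* wrap-safe comparison */
}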
8500
Peter Zijlstra810b3812008-02-29 15:21:01 -05008501#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrabc54da22015-08-31 17:13:55 +02008502static void task_move_group_fair(struct task_struct *p)
Peter Zijlstra810b3812008-02-29 15:21:01 -05008503{
Byungchul Parkdaa59402015-08-20 20:22:00 +09008504 detach_task_cfs_rq(p);
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008505 set_task_rq(p, task_cpu(p));
Byungchul Park6efdb102015-08-20 20:21:59 +09008506
8507#ifdef CONFIG_SMP
8508	/* Flag that the se's cfs_rq has changed -- the task has migrated */
8509 p->se.avg.last_update_time = 0;
8510#endif
Byungchul Parkdaa59402015-08-20 20:22:00 +09008511 attach_task_cfs_rq(p);
Peter Zijlstra810b3812008-02-29 15:21:01 -05008512}
Peter Zijlstra029632f2011-10-25 10:00:11 +02008513
8514void free_fair_sched_group(struct task_group *tg)
8515{
8516 int i;
8517
8518 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
8519
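	/* Error paths may leave entries partially allocated; kfree(NULL) is a no-op. */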
8520 for_each_possible_cpu(i) {
8521 if (tg->cfs_rq)
8522 kfree(tg->cfs_rq[i]);
Peter Zijlstra6fe1f342016-01-21 22:24:16 +01008523 if (tg->se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02008524 kfree(tg->se[i]);
8525 }
8526
8527 kfree(tg->cfs_rq);
8528 kfree(tg->se);
8529}
8530
8531int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8532{
Peter Zijlstra029632f2011-10-25 10:00:11 +02008533 struct sched_entity *se;
Peter Zijlstrab7fa30c2016-06-09 15:07:50 +02008534 struct cfs_rq *cfs_rq;
8535 struct rq *rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02008536 int i;
8537
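	/*
	 * tg->cfs_rq and tg->se are arrays of per-CPU pointers, so the
	 * sizeof(cfs_rq) and sizeof(se) below are pointer sizes; the
	 * structures themselves are allocated node-locally further down.
	 */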
8538 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
8539 if (!tg->cfs_rq)
8540 goto err;
8541 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
8542 if (!tg->se)
8543 goto err;
8544
8545 tg->shares = NICE_0_LOAD;
8546
8547 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
8548
8549 for_each_possible_cpu(i) {
Peter Zijlstrab7fa30c2016-06-09 15:07:50 +02008550 rq = cpu_rq(i);
8551
Peter Zijlstra029632f2011-10-25 10:00:11 +02008552 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
8553 GFP_KERNEL, cpu_to_node(i));
8554 if (!cfs_rq)
8555 goto err;
8556
8557 se = kzalloc_node(sizeof(struct sched_entity),
8558 GFP_KERNEL, cpu_to_node(i));
8559 if (!se)
8560 goto err_free_rq;
8561
8562 init_cfs_rq(cfs_rq);
8563 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
Yuyang Du540247f2015-07-15 08:04:39 +08008564 init_entity_runnable_average(se);
Peter Zijlstrab7fa30c2016-06-09 15:07:50 +02008565
8566 raw_spin_lock_irq(&rq->lock);
Yuyang Du2b8c41d2016-03-30 04:30:56 +08008567 post_init_entity_util_avg(se);
Peter Zijlstrab7fa30c2016-06-09 15:07:50 +02008568 raw_spin_unlock_irq(&rq->lock);
Peter Zijlstra029632f2011-10-25 10:00:11 +02008569 }
8570
8571 return 1;
8572
8573err_free_rq:
8574 kfree(cfs_rq);
8575err:
8576 return 0;
8577}
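
/*
 * For context, a rough sketch of the caller (adapted from the
 * task-group creation path in kernel/sched/core.c; shown for
 * illustration only -- details vary by kernel version):
 */
#if 0	/* illustrative sketch, not part of this file */
struct task_group *sched_create_group(struct task_group *parent)
{
	struct task_group *tg;

	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
	if (!tg)
		return ERR_PTR(-ENOMEM);

	if (!alloc_fair_sched_group(tg, parent))
		goto err;

	if (!alloc_rt_sched_group(tg, parent))
		goto err;

	return tg;

err:
	sched_free_group(tg);
	return ERR_PTR(-ENOMEM);
}
#endif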
8578
Peter Zijlstra6fe1f342016-01-21 22:24:16 +01008579void unregister_fair_sched_group(struct task_group *tg)
Peter Zijlstra029632f2011-10-25 10:00:11 +02008580{
Peter Zijlstra029632f2011-10-25 10:00:11 +02008581 unsigned long flags;
Peter Zijlstra6fe1f342016-01-21 22:24:16 +01008582 struct rq *rq;
8583 int cpu;
Peter Zijlstra029632f2011-10-25 10:00:11 +02008584
Peter Zijlstra6fe1f342016-01-21 22:24:16 +01008585 for_each_possible_cpu(cpu) {
8586 if (tg->se[cpu])
8587 remove_entity_load_avg(tg->se[cpu]);
Peter Zijlstra029632f2011-10-25 10:00:11 +02008588
Peter Zijlstra6fe1f342016-01-21 22:24:16 +01008589		/*
8590		 * Only empty task groups can be destroyed, so we can speculatively
8591		 * check on_list without danger of it being re-added.
8592		 */
8593 if (!tg->cfs_rq[cpu]->on_list)
8594 continue;
8595
8596 rq = cpu_rq(cpu);
8597
8598 raw_spin_lock_irqsave(&rq->lock, flags);
8599 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
8600 raw_spin_unlock_irqrestore(&rq->lock, flags);
8601 }
Peter Zijlstra029632f2011-10-25 10:00:11 +02008602}
8603
8604void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
8605 struct sched_entity *se, int cpu,
8606 struct sched_entity *parent)
8607{
8608 struct rq *rq = cpu_rq(cpu);
8609
8610 cfs_rq->tg = tg;
8611 cfs_rq->rq = rq;
Peter Zijlstra029632f2011-10-25 10:00:11 +02008612 init_cfs_rq_runtime(cfs_rq);
8613
8614 tg->cfs_rq[cpu] = cfs_rq;
8615 tg->se[cpu] = se;
8616
8617 /* se could be NULL for root_task_group */
8618 if (!se)
8619 return;
8620
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008621 if (!parent) {
Peter Zijlstra029632f2011-10-25 10:00:11 +02008622 se->cfs_rq = &rq->cfs;
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008623 se->depth = 0;
8624 } else {
Peter Zijlstra029632f2011-10-25 10:00:11 +02008625 se->cfs_rq = parent->my_q;
Peter Zijlstrafed14d42012-02-11 06:05:00 +01008626 se->depth = parent->depth + 1;
8627 }
Peter Zijlstra029632f2011-10-25 10:00:11 +02008628
8629 se->my_q = cfs_rq;
Paul Turner0ac9b1c2013-10-16 11:16:27 -07008630 /* guarantee group entities always have weight */
8631 update_load_set(&se->load, NICE_0_LOAD);
Peter Zijlstra029632f2011-10-25 10:00:11 +02008632 se->parent = parent;
8633}
8634
8635static DEFINE_MUTEX(shares_mutex);
8636
8637int sched_group_set_shares(struct task_group *tg, unsigned long shares)
8638{
8639 int i;
8640 unsigned long flags;
8641
8642 /*
8643 * We can't change the weight of the root cgroup.
8644 */
8645 if (!tg->se[0])
8646 return -EINVAL;
8647
8648 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
8649
8650 mutex_lock(&shares_mutex);
8651 if (tg->shares == shares)
8652 goto done;
8653
8654 tg->shares = shares;
8655 for_each_possible_cpu(i) {
8656 struct rq *rq = cpu_rq(i);
8657 struct sched_entity *se;
8658
8659 se = tg->se[i];
8660 /* Propagate contribution to hierarchy */
8661 raw_spin_lock_irqsave(&rq->lock, flags);
Frederic Weisbecker71b1da42013-04-12 01:50:59 +02008662
8663 /* Possible calls to update_curr() need rq clock */
8664 update_rq_clock(rq);
Linus Torvalds17bc14b2012-12-14 07:20:43 -08008665 for_each_sched_entity(se)
Peter Zijlstra029632f2011-10-25 10:00:11 +02008666 update_cfs_shares(group_cfs_rq(se));
8667 raw_spin_unlock_irqrestore(&rq->lock, flags);
8668 }
8669
8670done:
8671 mutex_unlock(&shares_mutex);
8672 return 0;
8673}
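
/*
 * For context, sched_group_set_shares() is reached from the cgroup
 * "cpu.shares" write handler, roughly as below (adapted from
 * kernel/sched/core.c; shown for illustration only):
 */
#if 0	/* illustrative sketch, not part of this file */
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
				struct cftype *cftype, u64 shareval)
{
	return sched_group_set_shares(css_tg(css), scale_load(shareval));
}
#endif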
8674#else /* CONFIG_FAIR_GROUP_SCHED */
8675
8676void free_fair_sched_group(struct task_group *tg) { }
8677
8678int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
8679{
8680 return 1;
8681}
8682
Peter Zijlstra6fe1f342016-01-21 22:24:16 +01008683void unregister_fair_sched_group(struct task_group *tg) { }
Peter Zijlstra029632f2011-10-25 10:00:11 +02008684
8685#endif /* CONFIG_FAIR_GROUP_SCHED */
8686
Peter Zijlstra810b3812008-02-29 15:21:01 -05008687
H Hartley Sweeten6d686f42010-01-13 20:21:52 -07008688static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
Peter Williams0d721ce2009-09-21 01:31:53 +00008689{
8690 struct sched_entity *se = &task->se;
Peter Williams0d721ce2009-09-21 01:31:53 +00008691 unsigned int rr_interval = 0;
8692
8693 /*
8694 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
8695 * idle runqueue:
8696 */
Peter Williams0d721ce2009-09-21 01:31:53 +00008697 if (rq->cfs.load.weight)
Zhu Yanhaia59f4e02013-01-08 12:56:52 +08008698 rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
Peter Williams0d721ce2009-09-21 01:31:53 +00008699
8700 return rr_interval;
8701}
8702
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008703/*
8704 * All the scheduling class methods:
8705 */
Peter Zijlstra029632f2011-10-25 10:00:11 +02008706const struct sched_class fair_sched_class = {
Ingo Molnar5522d5d2007-10-15 17:00:12 +02008707 .next = &idle_sched_class,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008708 .enqueue_task = enqueue_task_fair,
8709 .dequeue_task = dequeue_task_fair,
8710 .yield_task = yield_task_fair,
Mike Galbraithd95f4122011-02-01 09:50:51 -05008711 .yield_to_task = yield_to_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008712
Ingo Molnar2e09bf52007-10-15 17:00:05 +02008713 .check_preempt_curr = check_preempt_wakeup,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008714
8715 .pick_next_task = pick_next_task_fair,
8716 .put_prev_task = put_prev_task_fair,
8717
Peter Williams681f3e62007-10-24 18:23:51 +02008718#ifdef CONFIG_SMP
Li Zefan4ce72a22008-10-22 15:25:26 +08008719 .select_task_rq = select_task_rq_fair,
Paul Turner0a74bef2012-10-04 13:18:30 +02008720 .migrate_task_rq = migrate_task_rq_fair,
Alex Shi141965c2013-06-26 13:05:39 +08008721
Christian Ehrhardt0bcdcf22009-11-30 12:16:46 +01008722 .rq_online = rq_online_fair,
8723 .rq_offline = rq_offline_fair,
Peter Zijlstra88ec22d2009-12-16 18:04:41 +01008724
Yuyang Du12695572015-07-15 08:04:40 +08008725 .task_dead = task_dead_fair,
Peter Zijlstrac5b28032015-05-15 17:43:35 +02008726 .set_cpus_allowed = set_cpus_allowed_common,
Peter Williams681f3e62007-10-24 18:23:51 +02008727#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008728
Srivatsa Vaddagiri83b699e2007-10-15 17:00:08 +02008729 .set_curr_task = set_curr_task_fair,
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008730 .task_tick = task_tick_fair,
Peter Zijlstracd29fe62009-11-27 17:32:46 +01008731 .task_fork = task_fork_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01008732
8733 .prio_changed = prio_changed_fair,
Peter Zijlstrada7a7352011-01-17 17:03:27 +01008734 .switched_from = switched_from_fair,
Steven Rostedtcb469842008-01-25 21:08:22 +01008735 .switched_to = switched_to_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05008736
Peter Williams0d721ce2009-09-21 01:31:53 +00008737 .get_rr_interval = get_rr_interval_fair,
8738
Stanislaw Gruszka6e998912014-11-12 16:58:44 +01008739 .update_curr = update_curr_fair,
8740
Peter Zijlstra810b3812008-02-29 15:21:01 -05008741#ifdef CONFIG_FAIR_GROUP_SCHED
Peter Zijlstrab2b5ce02010-10-15 15:24:15 +02008742 .task_move_group = task_move_group_fair,
Peter Zijlstra810b3812008-02-29 15:21:01 -05008743#endif
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008744};
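
/*
 * The ->next pointer above chains the scheduling classes in priority
 * order (stop -> dl -> rt -> fair -> idle); the core picks tasks by
 * walking this list, e.g. (paraphrased from kernel/sched/sched.h,
 * for illustration only):
 */
#if 0	/* illustrative sketch, not part of this file */
#define sched_class_highest	(&stop_sched_class)
#define for_each_class(class) \
	for (class = sched_class_highest; class; class = class->next)
#endif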
8745
8746#ifdef CONFIG_SCHED_DEBUG
Peter Zijlstra029632f2011-10-25 10:00:11 +02008747void print_cfs_stats(struct seq_file *m, int cpu)
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008748{
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008749 struct cfs_rq *cfs_rq;
8750
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01008751 rcu_read_lock();
Ingo Molnarc3b64f12007-08-09 11:16:51 +02008752 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
Ingo Molnar5cef9ec2007-08-09 11:16:47 +02008753 print_cfs_rq(m, cpu, cfs_rq);
Peter Zijlstra5973e5b2008-01-25 21:08:34 +01008754 rcu_read_unlock();
Ingo Molnarbf0f6f22007-07-09 18:51:58 +02008755}
Srikar Dronamraju397f2372015-06-25 22:51:43 +05308756
8757#ifdef CONFIG_NUMA_BALANCING
8758void show_numa_stats(struct task_struct *p, struct seq_file *m)
8759{
8760 int node;
8761 unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
8762
8763 for_each_online_node(node) {
8764 if (p->numa_faults) {
8765 tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
8766 tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
8767 }
8768 if (p->numa_group) {
8769			gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)];
8770 gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
8771 }
8772 print_numa_stats(m, node, tsf, tpf, gsf, gpf);
8773 }
8774}
8775#endif /* CONFIG_NUMA_BALANCING */
8776#endif /* CONFIG_SCHED_DEBUG */
Peter Zijlstra029632f2011-10-25 10:00:11 +02008777
8778__init void init_sched_fair_class(void)
8779{
8780#ifdef CONFIG_SMP
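	/*
	 * Periodic and nohz-idle load balancing run from this softirq,
	 * raised from the scheduler tick.
	 */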
8781 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
8782
Frederic Weisbecker3451d022011-08-10 23:21:01 +02008783#ifdef CONFIG_NO_HZ_COMMON
Diwakar Tundlam554ceca2012-03-07 14:44:26 -08008784 nohz.next_balance = jiffies;
Peter Zijlstra029632f2011-10-25 10:00:11 +02008785 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
Peter Zijlstra029632f2011-10-25 10:00:11 +02008786#endif
8787#endif /* SMP */
8788
8789}