// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/sched/loadavg.c
 *
 * This file contains the magic bits required to compute the global loadavg
 * figure. It's a silly number but people think it's important. We go through
 * great pains to make it work on big machines and tickless kernels.
 */
#include "sched.h"

/*
 * Global load-average calculations
 *
 * We take a distributed and async approach to calculating the global load-avg
 * in order to minimize overhead.
 *
 * The global load average is an exponentially decaying average of nr_running +
 * nr_uninterruptible.
 *
 * Once every LOAD_FREQ:
 *
 *	nr_active = 0;
 *	for_each_possible_cpu(cpu)
 *		nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
 *
 *	avenrun[n] = avenrun[n] * exp_n + nr_active * (1 - exp_n)
 *
 * Due to a number of reasons the above turns into the mess below:
 *
 *  - for_each_possible_cpu() is prohibitively expensive on machines with a
 *    serious number of CPUs, therefore we need to take a distributed approach
 *    to calculating nr_active.
 *
 *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
 *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
 *
 *    So assuming nr_active := 0 when we start out -- true by definition -- we
 *    can simply take per-CPU deltas and fold those into a global accumulate
 *    to obtain the same result. See calc_load_fold_active().
 *
 *    Furthermore, in order to avoid synchronizing all per-CPU delta folding
 *    across the machine, we assume 10 ticks is sufficient time for every
 *    CPU to have completed this task.
 *
 *    This places an upper-bound on the IRQ-off latency of the machine. Then
 *    again, being late doesn't lose the delta, just wrecks the sample.
 *
 *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-CPU because
 *    this would add another cross-CPU cacheline miss and atomic operation
 *    to the wakeup path. Instead we increment on whatever CPU the task ran
 *    when it went into uninterruptible state and decrement on whatever CPU
 *    did the wakeup. This means that only the sum of nr_uninterruptible over
 *    all CPUs yields the correct result.
 *
 *  This covers the NO_HZ=n code; for extra headaches, see the comment below.
 */
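
/*
 * A minimal sketch of the per-CPU fold described above, with made-up numbers:
 * suppose a CPU had calc_load_active == 1 from the previous window and now
 * sees nr_running == 2 and nr_uninterruptible == 1 at its tick. Then
 * calc_load_fold_active() returns delta = (2 + 1) - 1 = 2, which the caller
 * adds to the global calc_load_tasks. Summing these deltas over all CPUs
 * reproduces exactly the nr_active loop sketched above, without ever walking
 * every possible CPU from one place.
 */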

/* Variables and functions for calc_load */
atomic_long_t calc_load_tasks;
unsigned long calc_load_update;
unsigned long avenrun[3];
EXPORT_SYMBOL(avenrun); /* should be removed */

/**
 * get_avenrun - get the load average array
 * @loads:	pointer to dest load array
 * @offset:	offset to add
 * @shift:	shift count to shift the result left
 *
 * These values are estimates at best, so no need for locking.
 */
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
{
        loads[0] = (avenrun[0] + offset) << shift;
        loads[1] = (avenrun[1] + offset) << shift;
        loads[2] = (avenrun[2] + offset) << shift;
}
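
/*
 * Illustrative use only (not part of this file): the /proc/loadavg code reads
 * these values roughly as
 *
 *	unsigned long loads[3];
 *	get_avenrun(loads, FIXED_1/200, 0);
 *
 * and then splits each fixed-point value into an integer and a two-digit
 * fractional part for display; the FIXED_1/200 offset rounds the displayed
 * value to the nearest 1/100 rather than truncating.
 */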

long calc_load_fold_active(struct rq *this_rq, long adjust)
{
        long nr_active, delta = 0;

        nr_active = this_rq->nr_running - adjust;
        nr_active += (long)this_rq->nr_uninterruptible;

        if (nr_active != this_rq->calc_load_active) {
                delta = nr_active - this_rq->calc_load_active;
                this_rq->calc_load_active = nr_active;
        }

        return delta;
}

/*
 * a1 = a0 * e + a * (1 - e)
 */
static unsigned long
calc_load(unsigned long load, unsigned long exp, unsigned long active)
{
        unsigned long newload;

        newload = load * exp + active * (FIXED_1 - exp);
        if (active >= load)
                newload += FIXED_1-1;

        return newload / FIXED_1;
}
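
/*
 * Worked example with the usual fixed-point constants (FSHIFT == 11, so
 * FIXED_1 == 2048, and EXP_1 == 1884 for the 1-minute average); the exact
 * values live in the loadavg header, the arithmetic below is only a sketch.
 *
 * Starting from avenrun[0] == 0 with one runnable task, active == 2048:
 *
 *	newload  = 0 * 1884 + 2048 * (2048 - 1884) = 335872
 *	newload += 2047 (round up, active >= load) = 337919
 *	newload / 2048                             = 164	(~0.08)
 *
 * i.e. a single busy LOAD_FREQ window nudges an idle 1-minute average up to
 * about 0.08, which is the familiar slow ramp of the load average.
 */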

#ifdef CONFIG_NO_HZ_COMMON
/*
 * Handle NO_HZ for the global load-average.
 *
 * Since the above described distributed algorithm to compute the global
 * load-average relies on per-CPU sampling from the tick, it is affected by
 * NO_HZ.
 *
 * The basic idea is to fold the nr_active delta into a global NO_HZ-delta upon
 * entering NO_HZ state such that we can include this as an 'extra' CPU delta
 * when we read the global state.
 *
 * Obviously reality has to ruin such a delightfully simple scheme:
 *
 *  - When we go NO_HZ idle during the window, we can negate our sample
 *    contribution, causing under-accounting.
 *
 *    We avoid this by keeping two NO_HZ-delta counters and flipping them
 *    when the window starts, thus separating old and new NO_HZ load.
 *
 *    The only trick is the slight shift in index flip for read vs write.
 *
 *        0s            5s            10s           15s
 *          +10           +10           +10           +10
 *        |-|-----------|-|-----------|-|-----------|-|
 *    r:0 0 1           1 0           0 1           1 0
 *    w:0 1 1           0 0           1 1           0 0
 *
 *    This ensures we'll fold the old NO_HZ contribution in this window while
 *    accumulating the new one.
 *
 *  - When we wake up from NO_HZ during the window, we push up our
 *    contribution, since we effectively move our sample point to a known
 *    busy state.
 *
 *    This is solved by pushing the window forward, and thus skipping the
 *    sample, for this CPU (effectively using the NO_HZ-delta for this CPU which
 *    was in effect at the time the window opened). This also solves the issue
 *    of having to deal with a CPU having been in NO_HZ for multiple LOAD_FREQ
 *    intervals.
 *
 *    When making the ILB scale, we should try to pull this in as well.
 */
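
/*
 * A concrete (hypothetical) timeline for the index shift above: say
 * calc_load_idx is even and calc_load_update points at the 5s mark. A CPU
 * that enters NO_HZ at 5s+3 ticks finds jiffies already past calc_load_update,
 * so calc_load_write_idx() steers its delta into the "new" slot (idx + 1).
 * The global update running at 5s+10 still reads the "old" slot via
 * calc_load_read_idx(), then calc_global_nohz() flips calc_load_idx. The
 * late delta is therefore not lost; it is simply deferred to the 10s sample,
 * which matches the r:/w: rows in the diagram.
 */
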
static atomic_long_t calc_load_nohz[2];
static int calc_load_idx;

static inline int calc_load_write_idx(void)
{
        int idx = calc_load_idx;

        /*
         * See calc_global_nohz(): if we observe the new index, we also
         * need to observe the new update time.
         */
        smp_rmb();

        /*
         * If the folding window started, make sure we start writing in the
         * next NO_HZ-delta.
         */
        if (!time_before(jiffies, READ_ONCE(calc_load_update)))
                idx++;

        return idx & 1;
}

static inline int calc_load_read_idx(void)
{
        return calc_load_idx & 1;
}

void calc_load_nohz_start(void)
{
        struct rq *this_rq = this_rq();
        long delta;

        /*
         * We're going into NO_HZ mode; if there's any pending delta, fold it
         * into the pending NO_HZ delta.
         */
        delta = calc_load_fold_active(this_rq, 0);
        if (delta) {
                int idx = calc_load_write_idx();

                atomic_long_add(delta, &calc_load_nohz[idx]);
        }
}

void calc_load_nohz_stop(void)
{
        struct rq *this_rq = this_rq();

        /*
         * If we're still before the pending sample window, we're done.
         */
        this_rq->calc_load_update = READ_ONCE(calc_load_update);
        if (time_before(jiffies, this_rq->calc_load_update))
                return;

        /*
         * We woke inside or after the sample window; this means we're already
         * accounted for through the NO_HZ accounting, so skip the entire deal
         * and sync up for the next window.
         */
        if (time_before(jiffies, this_rq->calc_load_update + 10))
                this_rq->calc_load_update += LOAD_FREQ;
}

static long calc_load_nohz_fold(void)
{
        int idx = calc_load_read_idx();
        long delta = 0;

        if (atomic_long_read(&calc_load_nohz[idx]))
                delta = atomic_long_xchg(&calc_load_nohz[idx], 0);

        return delta;
}

/**
 * fixed_power_int - compute: x^n, in O(log n) time
 *
 * @x:         base of the power
 * @frac_bits: fractional bits of @x
 * @n:         power to raise @x to.
 *
 * By exploiting the relation between the definition of the natural power
 * function: x^n := x*x*...*x (x multiplied by itself n times), and
 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
 * (where: n_i \elem {0, 1}, the binary vector representing n),
 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
 * of course trivially computable in O(log_2 n), the length of our binary
 * vector.
 */
static unsigned long
fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
{
        unsigned long result = 1UL << frac_bits;

        if (n) {
                for (;;) {
                        if (n & 1) {
                                result *= x;
                                result += 1UL << (frac_bits - 1);
                                result >>= frac_bits;
                        }
                        n >>= 1;
                        if (!n)
                                break;
                        x *= x;
                        x += 1UL << (frac_bits - 1);
                        x >>= frac_bits;
                }
        }

        return result;
}
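
/*
 * Sketch of one invocation, purely for illustration: n == 5 is 0b101, so
 * x^5 == x^4 * x^1. The loop above multiplies result by x on the first pass
 * (bit 0 set), skips the second pass (bit 1 clear), squares x on the way to
 * each next bit, and finally multiplies result by x^4 on the third pass --
 * two result multiplies plus two squarings instead of four plain multiplies,
 * with each intermediate rounded back to frac_bits of fraction.
 */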

/*
 * a1 = a0 * e + a * (1 - e)
 *
 * a2 = a1 * e + a * (1 - e)
 *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
 *    = a0 * e^2 + a * (1 - e) * (1 + e)
 *
 * a3 = a2 * e + a * (1 - e)
 *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
 *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
 *
 *  ...
 *
 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^(n-1))	[1]
 *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
 *    = a0 * e^n + a * (1 - e^n)
 *
 * [1] application of the geometric series:
 *
 *              n         1 - x^(n+1)
 *     S_n := \Sum x^i = -------------
 *             i=0          1 - x
 */
static unsigned long
calc_load_n(unsigned long load, unsigned long exp,
            unsigned long active, unsigned int n)
{
        return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
}
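
/*
 * In other words (illustrative only): calc_load_n(load, EXP_1, active, 3)
 * should give the same answer, up to fixed-point rounding, as applying
 * calc_load(load, EXP_1, active) three times in a row with the same 'active'
 * value -- the closed form a_n = a_0 * e^n + a * (1 - e^n) above is what lets
 * calc_global_nohz() catch up over several missed windows with a single
 * multiplication by e^n from fixed_power_int().
 */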

/*
 * NO_HZ can leave us missing all per-CPU ticks calling
 * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into
 * calc_load_nohz in calc_load_nohz_start(), all we need to do is fold
 * in the pending NO_HZ delta if our NO_HZ period crossed a load cycle boundary.
 *
 * Once we've updated the global active value, we need to apply the exponential
 * weights adjusted to the number of cycles missed.
 */
static void calc_global_nohz(void)
{
        unsigned long sample_window;
        long delta, active, n;

        sample_window = READ_ONCE(calc_load_update);
        if (!time_before(jiffies, sample_window + 10)) {
                /*
                 * Catch up: fold however many windows we are still behind.
                 */
                delta = jiffies - sample_window - 10;
                n = 1 + (delta / LOAD_FREQ);

                active = atomic_long_read(&calc_load_tasks);
                active = active > 0 ? active * FIXED_1 : 0;

                avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
                avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
                avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);

                WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ);
        }

        /*
         * Flip the NO_HZ index...
         *
         * Make sure we first write the new time then flip the index, so that
         * calc_load_write_idx() will see the new time when it reads the new
         * index; this avoids a double flip messing things up.
         */
        smp_wmb();
        calc_load_idx++;
}
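
/*
 * Rough example of the catch-up path, with hypothetical timings: suppose the
 * machine stayed in NO_HZ across roughly three LOAD_FREQ windows.
 * calc_global_load() folds the pending NO_HZ delta and applies one ordinary
 * calc_load() step for the window it woke up in; by the time we get here,
 * jiffies is still well past (sample_window + 10), so n works out to roughly
 * the two remaining windows and the calc_load_n() calls above apply that much
 * decay in one go before advancing calc_load_update past everything missed.
 */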
#else /* !CONFIG_NO_HZ_COMMON */

static inline long calc_load_nohz_fold(void) { return 0; }
static inline void calc_global_nohz(void) { }

#endif /* CONFIG_NO_HZ_COMMON */

/*
 * calc_global_load - update the avenrun load estimates 10 ticks after the
 * CPUs have updated calc_load_tasks.
 *
 * Called from the global timer code.
 */
void calc_global_load(unsigned long ticks)
{
        unsigned long sample_window;
        long active, delta;

        sample_window = READ_ONCE(calc_load_update);
        if (time_before(jiffies, sample_window + 10))
                return;

        /*
         * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
         */
        delta = calc_load_nohz_fold();
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);

        active = atomic_long_read(&calc_load_tasks);
        active = active > 0 ? active * FIXED_1 : 0;

        avenrun[0] = calc_load(avenrun[0], EXP_1, active);
        avenrun[1] = calc_load(avenrun[1], EXP_5, active);
        avenrun[2] = calc_load(avenrun[2], EXP_15, active);

        WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ);

        /*
         * In case we went to NO_HZ for multiple LOAD_FREQ intervals,
         * catch up in bulk.
         */
        calc_global_nohz();
}

/*
 * Called from scheduler_tick() to periodically update this CPU's
 * active count.
 */
void calc_global_load_tick(struct rq *this_rq)
{
        long delta;

        if (time_before(jiffies, this_rq->calc_load_update))
                return;

        delta = calc_load_fold_active(this_rq, 0);
        if (delta)
                atomic_long_add(delta, &calc_load_tasks);

        this_rq->calc_load_update += LOAD_FREQ;
}