// SPDX-License-Identifier: GPL-2.0
/*
 * Per Entity Load Tracking
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * Move PELT related code from fair.c into this pelt.c file
 * Author: Vincent Guittot <vincent.guittot@linaro.org>
 */

#include <linux/sched.h>
#include "sched.h"
#include "pelt.h"

#include <trace/events/sched.h>

/*
 * Approximate:
 *   val * y^n,    where y^32 ~= 0.5 (~1 scheduling period)
 */
static u64 decay_load(u64 val, u64 n)
{
	unsigned int local_n;

	if (unlikely(n > LOAD_AVG_PERIOD * 63))
		return 0;

	/* after bounds checking we can collapse to 32-bit */
	local_n = n;

	/*
	 * As y^PERIOD = 1/2, we can combine
	 *    y^n = 1/2^(n/PERIOD) * y^(n%PERIOD)
	 * With a look-up table which covers y^n (n<PERIOD)
	 *
	 * To achieve constant time decay_load.
	 */
	if (unlikely(local_n >= LOAD_AVG_PERIOD)) {
		val >>= local_n / LOAD_AVG_PERIOD;
		local_n %= LOAD_AVG_PERIOD;
	}

	val = mul_u64_u32_shr(val, runnable_avg_yN_inv[local_n], 32);
	return val;
}

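/*
 * A couple of rough examples of the above (assuming the generated
 * constants from sched-pelt.h: LOAD_AVG_PERIOD == 32 and
 * runnable_avg_yN_inv[n] ~= y^n * 2^32):
 *
 *   decay_load(v, 32)  -> v >> 1, then * y^0   ~= v / 2
 *   decay_load(v, 100) -> v >> 3, then * y^4   ~= v * 0.115
 *                         (0.5^(100/32) ~= 0.1147)
 *
 * n > LOAD_AVG_PERIOD * 63 corresponds to scaling by less than 2^-63,
 * which zeroes any value the *_sum fields can realistically hold, hence
 * the early return of 0.
 */
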
static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
	u32 c1, c2, c3 = d3; /* y^0 == 1 */

	/*
	 * c1 = d1 y^p
	 */
	c1 = decay_load((u64)d1, periods);

	/*
	 *            p-1
	 * c2 = 1024 \Sum y^n
	 *            n=1
	 *
	 *              inf        inf
	 *    = 1024 ( \Sum y^n - \Sum y^n - y^0 )
	 *              n=0        n=p
	 */
	c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;

	return c1 + c2 + c3;
}

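/*
 * Rough worked example of the above (illustrative numbers only): with
 * p == 2 periods crossed, d1 == 200 and d3 == 300:
 *
 *   c1 = 200 * y^2   ~=  191
 *   c2 = 1024 * y    ~= 1002   (the single full period in between)
 *   c3 = 300          =  300
 *
 * i.e. roughly one and a half periods worth of contribution, with the
 * older parts slightly decayed. The LOAD_AVG_MAX form of c2 avoids
 * summing the series term by term.
 */
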
#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)

/*
 * Accumulate the three separate parts of the sum; d1 the remainder
 * of the last (incomplete) period, d2 the span of full periods and d3
 * the remainder of the (incomplete) current period.
 *
 *           d1          d2           d3
 *           ^           ^            ^
 *           |           |            |
 *         |<->|<----------------->|<--->|
 * ... |---x---|------| ... |------|-----x (now)
 *
 *                           p-1
 * u' = (u + d1) y^p + 1024 \Sum y^n + d3 y^0
 *                           n=1
 *
 *    = u y^p +					(Step 1)
 *
 *                     p-1
 *      d1 y^p + 1024 \Sum y^n + d3 y^0		(Step 2)
 *                     n=1
 */
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
	       unsigned long load, unsigned long runnable, int running)
{
	u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
	u64 periods;

	delta += sa->period_contrib;
	periods = delta / 1024; /* A period is 1024us (~1ms) */

	/*
	 * Step 1: decay old *_sum if we crossed period boundaries.
	 */
	if (periods) {
		sa->load_sum = decay_load(sa->load_sum, periods);
		sa->runnable_load_sum =
			decay_load(sa->runnable_load_sum, periods);
		sa->util_sum = decay_load((u64)(sa->util_sum), periods);

		/*
		 * Step 2
		 */
		delta %= 1024;
		if (load) {
			/*
			 * This relies on the:
			 *
			 *   if (!load)
			 *     runnable = running = 0;
			 *
			 * clause from ___update_load_sum(); this makes the
			 * below usage of @contrib disappear entirely, so
			 * there is no point in calculating it.
			 */
			contrib = __accumulate_pelt_segments(periods,
					1024 - sa->period_contrib, delta);
		}
	}
	sa->period_contrib = delta;

	if (load)
		sa->load_sum += load * contrib;
	if (runnable)
		sa->runnable_load_sum += runnable * contrib;
	if (running)
		sa->util_sum += contrib << SCHED_CAPACITY_SHIFT;

	return periods;
}

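/*
 * Rough worked example for accumulate_sum() (illustrative numbers only):
 * assume sa->period_contrib == 824 and a new delta of 1400us. Then delta
 * becomes 2224, periods == 2, and the new period_contrib is
 * 2224 % 1024 == 176. With a non-zero @load, contrib is
 * __accumulate_pelt_segments(2, 1024 - 824, 176), i.e. d1 == 200 and
 * d3 == 176. Each *_sum then grows by (weight *) contrib, with util_sum
 * using a fixed weight of SCHED_CAPACITY_SCALE (contrib << 10).
 */
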
/*
 * We can represent the historical contribution to runnable average as the
 * coefficients of a geometric series. To do this we sub-divide our runnable
 * history into segments of approximately 1ms (1024us); label the segment that
 * occurred N-ms ago p_N, with p_0 corresponding to the current period, e.g.
 *
 * [<- 1024us ->|<- 1024us ->|<- 1024us ->| ...
 *       p0            p1           p2
 *      (now)       (~1ms ago)  (~2ms ago)
 *
 * Let u_i denote the fraction of p_i that the entity was runnable.
 *
 * We then designate the fractions u_i as our co-efficients, yielding the
 * following representation of historical load:
 *   u_0 + u_1*y + u_2*y^2 + u_3*y^3 + ...
 *
 * We choose y based on the width of a reasonable scheduling period, fixing:
 *   y^32 = 0.5
 *
 * This means that the contribution to load ~32ms ago (u_32) will be weighted
 * approximately half as much as the contribution to load within the last ms
 * (u_0).
 *
 * When a period "rolls over" and we have new u_0`, multiplying the previous
 * sum again by y is sufficient to update:
 *   load_avg = u_0` + y*(u_0 + u_1*y + u_2*y^2 + ... )
 *            = u_0 + u_1*y + u_2*y^2 + ... [re-labeling u_i --> u_{i+1}]
 */
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
		  unsigned long load, unsigned long runnable, int running)
{
	u64 delta;

	delta = now - sa->last_update_time;
	/*
	 * This should only happen when time goes backwards, which it
	 * unfortunately does during sched clock init when we swap over to TSC.
	 */
	if ((s64)delta < 0) {
		sa->last_update_time = now;
		return 0;
	}

	/*
	 * Use 1024ns as the unit of measurement since it's a reasonable
	 * approximation of 1us and fast to compute.
	 */
	delta >>= 10;
	if (!delta)
		return 0;

	sa->last_update_time += delta << 10;

	/*
	 * running is a subset of runnable (weight) so running can't be set if
	 * runnable is clear. But there are some corner cases where the current
	 * se has already been dequeued but cfs_rq->curr still points to it.
	 * In that case weight is 0 while running is still set, both for a
	 * sched_entity and for a cfs_rq if the latter becomes idle. As an
	 * example, this happens during idle_balance() which calls
	 * update_blocked_averages().
	 *
	 * Also see the comment in accumulate_sum().
	 */
	if (!load)
		runnable = running = 0;

	/*
	 * Now we know we crossed measurement unit boundaries. The *_avg
	 * accrues by two steps:
	 *
	 * Step 1: accumulate *_sum since last_update_time. If we haven't
	 * crossed period boundaries, finish.
	 */
	if (!accumulate_sum(delta, sa, load, runnable, running))
		return 0;

	return 1;
}

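/*
 * Rough worked example for ___update_load_sum() (illustrative only): if
 * last_update_time lags @now by 2500000ns (2.5ms), delta >>= 10 gives
 * 2441 "microseconds" (each unit is really 1024ns), last_update_time
 * advances by 2441 << 10 == 2499584ns and the ~416ns remainder is carried
 * into the next update. A delta smaller than 1024ns is dropped early and
 * last_update_time is left untouched, so that sub-unit remainder is not
 * lost either.
 */
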
static __always_inline void
___update_load_avg(struct sched_avg *sa, unsigned long load, unsigned long runnable)
{
	u32 divider = LOAD_AVG_MAX - 1024 + sa->period_contrib;

	/*
	 * Step 2: update *_avg.
	 */
	sa->load_avg = div_u64(load * sa->load_sum, divider);
	sa->runnable_load_avg =	div_u64(runnable * sa->runnable_load_sum, divider);
	WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}

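/*
 * The divider above is the largest value a *_sum can have accrued given
 * the current period_contrib: an entity that has been runnable over its
 * whole history ends up with load_sum ~= divider, so load_avg saturates
 * at roughly se_weight(se) and util_avg at roughly SCHED_CAPACITY_SCALE
 * (1024). A task runnable about half of the time settles around half of
 * those values (rough intuition, not an exact bound).
 */
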
/*
 * sched_entity:
 *
 *   task:
 *     se_runnable() == se_weight()
 *
 *   group: [ see update_cfs_group() ]
 *     se_weight()   = tg->weight * grq->load_avg / tg->load_avg
 *     se_runnable() = se_weight(se) * grq->runnable_load_avg / grq->load_avg
 *
 *   load_sum := runnable_sum
 *   load_avg = se_weight(se) * runnable_avg
 *
 *   runnable_load_sum := runnable_sum
 *   runnable_load_avg = se_runnable(se) * runnable_avg
 *
 *   XXX collapse load_sum and runnable_load_sum
 *
 * cfs_rq:
 *
 *   load_sum = \Sum se_weight(se) * se->avg.load_sum
 *   load_avg = \Sum se->avg.load_avg
 *
 *   runnable_load_sum = \Sum se_runnable(se) * se->avg.runnable_load_sum
 *   runnable_load_avg = \Sum se->avg.runnable_load_avg
 */

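/*
 * Rough example of the mapping above (illustrative numbers): a single
 * always-running nice-0 task has se_weight() == se_runnable() == 1024,
 * a *_sum close to the divider, and hence load_avg, runnable_load_avg and
 * util_avg all close to 1024. If it is runnable only half of the time,
 * load_avg and runnable_load_avg settle around 512 while util_avg tracks
 * the fraction of time it actually runs; the exact values depend on the
 * sampling pattern.
 */
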
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (___update_load_sum(now, &se->avg, !!se->on_rq, !!se->on_rq,
				cfs_rq->curr == se)) {

		___update_load_avg(&se->avg, se_weight(se), se_runnable(se));
		cfs_se_util_change(&se->avg);
		trace_pelt_se_tp(se);
		return 1;
	}

	return 0;
}

int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
	if (___update_load_sum(now, &cfs_rq->avg,
				scale_load_down(cfs_rq->load.weight),
				scale_load_down(cfs_rq->runnable_weight),
				cfs_rq->curr != NULL)) {

		___update_load_avg(&cfs_rq->avg, 1, 1);
		trace_pelt_cfs_tp(cfs_rq);
		return 1;
	}

	return 0;
}

/*
 * rt_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 *   load_avg and runnable_load_avg are not supported and meaningless.
 *
 */

int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_rt,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_rt, 1, 1);
		trace_pelt_rt_tp(rq);
		return 1;
	}

	return 0;
}

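/*
 * For these per-class rq signals (rt, dl and irq) only utilization is
 * meaningful: ___update_load_sum() is fed the same 0/1 "running" value for
 * load, runnable and running, and ___update_load_avg() is called with
 * weight 1, so util_avg simply tracks the fraction of CPU time spent in
 * that class, scaled to SCHED_CAPACITY_SCALE.
 */
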
/*
 * dl_rq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 */

int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	if (___update_load_sum(now, &rq->avg_dl,
				running,
				running,
				running)) {

		___update_load_avg(&rq->avg_dl, 1, 1);
		trace_pelt_dl_tp(rq);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
/*
 * irq:
 *
 *   util_sum = \Sum se->avg.util_sum but se->avg.util_sum is not tracked
 *   util_sum = cpu_scale * load_sum
 *   runnable_load_sum = load_sum
 *
 */

int update_irq_load_avg(struct rq *rq, u64 running)
{
	int ret = 0;

	/*
	 * We can't use clock_pelt because irq time is not accounted in
	 * clock_task. Instead we directly scale the running time to
	 * reflect the real amount of computation.
	 */
	running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
	running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));

	/*
	 * We know how much time has been used by interrupts since the last
	 * update, but we don't know when it happened. Be pessimistic and
	 * assume the interrupts occurred just before this update. This is not
	 * far from reality because an interrupt will most probably wake up a
	 * task and trigger an update of the rq clock, during which the metric
	 * is updated.
	 * We first decay over the normal context time and then accrue the
	 * interrupt context time.
	 * We can safely subtract running from rq->clock because
	 * rq->clock += delta with delta >= running.
	 */
	ret = ___update_load_sum(rq->clock - running, &rq->avg_irq,
				0,
				0,
				0);
	ret += ___update_load_sum(rq->clock, &rq->avg_irq,
				1,
				1,
				1);

	if (ret) {
		___update_load_avg(&rq->avg_irq, 1, 1);
		trace_pelt_irq_tp(rq);
	}

	return ret;
}
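
/*
 * Rough worked example of the two calls above (illustrative only): assume
 * 200us of irq/steal time were accounted since the last update. The first
 * call decays avg_irq over the window up to (rq->clock - running), during
 * which no irq time ran; the second accrues those 200us at full weight up
 * to rq->clock. The double cap_scale() beforehand converts wall-clock irq
 * time into frequency- and capacity-invariant time, since clock_pelt
 * cannot be used here (irq time is not accounted in clock_task).
 */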
#endif