#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

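/*
 * Illustrative example (numbers made up for clarity): avg_thermal.load_avg
 * is a PELT average of the capacity lost to thermal capping, so if a CPU of
 * capacity 1024 stays capped at 768 for long enough, thermal_load_avg()
 * converges towards roughly 256.
 */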
#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

static inline u64 thermal_load_avg(struct rq *rq)
{
	return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

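/*
 * Illustrative note (assuming the default PELT half-life, for which
 * sched-pelt.h defines LOAD_AVG_MAX = 47742): a geometric *_sum saturates
 * at LOAD_AVG_MAX when every past 1024us segment contributed fully. The
 * divider below is that maximum corrected for the part of the current
 * 1024us segment that has not elapsed yet (period_contrib), and is what a
 * *_sum is divided by to obtain the corresponding *_avg.
 */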
static inline u32 get_pelt_divider(struct sched_avg *avg)
{
	return LOAD_AVG_MAX - 1024 + avg->period_contrib;
}

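/*
 * Background note: UTIL_AVG_UNCHANGED is a flag bit carried inside
 * util_est.enqueued. While it is set, util_avg has not changed since
 * util_est last sampled it; cfs_se_util_change() clears it after a util_avg
 * update so that the next util_est update in fair.c knows fresh data is
 * available.
 */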
static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid the store if the flag has already been cleared */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Clear the flag to report that util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}

/*
 * clock_pelt scales time to reflect the effective amount of computation
 * done during a running delta, and is synced back to clock_task when the
 * rq becomes idle.
 *
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		/* The rq is idle, we can sync to clock_task */
		rq->clock_pelt = rq_clock_task(rq);
		return;
	}

	/*
	 * When a rq runs at a lower compute capacity, it needs more time to
	 * do the same amount of work than it would at max capacity. To keep
	 * the signal invariant, we scale the delta to reflect how much work
	 * has really been done.
	 * Running longer results in stealing idle time that would disturb
	 * the load signal compared to max capacity; this stolen idle time is
	 * reflected automatically when the rq becomes idle and the clock is
	 * synced with rq_clock_task.
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of computation.
	 */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}
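/*
 * Worked example (illustrative numbers): on a CPU whose
 * arch_scale_cpu_capacity() is 512 and which currently runs at half of its
 * max frequency (arch_scale_freq_capacity() ~= 512), a 4000us running delta
 * is scaled twice by cap_scale():
 *   4000 * 512 / 1024 = 2000, then 2000 * 512 / 1024 = 1000us
 * so clock_pelt only advances by 1000us worth of work. The remaining 3000us
 * is the "stolen" time reconciled when the rq goes idle and clock_pelt is
 * synced back to clock_task.
 */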

/*
 * When the rq becomes idle, we have to check whether it has lost idle time
 * because it was fully busy. A rq is fully used when the sum of the
 * util_sum signals (cfs + rt + dl) is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and rounding reasons, we don't take the position in the
 * current window (period_contrib) into account and we use the upper bound
 * of util_sum to decide.
 */
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;
	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	/*
	 * Reflecting stolen time makes sense only if the idle phase would
	 * have been present at max capacity. As soon as the utilization of a
	 * rq has reached the maximum value, it is considered an always
	 * running rq without idle time to steal. This potential idle time is
	 * considered lost in that case, and we keep track of it compared to
	 * rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}
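/*
 * Worked example (assuming LOAD_AVG_MAX = 47742 from sched-pelt.h and
 * SCHED_CAPACITY_SHIFT = 10): the threshold above evaluates to
 *   divider = ((47742 - 1024) << 10) - 47742 = 47791490
 * i.e. the "fully busy" bound from the comment, with period_contrib dropped
 * and LOAD_AVG_MAX subtracted as margin for comparing against the upper
 * bound of util_sum rather than its exact value.
 */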

static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
/* rq->task_clock normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif
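/*
 * Note on the throttled case above: while a cfs_rq is throttled, the value
 * returned is pinned at throttled_clock_task minus the time already spent
 * throttled, and throttled_clock_task_time grows with each throttled period,
 * so cfs_rq_clock_pelt() does not advance across time spent throttled.
 */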

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif