#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

static inline u64 thermal_load_avg(struct rq *rq)
{
	return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

#define PELT_MIN_DIVIDER	(LOAD_AVG_MAX - 1024)

static inline u32 get_pelt_divider(struct sched_avg *avg)
{
	return PELT_MIN_DIVIDER + avg->period_contrib;
}
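
/*
 * Rough sketch of how this divider is used (see the ___update_load_avg()
 * machinery in pelt.c): the running averages are approximately their sums
 * divided by this value, e.g.
 *
 *	sa->util_avg ~= sa->util_sum / get_pelt_divider(sa);
 */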
46
Vincent Guittotc0796292018-06-28 17:45:04 +020047static inline void cfs_se_util_change(struct sched_avg *avg)
48{
49 unsigned int enqueued;
50
51 if (!sched_feat(UTIL_EST))
52 return;
53
Dietmar Eggemann68d7a192021-06-02 16:58:08 +020054 /* Avoid store if the flag has been already reset */
Vincent Guittotc0796292018-06-28 17:45:04 +020055 enqueued = avg->util_est.enqueued;
56 if (!(enqueued & UTIL_AVG_UNCHANGED))
57 return;
58
59 /* Reset flag to report util_avg has been updated */
60 enqueued &= ~UTIL_AVG_UNCHANGED;
61 WRITE_ONCE(avg->util_est.enqueued, enqueued);
62}
63
Vincent Guittot23127292019-01-23 16:26:53 +010064/*
65 * The clock_pelt scales the time to reflect the effective amount of
66 * computation done during the running delta time but then sync back to
67 * clock_task when rq is idle.
68 *
69 *
70 * absolute time | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
71 * @ max capacity ------******---------------******---------------
72 * @ half capacity ------************---------************---------
73 * clock pelt | 1| 2| 3| 4| 7| 8| 9| 10| 11|14|15|16
74 *
75 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		/* The rq is idle, we can sync to clock_task */
		rq->clock_pelt = rq_clock_task(rq);
		return;
	}

	/*
	 * When a rq runs at a lower compute capacity, it will need more
	 * time to do the same amount of work than it would at max
	 * capacity. In order to be invariant, we scale the delta to
	 * reflect how much work has really been done.
	 * Running longer results in stealing idle time that will disturb
	 * the load signal compared to max capacity. This stolen idle time
	 * will be reflected automatically when the rq becomes idle and the
	 * clock is synced with rq_clock_task.
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of
	 * computation.
	 */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}
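
/*
 * Worked example (illustrative, assuming SCHED_CAPACITY_SCALE == 1024):
 * on a CPU whose arch_scale_cpu_capacity() is 512 (half of the biggest CPU)
 * and whose arch_scale_freq_capacity() is 512 (running at half of its max
 * frequency), a 4ms rq_clock_task() delta advances clock_pelt by only
 * 4ms * 512/1024 * 512/1024 = 1ms, i.e. by the amount of work actually done.
 */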

/*
 * When the rq becomes idle, we have to check if it has lost some idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and rounding purposes, we don't take into account the
 * position in the current window (period_contrib) and we use the higher
 * bound of util_sum to decide.
 */
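/*
 * Rough numbers (illustrative, assuming the usual generated value
 * LOAD_AVG_MAX == 47742 and SCHED_CAPACITY_SHIFT == 10): each class's
 * util_sum saturates near LOAD_AVG_MAX << SCHED_CAPACITY_SHIFT (~48.9e6),
 * so the divider below (~47.8e6) is only reached when the summed
 * cfs/rt/dl utilization has effectively pinned the CPU.
 */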
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;
	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	/*
	 * Reflecting stolen time makes sense only if the idle phase would
	 * be present at max capacity. As soon as the utilization of a rq
	 * has reached the maximum value, it is considered an always-running
	 * rq without idle time to steal. This potential idle time is
	 * considered lost in that case. We keep track of this lost idle
	 * time compared to rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_rq_held(rq);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
/* rq_clock_pelt() normalized against any time this cfs_rq has spent throttled */
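/*
 * Behavioural sketch: while cfs_rq->throttle_count is non-zero, the value
 * returned below no longer follows rq_clock_pelt(), so the cfs_rq's PELT
 * clock is effectively frozen for the duration of the throttle and its
 * load/util signals do not decay while the group is throttled.
 */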
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif