#ifdef CONFIG_SMP
#include "sched-pelt.h"

int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);

#ifdef CONFIG_SCHED_THERMAL_PRESSURE
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);

static inline u64 thermal_load_avg(struct rq *rq)
{
	return READ_ONCE(rq->avg_thermal.load_avg);
}
#else
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
int update_irq_load_avg(struct rq *rq, u64 running);
#else
static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}
#endif

/*
 * When a task is dequeued, its estimated utilization should not be updated if
 * its util_avg has not been updated at least once.
 * This flag is used to synchronize util_avg updates with util_est updates.
 * We map this information into the LSB bit of the utilization saved at
 * dequeue time (i.e. util_est.enqueued).
 */
#define UTIL_AVG_UNCHANGED 0x1

static inline void cfs_se_util_change(struct sched_avg *avg)
{
	unsigned int enqueued;

	if (!sched_feat(UTIL_EST))
		return;

	/* Avoid store if the flag has already been cleared */
	enqueued = avg->util_est.enqueued;
	if (!(enqueued & UTIL_AVG_UNCHANGED))
		return;

	/* Reset flag to report util_avg has been updated */
	enqueued &= ~UTIL_AVG_UNCHANGED;
	WRITE_ONCE(avg->util_est.enqueued, enqueued);
}
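
/*
 * Illustrative example (hypothetical values, restating the comments above):
 * the util_est dequeue path in fair.c saves the task's utilization with the
 * LSB set, e.g. a utilization of 116 is stored as 116 | UTIL_AVG_UNCHANGED
 * == 117. The next time the task's util_avg is actually updated,
 * cfs_se_util_change() clears the LSB (117 -> 116). The util_est code can
 * then test the LSB to know whether util_avg has changed at least once since
 * the value was saved, and skip the util_est update when it has not.
 */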

/*
 * The clock_pelt scales the time to reflect the effective amount of
 * computation done during the running delta time, and is then synced back
 * to clock_task when the rq is idle.
 *
 * absolute time   | 1| 2| 3| 4| 5| 6| 7| 8| 9|10|11|12|13|14|15|16
 * @ max capacity  ------******---------------******---------------
 * @ half capacity ------************---------************---------
 * clock pelt      | 1| 2|    3|    4| 7| 8| 9|   10|   11|14|15|16
 *
 */
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
	if (unlikely(is_idle_task(rq->curr))) {
		/* The rq is idle, we can sync to clock_task */
		rq->clock_pelt = rq_clock_task(rq);
		return;
	}

	/*
	 * When a rq runs at a lower compute capacity, it will need
	 * more time to do the same amount of work than it would at
	 * max capacity. In order to be invariant, we scale the delta
	 * to reflect how much work has really been done.
	 * Running longer results in stealing idle time that will
	 * disturb the load signal compared to max capacity. This
	 * stolen idle time will be automatically reflected when the
	 * rq becomes idle and the clock is synced with
	 * rq_clock_task.
	 */

	/*
	 * Scale the elapsed time to reflect the real amount of
	 * computation
	 */
	delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
	delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));

	rq->clock_pelt += delta;
}
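
/*
 * Worked example (illustrative numbers, assuming cap_scale() scales its
 * argument by capacity / SCHED_CAPACITY_SCALE, i.e. by capacity / 1024):
 * on a CPU whose arch_scale_cpu_capacity() is 512 and which currently runs
 * at half of its max frequency (arch_scale_freq_capacity() ~= 512), a
 * wall-clock delta of 4000ns becomes 4000 * 512 / 1024 = 2000 and then
 * 2000 * 512 / 1024 = 1000, so clock_pelt advances by 1000ns. This is the
 * "@ half capacity" behaviour from the diagram above applied twice: the
 * PELT clock only advances in proportion to the work that could actually
 * be done.
 */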

/*
 * When rq becomes idle, we have to check if it has lost idle time
 * because it was fully busy. A rq is fully used when the /Sum util_sum
 * is greater than or equal to:
 * (LOAD_AVG_MAX - 1024 + rq->cfs.avg.period_contrib) << SCHED_CAPACITY_SHIFT;
 * For optimization and to account for computation rounding, we don't take
 * the position in the current window (period_contrib) into account and we
 * use the higher bound of util_sum to decide.
 */
static inline void update_idle_rq_clock_pelt(struct rq *rq)
{
	u32 divider = ((LOAD_AVG_MAX - 1024) << SCHED_CAPACITY_SHIFT) - LOAD_AVG_MAX;
	u32 util_sum = rq->cfs.avg.util_sum;
	util_sum += rq->avg_rt.util_sum;
	util_sum += rq->avg_dl.util_sum;

	/*
	 * Reflecting stolen time makes sense only if the idle
	 * phase would be present at max capacity. As soon as the
	 * utilization of a rq has reached the maximum value, it is
	 * considered as an always running rq without idle time to
	 * steal. This potential idle time is considered as lost in
	 * this case. We keep track of this lost idle time compared to
	 * rq's clock_task.
	 */
	if (util_sum >= divider)
		rq->lost_idle_time += rq_clock_task(rq) - rq->clock_pelt;
}
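
/*
 * Putting the helpers above together (illustrative summary): if a rq has
 * been fully busy, e.g. a CPU-bound task pinned to a capacity-limited CPU,
 * its summed util_sum saturates and reaches the divider above. The gap
 * between rq_clock_task() and the slower-moving rq->clock_pelt is then not
 * idle time that would have existed at max capacity, so instead of letting
 * the sync to clock_task decay the signal, the gap is accumulated in
 * rq->lost_idle_time and subtracted again in rq_clock_pelt() below.
 */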

static inline u64 rq_clock_pelt(struct rq *rq)
{
	lockdep_assert_held(&rq->lock);
	assert_clock_updated(rq);

	return rq->clock_pelt - rq->lost_idle_time;
}

#ifdef CONFIG_CFS_BANDWIDTH
/* rq->clock_pelt normalized against any time this cfs_rq has spent throttled */
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	if (unlikely(cfs_rq->throttle_count))
		return cfs_rq->throttled_clock_task - cfs_rq->throttled_clock_task_time;

	return rq_clock_pelt(rq_of(cfs_rq)) - cfs_rq->throttled_clock_task_time;
}
#else
static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
{
	return rq_clock_pelt(rq_of(cfs_rq));
}
#endif
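
/*
 * Illustrative note on the CONFIG_CFS_BANDWIDTH variant above (assuming the
 * throttled_clock_task* fields are the snapshot and accumulated throttled
 * time maintained by the throttling code in fair.c): while a cfs_rq is
 * throttled the returned clock stays frozen at its value when throttling
 * started, and once it is unthrottled the accumulated throttled time keeps
 * being subtracted, so time spent throttled does not contribute to this
 * cfs_rq's PELT signals.
 */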

#else

static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
	return 0;
}

static inline int
update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
	return 0;
}

static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
{
	return 0;
}

static inline u64 thermal_load_avg(struct rq *rq)
{
	return 0;
}

static inline int
update_irq_load_avg(struct rq *rq, u64 running)
{
	return 0;
}

static inline u64 rq_clock_pelt(struct rq *rq)
{
	return rq_clock_task(rq);
}

static inline void
update_rq_clock_pelt(struct rq *rq, s64 delta) { }

static inline void
update_idle_rq_clock_pelt(struct rq *rq) { }

#endif