// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

#define IOWAIT_BOOST_MIN        (SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
        struct gov_attr_set     attr_set;
        unsigned int            rate_limit_us;
};

struct sugov_policy {
        struct cpufreq_policy   *policy;

        struct sugov_tunables   *tunables;
        struct list_head        tunables_hook;

        raw_spinlock_t          update_lock;
        u64                     last_freq_update_time;
        s64                     freq_update_delay_ns;
        unsigned int            next_freq;
        unsigned int            cached_raw_freq;

        /* The next fields are only needed if fast switch cannot be used: */
        struct irq_work         irq_work;
        struct kthread_work     work;
        struct mutex            work_lock;
        struct kthread_worker   worker;
        struct task_struct      *thread;
        bool                    work_in_progress;

        bool                    limits_changed;
        bool                    need_freq_update;
};

struct sugov_cpu {
        struct update_util_data update_util;
        struct sugov_policy     *sg_policy;
        unsigned int            cpu;

        bool                    iowait_boost_pending;
        unsigned int            iowait_boost;
        u64                     last_update;

        unsigned long           util;
        unsigned long           bw_dl;
        unsigned long           max;

        /* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
        unsigned long           saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
        s64 delta_ns;

        /*
         * Since cpufreq_update_util() is called with rq->lock held for
         * the @target_cpu, our per-CPU data is fully serialized.
         *
         * However, drivers cannot in general deal with cross-CPU
         * requests, so while get_next_freq() will work, our
         * sugov_fast_switch() call may not for the fast switching platforms.
         *
         * Hence stop here for remote requests if they aren't supported
         * by the hardware, as calculating the frequency is pointless if
         * we cannot in fact act on it.
         *
         * This is needed on the slow switching platforms too to prevent CPUs
         * going offline from leaving stale IRQ work items behind.
         */
        if (!cpufreq_this_cpu_can_update(sg_policy->policy))
                return false;

        if (unlikely(sg_policy->limits_changed)) {
                sg_policy->limits_changed = false;
                sg_policy->need_freq_update = true;
                return true;
        }

        delta_ns = time - sg_policy->last_freq_update_time;

        return delta_ns >= sg_policy->freq_update_delay_ns;
}

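/*
 * Example of the rate limit above: rate_limit_us is converted to
 * freq_update_delay_ns as rate_limit_us * NSEC_PER_USEC (see sugov_start()
 * and rate_limit_us_store()).  With a hypothetical rate_limit_us of 2000,
 * delta_ns must reach 2,000,000 ns (2 ms) before another frequency update
 * is allowed, unless limits_changed forces one through earlier.
 */
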
static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
                                   unsigned int next_freq)
{
        if (sg_policy->need_freq_update)
                sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
        else if (sg_policy->next_freq == next_freq)
                return false;

        sg_policy->next_freq = next_freq;
        sg_policy->last_freq_update_time = time;

        return true;
}

static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
                              unsigned int next_freq)
{
        if (sugov_update_next_freq(sg_policy, time, next_freq))
                cpufreq_driver_fast_switch(sg_policy->policy, next_freq);
}

static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
                                  unsigned int next_freq)
{
        if (!sugov_update_next_freq(sg_policy, time, next_freq))
                return;

        if (!sg_policy->work_in_progress) {
                sg_policy->work_in_progress = true;
                irq_work_queue(&sg_policy->irq_work);
        }
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
                                  unsigned long util, unsigned long max)
{
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned int freq = arch_scale_freq_invariant() ?
                                policy->cpuinfo.max_freq : policy->cur;

        freq = map_util_freq(util, freq, max);

        if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
                return sg_policy->next_freq;

        sg_policy->cached_raw_freq = freq;
        return cpufreq_driver_resolve_freq(policy, freq);
}

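/*
 * Worked example for get_next_freq(), using hypothetical numbers: on a
 * frequency-invariant system with max_freq = 2000000 kHz, util = 614 and
 * max = 1024 (util / max ~= 0.6), the raw frequency is about
 * 1.25 * 2000000 * 614 / 1024 ~= 1499000 kHz, which
 * cpufreq_driver_resolve_freq() then maps to a frequency the driver
 * actually supports.  At util / max = 0.8 the request reaches max_freq,
 * which is the tipping point mentioned above.
 */
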
static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
        struct rq *rq = cpu_rq(sg_cpu->cpu);
        unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

        sg_cpu->max = max;
        sg_cpu->bw_dl = cpu_bw_dl(rq);
        sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(rq), max,
                                          FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled once a tick has elapsed since the
 * last update of the CPU. If a new IO wait boost is requested after more than
 * a tick, the boost is re-enabled starting from IOWAIT_BOOST_MIN, which
 * improves energy efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
                               bool set_iowait_boost)
{
        s64 delta_ns = time - sg_cpu->last_update;

        /* Reset boost only if a tick has elapsed since last request */
        if (delta_ns <= TICK_NSEC)
                return false;

        sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
        sg_cpu->iowait_boost_pending = set_iowait_boost;

        return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise the boost is reset and restarts from IOWAIT_BOOST_MIN.
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
                               unsigned int flags)
{
        bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sg_cpu->iowait_boost &&
            sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
                return;

        /* Boost only tasks waking up after IO */
        if (!set_iowait_boost)
                return;

        /* Ensure boost doubles only one time at each request */
        if (sg_cpu->iowait_boost_pending)
                return;
        sg_cpu->iowait_boost_pending = true;

        /* Double the boost at each request */
        if (sg_cpu->iowait_boost) {
                sg_cpu->iowait_boost =
                        min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
                return;
        }

        /* First wakeup after IO: start with minimum boost */
        sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}

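/*
 * Example of the doubling behaviour: with SCHED_CAPACITY_SCALE = 1024,
 * IOWAIT_BOOST_MIN is 128, so four "frequent and successive" IO wakeups
 * (each within a tick of the previous one) take iowait_boost through
 * 128 -> 256 -> 512 -> 1024, at which point it saturates at the
 * utilization of the maximum OPP.
 */
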
/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is decreased by this function each time an
 * increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which appears to have been idle for at least one tick also has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks which do IO frequently, while
 * being more conservative on tasks which do only sporadic IO operations.
 */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
{
        unsigned long boost;

        /* No boost currently required */
        if (!sg_cpu->iowait_boost)
                return;

        /* Reset boost if the CPU appears to have been idle enough */
        if (sugov_iowait_reset(sg_cpu, time, false))
                return;

        if (!sg_cpu->iowait_boost_pending) {
                /*
                 * No boost pending; reduce the boost value.
                 */
                sg_cpu->iowait_boost >>= 1;
                if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
                        sg_cpu->iowait_boost = 0;
                        return;
                }
        }

        sg_cpu->iowait_boost_pending = false;

        /*
         * sg_cpu->util is already in capacity scale; convert iowait_boost
         * into the same scale so we can compare.
         */
        boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
        if (sg_cpu->util < boost)
                sg_cpu->util = boost;
}

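/*
 * Example of the scale conversion above, using hypothetical values: with
 * iowait_boost = 512 and a CPU capacity (sg_cpu->max) of 800, the boost
 * becomes (512 * 800) >> 10 = 400 in capacity units, so sg_cpu->util is
 * raised to at least 400 for this update.
 */
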
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
        unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
        bool ret = idle_calls == sg_cpu->saved_idle_calls;

        sg_cpu->saved_idle_calls = idle_calls;
        return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
        if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
                sg_cpu->sg_policy->limits_changed = true;
}

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
                                              u64 time, unsigned int flags)
{
        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu);

        if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
                return false;

        sugov_get_util(sg_cpu);
        sugov_iowait_apply(sg_cpu, time);

        return true;
}

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int cached_freq = sg_policy->cached_raw_freq;
        unsigned int next_f;

        if (!sugov_update_single_common(sg_cpu, time, flags))
                return;

        next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
        /*
         * Do not reduce the frequency if the CPU has not been idle
         * recently, as the reduction is likely to be premature then.
         */
        if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
                next_f = sg_policy->next_freq;

                /* Restore cached freq as next_freq has changed */
                sg_policy->cached_raw_freq = cached_freq;
        }

        /*
         * This code runs under rq->lock for the target CPU, so it won't run
         * concurrently on two different CPUs for the same target and it is not
         * necessary to acquire the lock in the fast switch case.
         */
        if (sg_policy->policy->fast_switch_enabled) {
                sugov_fast_switch(sg_policy, time, next_f);
        } else {
                raw_spin_lock(&sg_policy->update_lock);
                sugov_deferred_update(sg_policy, time, next_f);
                raw_spin_unlock(&sg_policy->update_lock);
        }
}

static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
                                     unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        unsigned long prev_util = sg_cpu->util;

        /*
         * Fall back to the "frequency" path if frequency invariance is not
         * supported, because the direct mapping between the utilization and
         * the performance levels depends on the frequency invariance.
         */
        if (!arch_scale_freq_invariant()) {
                sugov_update_single_freq(hook, time, flags);
                return;
        }

        if (!sugov_update_single_common(sg_cpu, time, flags))
                return;

        /*
         * Do not reduce the target performance level if the CPU has not been
         * idle recently, as the reduction is likely to be premature then.
         */
        if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
                sg_cpu->util = prev_util;

        cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
                                   map_util_perf(sg_cpu->util), sg_cpu->max);

        sg_cpu->sg_policy->last_freq_update_time = time;
}

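/*
 * Note on the fast path above: cpufreq_driver_adjust_perf() is given the DL
 * bandwidth (with the schedutil headroom applied by map_util_perf()) as the
 * minimum performance level, the boosted utilization as the target level,
 * and sg_cpu->max as the capacity those levels are scaled against; the
 * driver then picks the actual operating point.
 */
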
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        struct cpufreq_policy *policy = sg_policy->policy;
        unsigned long util = 0, max = 1;
        unsigned int j;

        for_each_cpu(j, policy->cpus) {
                struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
                unsigned long j_util, j_max;

                sugov_get_util(j_sg_cpu);
                sugov_iowait_apply(j_sg_cpu, time);
                j_util = j_sg_cpu->util;
                j_max = j_sg_cpu->max;

                if (j_util * max > j_max * util) {
                        util = j_util;
                        max = j_max;
                }
        }

        return get_next_freq(sg_policy, util, max);
}

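/*
 * The comparison j_util * max > j_max * util above selects, without doing a
 * division, the CPU with the highest util / capacity ratio in the policy.
 * For example (hypothetical values), util = 300 on a capacity-512 CPU
 * (~0.59) wins over util = 500 on a capacity-1024 CPU (~0.49), so the
 * shared frequency request is driven by the proportionally busier CPU.
 */
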
static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
        struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
        struct sugov_policy *sg_policy = sg_cpu->sg_policy;
        unsigned int next_f;

        raw_spin_lock(&sg_policy->update_lock);

        sugov_iowait_boost(sg_cpu, time, flags);
        sg_cpu->last_update = time;

        ignore_dl_rate_limit(sg_cpu);

        if (sugov_should_update_freq(sg_policy, time)) {
                next_f = sugov_next_freq_shared(sg_cpu, time);

                if (sg_policy->policy->fast_switch_enabled)
                        sugov_fast_switch(sg_policy, time, next_f);
                else
                        sugov_deferred_update(sg_policy, time, next_f);
        }

        raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
        struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
        unsigned int freq;
        unsigned long flags;

        /*
         * Hold sg_policy->update_lock briefly to handle the case where
         * sg_policy->next_freq is read here and then updated by
         * sugov_deferred_update() just before work_in_progress is set to false
         * here; without the lock we could miss queueing the new update.
         *
         * Note: If a work item was queued after the update_lock is released,
         * sugov_work() will just be called again by the kthread_work code; and
         * the request will be processed before the sugov thread sleeps.
         */
        raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
        freq = sg_policy->next_freq;
        sg_policy->work_in_progress = false;
        raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

        mutex_lock(&sg_policy->work_lock);
        __cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
        mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
        struct sugov_policy *sg_policy;

        sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

        kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
        return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

        return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
        struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
        struct sugov_policy *sg_policy;
        unsigned int rate_limit_us;

        if (kstrtouint(buf, 10, &rate_limit_us))
                return -EINVAL;

        tunables->rate_limit_us = rate_limit_us;

        list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
                sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

        return count;
}

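/*
 * The attribute defined below is exposed through sysfs.  With per-policy
 * tunables, a hypothetical session could look like:
 *
 *   # cat /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *   2000
 *   # echo 5000 > /sys/devices/system/cpu/cpufreq/policy0/schedutil/rate_limit_us
 *
 * The store path updates freq_update_delay_ns for every sugov_policy
 * attached to these tunables via rate_limit_us_store().
 */
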
static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
        &rate_limit_us.attr,
        NULL
};
ATTRIBUTE_GROUPS(sugov);

static struct kobj_type sugov_tunables_ktype = {
        .default_groups = sugov_groups,
        .sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;

        sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
        if (!sg_policy)
                return NULL;

        sg_policy->policy = policy;
        raw_spin_lock_init(&sg_policy->update_lock);
        return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
        kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
        struct task_struct *thread;
        struct sched_attr attr = {
                .size           = sizeof(struct sched_attr),
                .sched_policy   = SCHED_DEADLINE,
                .sched_flags    = SCHED_FLAG_SUGOV,
                .sched_nice     = 0,
                .sched_priority = 0,
                /*
                 * Fake (unused) bandwidth; workaround to "fix"
                 * priority inheritance.
                 */
                .sched_runtime  = 1000000,
                .sched_deadline = 10000000,
                .sched_period   = 10000000,
        };
        struct cpufreq_policy *policy = sg_policy->policy;
        int ret;

        /* kthread only required for slow path */
        if (policy->fast_switch_enabled)
                return 0;

        kthread_init_work(&sg_policy->work, sugov_work);
        kthread_init_worker(&sg_policy->worker);
        thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
                                "sugov:%d",
                                cpumask_first(policy->related_cpus));
        if (IS_ERR(thread)) {
                pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
                return PTR_ERR(thread);
        }

        ret = sched_setattr_nocheck(thread, &attr);
        if (ret) {
                kthread_stop(thread);
                pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
                return ret;
        }

        sg_policy->thread = thread;
        kthread_bind_mask(thread, policy->related_cpus);
        init_irq_work(&sg_policy->irq_work, sugov_irq_work);
        mutex_init(&sg_policy->work_lock);

        wake_up_process(thread);

        return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
        /* kthread only required for slow path */
        if (sg_policy->policy->fast_switch_enabled)
                return;

        kthread_flush_worker(&sg_policy->worker);
        kthread_stop(sg_policy->thread);
        mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
        struct sugov_tunables *tunables;

        tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
        if (tunables) {
                gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
                if (!have_governor_per_policy())
                        global_tunables = tunables;
        }
        return tunables;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
        if (!have_governor_per_policy())
                global_tunables = NULL;

        kfree(tunables);
}

static int sugov_init(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy;
        struct sugov_tunables *tunables;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        cpufreq_enable_fast_switch(policy);

        sg_policy = sugov_policy_alloc(policy);
        if (!sg_policy) {
                ret = -ENOMEM;
                goto disable_fast_switch;
        }

        ret = sugov_kthread_create(sg_policy);
        if (ret)
                goto free_sg_policy;

        mutex_lock(&global_tunables_lock);

        if (global_tunables) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto stop_kthread;
                }
                policy->governor_data = sg_policy;
                sg_policy->tunables = global_tunables;

                gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
                goto out;
        }

        tunables = sugov_tunables_alloc(sg_policy);
        if (!tunables) {
                ret = -ENOMEM;
                goto stop_kthread;
        }

        tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

        policy->governor_data = sg_policy;
        sg_policy->tunables = tunables;

        ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
                                   get_governor_parent_kobj(policy), "%s",
                                   schedutil_gov.name);
        if (ret)
                goto fail;

out:
        mutex_unlock(&global_tunables_lock);
        return 0;

fail:
        kobject_put(&tunables->attr_set.kobj);
        policy->governor_data = NULL;
        sugov_tunables_free(tunables);

stop_kthread:
        sugov_kthread_stop(sg_policy);
        mutex_unlock(&global_tunables_lock);

free_sg_policy:
        sugov_policy_free(sg_policy);

disable_fast_switch:
        cpufreq_disable_fast_switch(policy);

        pr_err("initialization failed (error %d)\n", ret);
        return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        struct sugov_tunables *tunables = sg_policy->tunables;
        unsigned int count;

        mutex_lock(&global_tunables_lock);

        count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
        policy->governor_data = NULL;
        if (!count)
                sugov_tunables_free(tunables);

        mutex_unlock(&global_tunables_lock);

        sugov_kthread_stop(sg_policy);
        sugov_policy_free(sg_policy);
        cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
        unsigned int cpu;

        sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
        sg_policy->last_freq_update_time = 0;
        sg_policy->next_freq = 0;
        sg_policy->work_in_progress = false;
        sg_policy->limits_changed = false;
        sg_policy->cached_raw_freq = 0;

        sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                memset(sg_cpu, 0, sizeof(*sg_cpu));
                sg_cpu->cpu = cpu;
                sg_cpu->sg_policy = sg_policy;
        }

        if (policy_is_shared(policy))
                uu = sugov_update_shared;
        else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
                uu = sugov_update_single_perf;
        else
                uu = sugov_update_single_freq;

        for_each_cpu(cpu, policy->cpus) {
                struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

                cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
        }
        return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;
        unsigned int cpu;

        for_each_cpu(cpu, policy->cpus)
                cpufreq_remove_update_util_hook(cpu);

        synchronize_rcu();

        if (!policy->fast_switch_enabled) {
                irq_work_sync(&sg_policy->irq_work);
                kthread_cancel_work_sync(&sg_policy->work);
        }
}

static void sugov_limits(struct cpufreq_policy *policy)
{
        struct sugov_policy *sg_policy = policy->governor_data;

        if (!policy->fast_switch_enabled) {
                mutex_lock(&sg_policy->work_lock);
                cpufreq_policy_apply_limits(policy);
                mutex_unlock(&sg_policy->work_lock);
        }

        sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
        .name   = "schedutil",
        .owner  = THIS_MODULE,
        .flags  = CPUFREQ_GOV_DYNAMIC_SWITCHING,
        .init   = sugov_init,
        .exit   = sugov_exit,
        .start  = sugov_start,
        .stop   = sugov_stop,
        .limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
        return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);

#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
        rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
                                   struct cpufreq_governor *old_gov)
{
        if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
                /*
                 * When called from the cpufreq_register_driver() path, the
                 * cpu_hotplug_lock is already held, so use a work item to
                 * avoid nested locking in rebuild_sched_domains().
                 */
                schedule_work(&rebuild_sd_work);
        }
}
#endif