/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <trace/events/power.h>
#include <linux/sched/sysctl.h>
#include "sched.h"
#include "tune.h"

#ifdef CONFIG_SCHED_WALT
unsigned long boosted_cpu_util(int cpu);
#endif

#define SUGOV_KTHREAD_PRIORITY	50

struct sugov_tunables {
	struct gov_attr_set attr_set;
	unsigned int rate_limit_us;
	unsigned int hispeed_load;
	unsigned int hispeed_freq;
	bool pl;
};

struct sugov_policy {
	struct cpufreq_policy *policy;

	struct sugov_tunables *tunables;
	struct list_head tunables_hook;

	raw_spinlock_t update_lock;  /* For shared policies */
	u64 last_freq_update_time;
	s64 freq_update_delay_ns;
	u64 last_ws;
	u64 curr_cycles;
	u64 last_cyc_update_time;
	unsigned long avg_cap;
	unsigned int next_freq;
	unsigned int cached_raw_freq;
	unsigned long hispeed_util;
	unsigned long max;

	/* The next fields are only needed if fast switch cannot be used. */
	struct irq_work irq_work;
	struct kthread_work work;
	struct mutex work_lock;
	struct kthread_worker worker;
	struct task_struct *thread;
	bool work_in_progress;

	bool need_freq_update;
};

struct sugov_cpu {
	struct update_util_data update_util;
	struct sugov_policy *sg_policy;

	unsigned long iowait_boost;
	unsigned long iowait_boost_max;
	u64 last_update;

	struct sched_walt_cpu_load walt_load;

	/* The fields below are only needed when sharing a policy. */
	unsigned long util;
	unsigned long max;
	unsigned int flags;
	unsigned int cpu;

	/* The field below is for single-CPU policies only. */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
static unsigned int stale_ns;
static DEFINE_PER_CPU(struct sugov_tunables *, cached_tunables);

/************************ Governor internals ***********************/

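/*
 * Return true if a frequency re-evaluation is due: either the policy limits
 * have changed (need_freq_update), or at least rate_limit_us has elapsed
 * since the last frequency update.
 */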
static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	if (unlikely(sg_policy->need_freq_update)) {
		sg_policy->need_freq_update = false;
		/*
		 * This happens when limits change, so forget the previous
		 * next_freq value and force an update.
		 */
		sg_policy->next_freq = UINT_MAX;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;
	return delta_ns >= sg_policy->freq_update_delay_ns;
}

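/*
 * Commit the selected frequency: do nothing if it is unchanged, switch
 * immediately when the driver supports fast switching, otherwise queue the
 * irq_work that wakes the governor kthread to perform the transition.
 */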
static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
				unsigned int next_freq)
{
	struct cpufreq_policy *policy = sg_policy->policy;

	if (sg_policy->next_freq == next_freq)
		return;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	if (policy->fast_switch_enabled) {
		next_freq = cpufreq_driver_fast_switch(policy, next_freq);
		if (next_freq == CPUFREQ_ENTRY_INVALID)
			return;

		policy->cur = next_freq;
		trace_cpu_frequency(next_freq, smp_processor_id());
	} else {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

#define TARGET_LOAD 80
/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	freq = (freq + (freq >> 2)) * util / max;
	trace_sugov_next_freq(policy->cpu, util, max, freq);

	if (freq == sg_policy->cached_raw_freq && sg_policy->next_freq != UINT_MAX)
		return sg_policy->next_freq;
	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

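/*
 * Gather the current utilization and capacity for @cpu. The PELT-based
 * estimate is computed first and then overridden by the WALT-based
 * cpu_util_freq() and boosted_cpu_util() values used on this kernel.
 */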
static void sugov_get_util(unsigned long *util, unsigned long *max, int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long cfs_max;
	struct sugov_cpu *loadcpu = &per_cpu(sugov_cpu, cpu);

	cfs_max = arch_scale_cpu_capacity(NULL, cpu);

	*util = min(rq->cfs.avg.util_avg, cfs_max);
	*max = cfs_max;

	*util = cpu_util_freq(cpu, &loadcpu->walt_load);
	*util = boosted_cpu_util(cpu);
}

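/*
 * Arm the iowait boost when a wakeup from I/O is signalled, and drop it if
 * the CPU appears to have been idle for more than a tick.
 */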
static void sugov_set_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
				   unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_IOWAIT) {
		sg_cpu->iowait_boost = sg_cpu->iowait_boost_max;
	} else if (sg_cpu->iowait_boost) {
		s64 delta_ns = time - sg_cpu->last_update;

		/* Clear iowait_boost if the CPU appears to have been idle. */
		if (delta_ns > TICK_NSEC)
			sg_cpu->iowait_boost = 0;
	}
}

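/*
 * Apply the current iowait boost to the (util, max) pair if it raises the
 * effective utilization, then halve the boost so it decays over successive
 * updates.
 */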
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
			       unsigned long *max)
{
	unsigned long boost_util = sg_cpu->iowait_boost;
	unsigned long boost_max = sg_cpu->iowait_boost_max;

	if (!boost_util)
		return;

	if (*util * boost_max < *max * boost_util) {
		*util = boost_util;
		*max = boost_max;
	}
	sg_cpu->iowait_boost >>= 1;
}

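/* Convert a frequency in kHz into an equivalent capacity/utilization value. */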
static unsigned long freq_to_util(struct sugov_policy *sg_policy,
				  unsigned int freq)
{
	return mult_frac(sg_policy->max, freq,
			 sg_policy->policy->cpuinfo.max_freq);
}

#define KHZ 1000
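/*
 * Accumulate the cycles run at @prev_freq since the last update, so that an
 * average frequency can be derived for the current WALT window.
 */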
static void sugov_track_cycles(struct sugov_policy *sg_policy,
			       unsigned int prev_freq,
			       u64 upto)
{
	u64 delta_ns, cycles;

	if (unlikely(!sysctl_sched_use_walt_cpu_util))
		return;

	/* Track cycles in current window */
	delta_ns = upto - sg_policy->last_cyc_update_time;
	delta_ns *= prev_freq;
	do_div(delta_ns, (NSEC_PER_SEC / KHZ));
	cycles = delta_ns;
	sg_policy->curr_cycles += cycles;
	sg_policy->last_cyc_update_time = upto;
}

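/*
 * At a window rollover, convert the cycles accumulated in the previous WALT
 * window into an average frequency and cache it as avg_cap (in capacity
 * units) for the hispeed-load checks.
 */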
static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
			       unsigned int prev_freq)
{
	u64 last_ws = sg_policy->last_ws;
	unsigned int avg_freq;

	if (unlikely(!sysctl_sched_use_walt_cpu_util))
		return;

	WARN_ON(curr_ws < last_ws);
	if (curr_ws <= last_ws)
		return;

	/* If we skipped some windows */
	if (curr_ws > (last_ws + sched_ravg_window)) {
		avg_freq = prev_freq;
		/* Reset tracking history */
		sg_policy->last_cyc_update_time = curr_ws;
	} else {
		sugov_track_cycles(sg_policy, prev_freq, curr_ws);
		avg_freq = sg_policy->curr_cycles;
		avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ);
	}
	sg_policy->avg_cap = freq_to_util(sg_policy, avg_freq);
	sg_policy->curr_cycles = 0;
	sg_policy->last_ws = curr_ws;
}

#define NL_RATIO 75
#define DEFAULT_HISPEED_LOAD 90
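/*
 * WALT-specific adjustments: bump utilization to hispeed_util when the CPU
 * is above hispeed_load, go to max when most of the load is new (NL_RATIO),
 * and honour the predicted load (pl) when that tunable is enabled.
 */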
static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
			      unsigned long *max)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
	unsigned long nl = sg_cpu->walt_load.nl;
	unsigned long cpu_util = sg_cpu->util;
	bool is_hiload;

	if (unlikely(!sysctl_sched_use_walt_cpu_util))
		return;

	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
					   sg_policy->tunables->hispeed_load,
					   100));

	if (is_hiload && !is_migration)
		*util = max(*util, sg_policy->hispeed_util);

	if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
		*util = *max;

	if (sg_policy->tunables->pl)
		*util = max(*util, sg_cpu->walt_load.pl);
}

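/*
 * A CPU is considered busy if the number of idle-loop entries has not
 * changed since the last check.
 */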
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls();
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

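/* Scheduler callback for CPUs that own a single-CPU cpufreq policy. */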
static void sugov_update_single(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util, max, hs_util;
	unsigned int next_f;
	bool busy;

	flags &= ~SCHED_CPUFREQ_RT_DL;

	if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
		return;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	if (!sugov_should_update_freq(sg_policy, time))
		return;

	busy = sugov_cpu_is_busy(sg_cpu);

	raw_spin_lock(&sg_policy->update_lock);
	if (flags & SCHED_CPUFREQ_RT_DL) {
		next_f = policy->cpuinfo.max_freq;
	} else {
		sugov_get_util(&util, &max, sg_cpu->cpu);
		if (sg_policy->max != max) {
			sg_policy->max = max;
			hs_util = freq_to_util(sg_policy,
					sg_policy->tunables->hispeed_freq);
			hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
			sg_policy->hispeed_util = hs_util;
		}

		sg_cpu->util = util;
		sg_cpu->max = max;
		sg_cpu->flags = flags;
		sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
				   sg_policy->policy->cur);
		trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util,
					sg_policy->avg_cap,
					max, sg_cpu->walt_load.nl,
					sg_cpu->walt_load.pl, flags);
		sugov_iowait_boost(sg_cpu, &util, &max);
		sugov_walt_adjust(sg_cpu, &util, &max);
		next_f = get_next_freq(sg_policy, util, max);
		/*
		 * Do not reduce the frequency if the CPU has not been idle
		 * recently, as the reduction is likely to be premature then.
		 */
		if (busy && next_f < sg_policy->next_freq)
			next_f = sg_policy->next_freq;
	}
	sugov_update_commit(sg_policy, time, next_f);
	raw_spin_unlock(&sg_policy->update_lock);
}

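/*
 * Pick the next frequency for a shared policy by aggregating the utilization
 * of all CPUs in the policy, ignoring CPUs whose data is older than stale_ns
 * and jumping straight to the maximum frequency for RT/DL activity.
 */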
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	u64 last_freq_update_time = sg_policy->last_freq_update_time;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;
		s64 delta_ns;

		/*
		 * If the CPU utilization was last updated before the previous
		 * frequency update and the time elapsed between the last update
		 * of the CPU utilization and the last frequency update is long
		 * enough, don't take the CPU into account as it probably is
		 * idle now (and clear iowait_boost for it).
		 */
		delta_ns = last_freq_update_time - j_sg_cpu->last_update;
		if (delta_ns > stale_ns) {
			j_sg_cpu->iowait_boost = 0;
			continue;
		}
		if (j_sg_cpu->flags & SCHED_CPUFREQ_RT_DL)
			return policy->cpuinfo.max_freq;

		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;
		if (j_util * max >= j_max * util) {
			util = j_util;
			max = j_max;
		}

		sugov_iowait_boost(j_sg_cpu, &util, &max);
		sugov_walt_adjust(j_sg_cpu, &util, &max);
	}

	return get_next_freq(sg_policy, util, max);
}

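/* Scheduler callback for CPUs that share a cpufreq policy. */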
static void sugov_update_shared(struct update_util_data *hook, u64 time,
				unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned long util, max, hs_util;
	unsigned int next_f;

	if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
		return;

	sugov_get_util(&util, &max, sg_cpu->cpu);

	flags &= ~SCHED_CPUFREQ_RT_DL;

	raw_spin_lock(&sg_policy->update_lock);

	if (sg_policy->max != max) {
		sg_policy->max = max;
		hs_util = freq_to_util(sg_policy,
					sg_policy->tunables->hispeed_freq);
		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
		sg_policy->hispeed_util = hs_util;
	}

	sg_cpu->util = util;
	sg_cpu->max = max;
	sg_cpu->flags = flags;

	sugov_set_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
			   sg_policy->policy->cur);

	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, sg_policy->avg_cap,
				max, sg_cpu->walt_load.nl,
				sg_cpu->walt_load.pl, flags);

	if (sugov_should_update_freq(sg_policy, time)) {
		if (flags & SCHED_CPUFREQ_RT_DL)
			next_f = sg_policy->policy->cpuinfo.max_freq;
		else
			next_f = sugov_next_freq_shared(sg_cpu);

		sugov_update_commit(sg_policy, time, next_f);
	}

	raw_spin_unlock(&sg_policy->update_lock);
}

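/*
 * Slow path: runs in the governor kthread, updates the cycle counters and
 * asks the cpufreq driver to switch to the previously selected frequency.
 */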
static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned long flags;

	mutex_lock(&sg_policy->work_lock);
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	sugov_track_cycles(sg_policy, sg_policy->policy->cur,
			   ktime_get_ns());
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
				CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);

	sg_policy->work_in_progress = false;
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	/*
	 * For RT and deadline tasks, the schedutil governor shoots the
	 * frequency to maximum. Special care must be taken to ensure that this
	 * kthread doesn't result in the same behavior.
	 *
	 * This is (mostly) guaranteed by the work_in_progress flag. The flag is
	 * updated only at the end of the sugov_work() function and before that
	 * the schedutil governor rejects all other frequency scaling requests.
	 *
	 * There is a very rare case though, where the RT thread yields right
	 * after the work_in_progress flag is cleared. The effects of that are
	 * neglected for now.
	 */
	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf,
				   size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static ssize_t hispeed_load_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_load);
}

static ssize_t hispeed_load_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	if (kstrtouint(buf, 10, &tunables->hispeed_load))
		return -EINVAL;

	tunables->hispeed_load = min(100U, tunables->hispeed_load);

	return count;
}

static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_freq);
}

static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
				  const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	unsigned int val;
	struct sugov_policy *sg_policy;
	unsigned long hs_util;
	unsigned long flags;

	if (kstrtouint(buf, 10, &val))
		return -EINVAL;

	tunables->hispeed_freq = val;
	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		hs_util = freq_to_util(sg_policy,
					sg_policy->tunables->hispeed_freq);
		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
		sg_policy->hispeed_util = hs_util;
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
	}

	return count;
}

static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->pl);
}

static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,
			size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	if (kstrtobool(buf, &tunables->pl))
		return -EINVAL;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);
static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
static struct governor_attr pl = __ATTR_RW(pl);

static struct attribute *sugov_attributes[] = {
	&rate_limit_us.attr,
	&hispeed_load.attr,
	&hispeed_freq.attr,
	&pl.attr,
	NULL
};

static struct kobj_type sugov_tunables_ktype = {
	.default_attrs = sugov_attributes,
	.sysfs_ops = &governor_sysfs_ops,
};

/********************** cpufreq governor interface *********************/

static struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO / 2 };
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setscheduler_nocheck(thread, SCHED_FIFO, &param);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_FIFO\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

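/*
 * Cache the per-policy tunables so they can be restored if the governor is
 * re-attached to this policy later.
 */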
static void sugov_tunables_save(struct cpufreq_policy *policy,
		struct sugov_tunables *tunables)
{
	int cpu;
	struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);

	if (!have_governor_per_policy())
		return;

	if (!cached) {
		cached = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!cached) {
			pr_warn("Couldn't allocate tunables for caching\n");
			return;
		}
		for_each_cpu(cpu, policy->related_cpus)
			per_cpu(cached_tunables, cpu) = cached;
	}

	cached->pl = tunables->pl;
	cached->hispeed_load = tunables->hispeed_load;
	cached->hispeed_freq = tunables->hispeed_freq;
	cached->rate_limit_us = tunables->rate_limit_us;
}

static void sugov_tunables_free(struct sugov_tunables *tunables)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;

	kfree(tunables);
}

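/* Restore previously cached tunables, if any, for this policy. */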
static void sugov_tunables_restore(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);

	if (!cached)
		return;

	tunables->pl = cached->pl;
	tunables->hispeed_load = cached->hispeed_load;
	tunables->hispeed_freq = cached->hispeed_freq;
	tunables->rate_limit_us = cached->rate_limit_us;
	sg_policy->freq_update_delay_ns =
		tunables->rate_limit_us * NSEC_PER_USEC;
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	unsigned int lat;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = LATENCY_MULTIPLIER;
	tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
	tunables->hispeed_freq = 0;
	lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (lat)
		tunables->rate_limit_us *= lat;

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;
	stale_ns = sched_ravg_window + (sched_ravg_window >> 3);

	sugov_tunables_restore(policy);

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	policy->governor_data = NULL;
	sugov_tunables_free(tunables);

stop_kthread:
	sugov_kthread_stop(sg_policy);

free_sg_policy:
	mutex_unlock(&global_tunables_lock);

	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count) {
		sugov_tunables_save(policy, tunables);
		sugov_tunables_free(tunables);
	}

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	sg_policy->freq_update_delay_ns = sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time = 0;
	sg_policy->next_freq = UINT_MAX;
	sg_policy->work_in_progress = false;
	sg_policy->need_freq_update = false;
	sg_policy->cached_raw_freq = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->sg_policy = sg_policy;
		sg_cpu->cpu = cpu;
		sg_cpu->flags = SCHED_CPUFREQ_RT;
		sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
	}

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util,
					     policy_is_shared(policy) ?
							sugov_update_shared :
							sugov_update_single);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_sched();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned long flags;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
				   ktime_get_ns());
		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->need_freq_update = true;
}

static struct cpufreq_governor schedutil_gov = {
	.name = "schedutil",
	.owner = THIS_MODULE,
	.init = sugov_init,
	.exit = sugov_exit,
	.start = sugov_start,
	.stop = sugov_stop,
	.limits = sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

static int __init sugov_register(void)
{
	return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);