// SPDX-License-Identifier: GPL-2.0
/*
 * CPUFreq governor based on scheduler-provided CPU utilization data.
 *
 * Copyright (C) 2016, Intel Corporation
 * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "sched.h"

#include <linux/sched/cpufreq.h>
#include <trace/events/power.h>

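/*
 * Minimum/initial IO-wait boost: 1/8 of the capacity scale, i.e. 128 with
 * the default SCHED_CAPACITY_SCALE of 1024.
 */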
#define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)

struct sugov_tunables {
	struct gov_attr_set	attr_set;
	unsigned int		rate_limit_us;
};

struct sugov_policy {
	struct cpufreq_policy	*policy;

	struct sugov_tunables	*tunables;
	struct list_head	tunables_hook;

	raw_spinlock_t		update_lock;
	u64			last_freq_update_time;
	s64			freq_update_delay_ns;
	unsigned int		next_freq;
	unsigned int		cached_raw_freq;

	/* The next fields are only needed if fast switch cannot be used: */
	struct irq_work		irq_work;
	struct kthread_work	work;
	struct mutex		work_lock;
	struct kthread_worker	worker;
	struct task_struct	*thread;
	bool			work_in_progress;

	bool			limits_changed;
	bool			need_freq_update;
};

struct sugov_cpu {
	struct update_util_data	update_util;
	struct sugov_policy	*sg_policy;
	unsigned int		cpu;

	bool			iowait_boost_pending;
	unsigned int		iowait_boost;
	u64			last_update;

	unsigned long		util;
	unsigned long		bw_dl;
	unsigned long		max;

	/* The field below is for single-CPU policies only: */
#ifdef CONFIG_NO_HZ_COMMON
	unsigned long		saved_idle_calls;
#endif
};

static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);

/************************ Governor internals ***********************/

static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
{
	s64 delta_ns;

	/*
	 * Since cpufreq_update_util() is called with rq->lock held for
	 * the @target_cpu, our per-CPU data is fully serialized.
	 *
	 * However, drivers cannot in general deal with cross-CPU
	 * requests, so while get_next_freq() will work, actually
	 * committing the new frequency may not on fast switching platforms.
	 *
	 * Hence stop here for remote requests if they aren't supported
	 * by the hardware, as calculating the frequency is pointless if
	 * we cannot in fact act on it.
	 *
	 * This is needed on slow switching platforms too, to prevent CPUs
	 * going offline from leaving stale IRQ work items behind.
	 */
	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
		return false;

	if (unlikely(sg_policy->limits_changed)) {
		sg_policy->limits_changed = false;
		sg_policy->need_freq_update = true;
		return true;
	}

	delta_ns = time - sg_policy->last_freq_update_time;

	return delta_ns >= sg_policy->freq_update_delay_ns;
}

static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
				   unsigned int next_freq)
{
	if (sg_policy->need_freq_update)
		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
	else if (sg_policy->next_freq == next_freq)
		return false;

	sg_policy->next_freq = next_freq;
	sg_policy->last_freq_update_time = time;

	return true;
}

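/*
 * Slow path: queue the irq_work that hands the frequency update over to the
 * governor kthread, unless a previously queued update is still in flight.
 */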
static void sugov_deferred_update(struct sugov_policy *sg_policy)
{
	if (!sg_policy->work_in_progress) {
		sg_policy->work_in_progress = true;
		irq_work_queue(&sg_policy->irq_work);
	}
}

/**
 * get_next_freq - Compute a new frequency for a given cpufreq policy.
 * @sg_policy: schedutil policy object to compute the new frequency for.
 * @util: Current CPU utilization.
 * @max: CPU capacity.
 *
 * If the utilization is frequency-invariant, choose the new frequency to be
 * proportional to it, that is
 *
 * next_freq = C * max_freq * util / max
 *
 * Otherwise, approximate the would-be frequency-invariant utilization by
 * util_raw * (curr_freq / max_freq) which leads to
 *
 * next_freq = C * curr_freq * util_raw / max
 *
 * Take C = 1.25 for the frequency tipping point at (util / max) = 0.8.
 *
 * The lowest driver-supported frequency which is equal to or greater than the
 * raw next_freq (as calculated above) is returned, subject to policy min/max
 * and cpufreq driver limitations.
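 *
 * For example, with util = 512, max = 1024 and a 2000000 kHz maximum
 * frequency, the raw next_freq is 1.25 * 2000000 * 512 / 1024 = 1250000 kHz.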
 */
static unsigned int get_next_freq(struct sugov_policy *sg_policy,
				  unsigned long util, unsigned long max)
{
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned int freq = arch_scale_freq_invariant() ?
				policy->cpuinfo.max_freq : policy->cur;

	util = map_util_perf(util);
	freq = map_util_freq(util, freq, max);

	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
		return sg_policy->next_freq;

	sg_policy->cached_raw_freq = freq;
	return cpufreq_driver_resolve_freq(policy, freq);
}

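/*
 * Snapshot the data used for frequency selection: the CPU capacity, the DL
 * bandwidth (kept separately so that sugov_should_update_freq() can ignore
 * the rate limit when it grows), and the aggregate utilization reported by
 * effective_cpu_util() for FREQUENCY_UTIL.
 */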
static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
	struct rq *rq = cpu_rq(sg_cpu->cpu);
	unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);

	sg_cpu->max = max;
	sg_cpu->bw_dl = cpu_bw_dl(rq);
	sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu), max,
					  FREQUENCY_UTIL, NULL);
}

/**
 * sugov_iowait_reset() - Reset the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @set_iowait_boost: true if an IO boost has been requested
 *
 * The IO wait boost of a task is disabled after a tick since the last update
 * of a CPU. If a new IO wait boost is requested after more than a tick, then
 * we enable the boost starting from IOWAIT_BOOST_MIN, which improves energy
 * efficiency by ignoring sporadic wakeups from IO.
 */
static bool sugov_iowait_reset(struct sugov_cpu *sg_cpu, u64 time,
			       bool set_iowait_boost)
{
	s64 delta_ns = time - sg_cpu->last_update;

	/* Reset boost only if a tick has elapsed since last request */
	if (delta_ns <= TICK_NSEC)
		return false;

	sg_cpu->iowait_boost = set_iowait_boost ? IOWAIT_BOOST_MIN : 0;
	sg_cpu->iowait_boost_pending = set_iowait_boost;

	return true;
}

/**
 * sugov_iowait_boost() - Updates the IO boost status of a CPU.
 * @sg_cpu: the sugov data for the CPU to boost
 * @time: the update time from the caller
 * @flags: SCHED_CPUFREQ_IOWAIT if the task is waking up after an IO wait
 *
 * Each time a task wakes up after an IO operation, the CPU utilization can be
 * boosted to a certain utilization which doubles at each "frequent and
 * successive" wakeup from IO, ranging from IOWAIT_BOOST_MIN to the utilization
 * of the maximum OPP.
 *
 * To keep doubling, an IO boost has to be requested at least once per tick,
 * otherwise we restart from the utilization of the minimum OPP.
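 *
 * With the default SCHED_CAPACITY_SCALE of 1024, successive boosts thus take
 * the values 128, 256, 512 and finally 1024.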
 */
static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, u64 time,
			       unsigned int flags)
{
	bool set_iowait_boost = flags & SCHED_CPUFREQ_IOWAIT;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sg_cpu->iowait_boost &&
	    sugov_iowait_reset(sg_cpu, time, set_iowait_boost))
		return;

	/* Boost only tasks waking up after IO */
	if (!set_iowait_boost)
		return;

	/* Ensure boost doubles only one time at each request */
	if (sg_cpu->iowait_boost_pending)
		return;
	sg_cpu->iowait_boost_pending = true;

	/* Double the boost at each request */
	if (sg_cpu->iowait_boost) {
		sg_cpu->iowait_boost =
			min_t(unsigned int, sg_cpu->iowait_boost << 1, SCHED_CAPACITY_SCALE);
		return;
	}

	/* First wakeup after IO: start with minimum boost */
	sg_cpu->iowait_boost = IOWAIT_BOOST_MIN;
}

/**
 * sugov_iowait_apply() - Apply the IO boost to a CPU.
 * @sg_cpu: the sugov data for the cpu to boost
 * @time: the update time from the caller
 *
 * A CPU running a task which has woken up after an IO operation can have its
 * utilization boosted to speed up the completion of those IO operations.
 * The IO boost value is increased each time a task wakes up from IO, in
 * sugov_iowait_boost(), and it is decreased by this function instead, each
 * time an increase has not been requested (!iowait_boost_pending).
 *
 * A CPU which appears to have been idle for at least one tick also has its
 * IO boost utilization reset.
 *
 * This mechanism is designed to boost tasks doing frequent IO waits, while
 * being more conservative on tasks which do only sporadic IO operations.
 */
static void sugov_iowait_apply(struct sugov_cpu *sg_cpu, u64 time)
{
	unsigned long boost;

	/* No boost currently required */
	if (!sg_cpu->iowait_boost)
		return;

	/* Reset boost if the CPU appears to have been idle enough */
	if (sugov_iowait_reset(sg_cpu, time, false))
		return;

	if (!sg_cpu->iowait_boost_pending) {
		/*
		 * No boost pending; reduce the boost value.
		 */
		sg_cpu->iowait_boost >>= 1;
		if (sg_cpu->iowait_boost < IOWAIT_BOOST_MIN) {
			sg_cpu->iowait_boost = 0;
			return;
		}
	}

	sg_cpu->iowait_boost_pending = false;

	/*
	 * sg_cpu->util is already in capacity scale; convert iowait_boost
	 * into the same scale so we can compare.
	 */
	boost = (sg_cpu->iowait_boost * sg_cpu->max) >> SCHED_CAPACITY_SHIFT;
	if (sg_cpu->util < boost)
		sg_cpu->util = boost;
}

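/*
 * A CPU is considered "busy" if it has not entered the idle loop since the
 * last frequency evaluation, i.e. the NOHZ idle-calls counter has not moved.
 */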
#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
	unsigned long idle_calls = tick_nohz_get_idle_calls_cpu(sg_cpu->cpu);
	bool ret = idle_calls == sg_cpu->saved_idle_calls;

	sg_cpu->saved_idle_calls = idle_calls;
	return ret;
}
#else
static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
#endif /* CONFIG_NO_HZ_COMMON */

/*
 * Make sugov_should_update_freq() ignore the rate limit when DL
 * has increased the utilization.
 */
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
	if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
		sg_cpu->sg_policy->limits_changed = true;
}

static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
					      u64 time, unsigned int flags)
{
	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (!sugov_should_update_freq(sg_cpu->sg_policy, time))
		return false;

	sugov_get_util(sg_cpu);
	sugov_iowait_apply(sg_cpu, time);

	return true;
}

static void sugov_update_single_freq(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int cached_freq = sg_policy->cached_raw_freq;
	unsigned int next_f;

	if (!sugov_update_single_common(sg_cpu, time, flags))
		return;

	next_f = get_next_freq(sg_policy, sg_cpu->util, sg_cpu->max);
	/*
	 * Do not reduce the frequency if the CPU has not been idle
	 * recently, as the reduction is likely to be premature then.
	 */
	if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) {
		next_f = sg_policy->next_freq;

		/* Restore cached freq as next_freq has changed */
		sg_policy->cached_raw_freq = cached_freq;
	}

	if (!sugov_update_next_freq(sg_policy, time, next_f))
		return;

	/*
	 * This code runs under rq->lock for the target CPU, so it won't run
	 * concurrently on two different CPUs for the same target and it is not
	 * necessary to acquire the lock in the fast switch case.
	 */
	if (sg_policy->policy->fast_switch_enabled) {
		cpufreq_driver_fast_switch(sg_policy->policy, next_f);
	} else {
		raw_spin_lock(&sg_policy->update_lock);
		sugov_deferred_update(sg_policy);
		raw_spin_unlock(&sg_policy->update_lock);
	}
}

static void sugov_update_single_perf(struct update_util_data *hook, u64 time,
				     unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	unsigned long prev_util = sg_cpu->util;

	/*
	 * Fall back to the "frequency" path if frequency invariance is not
	 * supported, because the direct mapping between the utilization and
	 * the performance levels depends on the frequency invariance.
	 */
	if (!arch_scale_freq_invariant()) {
		sugov_update_single_freq(hook, time, flags);
		return;
	}

	if (!sugov_update_single_common(sg_cpu, time, flags))
		return;

	/*
	 * Do not reduce the target performance level if the CPU has not been
	 * idle recently, as the reduction is likely to be premature then.
	 */
	if (sugov_cpu_is_busy(sg_cpu) && sg_cpu->util < prev_util)
		sg_cpu->util = prev_util;

	cpufreq_driver_adjust_perf(sg_cpu->cpu, map_util_perf(sg_cpu->bw_dl),
				   map_util_perf(sg_cpu->util), sg_cpu->max);

	sg_cpu->sg_policy->last_freq_update_time = time;
}

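/*
 * For a shared policy, evaluate every CPU in the policy and derive the next
 * frequency from the one with the highest utilization relative to its
 * capacity (the largest util/max ratio).
 */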
static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	struct cpufreq_policy *policy = sg_policy->policy;
	unsigned long util = 0, max = 1;
	unsigned int j;

	for_each_cpu(j, policy->cpus) {
		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
		unsigned long j_util, j_max;

		sugov_get_util(j_sg_cpu);
		sugov_iowait_apply(j_sg_cpu, time);
		j_util = j_sg_cpu->util;
		j_max = j_sg_cpu->max;

		if (j_util * max > j_max * util) {
			util = j_util;
			max = j_max;
		}
	}

	return get_next_freq(sg_policy, util, max);
}

static void
sugov_update_shared(struct update_util_data *hook, u64 time, unsigned int flags)
{
	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
	unsigned int next_f;

	raw_spin_lock(&sg_policy->update_lock);

	sugov_iowait_boost(sg_cpu, time, flags);
	sg_cpu->last_update = time;

	ignore_dl_rate_limit(sg_cpu);

	if (sugov_should_update_freq(sg_policy, time)) {
		next_f = sugov_next_freq_shared(sg_cpu, time);

		if (!sugov_update_next_freq(sg_policy, time, next_f))
			goto unlock;

		if (sg_policy->policy->fast_switch_enabled)
			cpufreq_driver_fast_switch(sg_policy->policy, next_f);
		else
			sugov_deferred_update(sg_policy);
	}
unlock:
	raw_spin_unlock(&sg_policy->update_lock);
}

static void sugov_work(struct kthread_work *work)
{
	struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
	unsigned int freq;
	unsigned long flags;

	/*
	 * Hold sg_policy->update_lock briefly to handle the case where
	 * sg_policy->next_freq is read here and then updated by
	 * sugov_deferred_update() just before work_in_progress is set to false
	 * here; without the lock we could miss queueing the new update.
	 *
	 * Note: If a work item is queued after the update_lock is released,
	 * sugov_work() will just be called again by the kthread_work code; and
	 * the request will be processed before the sugov thread sleeps.
	 */
	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
	freq = sg_policy->next_freq;
	sg_policy->work_in_progress = false;
	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);

	mutex_lock(&sg_policy->work_lock);
	__cpufreq_driver_target(sg_policy->policy, freq, CPUFREQ_RELATION_L);
	mutex_unlock(&sg_policy->work_lock);
}

static void sugov_irq_work(struct irq_work *irq_work)
{
	struct sugov_policy *sg_policy;

	sg_policy = container_of(irq_work, struct sugov_policy, irq_work);

	kthread_queue_work(&sg_policy->worker, &sg_policy->work);
}

/************************** sysfs interface ************************/

static struct sugov_tunables *global_tunables;
static DEFINE_MUTEX(global_tunables_lock);

static inline struct sugov_tunables *to_sugov_tunables(struct gov_attr_set *attr_set)
{
	return container_of(attr_set, struct sugov_tunables, attr_set);
}

static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

	return sprintf(buf, "%u\n", tunables->rate_limit_us);
}

static ssize_t
rate_limit_us_store(struct gov_attr_set *attr_set, const char *buf, size_t count)
{
	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
	struct sugov_policy *sg_policy;
	unsigned int rate_limit_us;

	if (kstrtouint(buf, 10, &rate_limit_us))
		return -EINVAL;

	tunables->rate_limit_us = rate_limit_us;

	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook)
		sg_policy->freq_update_delay_ns = rate_limit_us * NSEC_PER_USEC;

	return count;
}

static struct governor_attr rate_limit_us = __ATTR_RW(rate_limit_us);

static struct attribute *sugov_attrs[] = {
	&rate_limit_us.attr,
	NULL
};
ATTRIBUTE_GROUPS(sugov);

static void sugov_tunables_free(struct kobject *kobj)
{
	struct gov_attr_set *attr_set = container_of(kobj, struct gov_attr_set, kobj);

	kfree(to_sugov_tunables(attr_set));
}

static struct kobj_type sugov_tunables_ktype = {
	.default_groups = sugov_groups,
	.sysfs_ops = &governor_sysfs_ops,
	.release = &sugov_tunables_free,
};

/********************** cpufreq governor interface *********************/

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;

	sg_policy = kzalloc(sizeof(*sg_policy), GFP_KERNEL);
	if (!sg_policy)
		return NULL;

	sg_policy->policy = policy;
	raw_spin_lock_init(&sg_policy->update_lock);
	return sg_policy;
}

static void sugov_policy_free(struct sugov_policy *sg_policy)
{
	kfree(sg_policy);
}

static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
	struct task_struct *thread;
	struct sched_attr attr = {
		.size		= sizeof(struct sched_attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_flags	= SCHED_FLAG_SUGOV,
		.sched_nice	= 0,
		.sched_priority	= 0,
		/*
		 * Fake (unused) bandwidth; workaround to "fix"
		 * priority inheritance.
		 */
		.sched_runtime	= 1000000,
		.sched_deadline	= 10000000,
		.sched_period	= 10000000,
	};
	struct cpufreq_policy *policy = sg_policy->policy;
	int ret;

	/* kthread only required for slow path */
	if (policy->fast_switch_enabled)
		return 0;

	kthread_init_work(&sg_policy->work, sugov_work);
	kthread_init_worker(&sg_policy->worker);
	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
				"sugov:%d",
				cpumask_first(policy->related_cpus));
	if (IS_ERR(thread)) {
		pr_err("failed to create sugov thread: %ld\n", PTR_ERR(thread));
		return PTR_ERR(thread);
	}

	ret = sched_setattr_nocheck(thread, &attr);
	if (ret) {
		kthread_stop(thread);
		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
		return ret;
	}

	sg_policy->thread = thread;
	kthread_bind_mask(thread, policy->related_cpus);
	init_irq_work(&sg_policy->irq_work, sugov_irq_work);
	mutex_init(&sg_policy->work_lock);

	wake_up_process(thread);

	return 0;
}

static void sugov_kthread_stop(struct sugov_policy *sg_policy)
{
	/* kthread only required for slow path */
	if (sg_policy->policy->fast_switch_enabled)
		return;

	kthread_flush_worker(&sg_policy->worker);
	kthread_stop(sg_policy->thread);
	mutex_destroy(&sg_policy->work_lock);
}

static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_policy)
{
	struct sugov_tunables *tunables;

	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
	if (tunables) {
		gov_attr_set_init(&tunables->attr_set, &sg_policy->tunables_hook);
		if (!have_governor_per_policy())
			global_tunables = tunables;
	}
	return tunables;
}

static void sugov_clear_global_tunables(void)
{
	if (!have_governor_per_policy())
		global_tunables = NULL;
}

static int sugov_init(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy;
	struct sugov_tunables *tunables;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	cpufreq_enable_fast_switch(policy);

	sg_policy = sugov_policy_alloc(policy);
	if (!sg_policy) {
		ret = -ENOMEM;
		goto disable_fast_switch;
	}

	ret = sugov_kthread_create(sg_policy);
	if (ret)
		goto free_sg_policy;

	mutex_lock(&global_tunables_lock);

	if (global_tunables) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto stop_kthread;
		}
		policy->governor_data = sg_policy;
		sg_policy->tunables = global_tunables;

		gov_attr_set_get(&global_tunables->attr_set, &sg_policy->tunables_hook);
		goto out;
	}

	tunables = sugov_tunables_alloc(sg_policy);
	if (!tunables) {
		ret = -ENOMEM;
		goto stop_kthread;
	}

	tunables->rate_limit_us = cpufreq_policy_transition_delay_us(policy);

	policy->governor_data = sg_policy;
	sg_policy->tunables = tunables;

	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
				   get_governor_parent_kobj(policy), "%s",
				   schedutil_gov.name);
	if (ret)
		goto fail;

out:
	mutex_unlock(&global_tunables_lock);
	return 0;

fail:
	kobject_put(&tunables->attr_set.kobj);
	policy->governor_data = NULL;
	sugov_clear_global_tunables();

stop_kthread:
	sugov_kthread_stop(sg_policy);
	mutex_unlock(&global_tunables_lock);

free_sg_policy:
	sugov_policy_free(sg_policy);

disable_fast_switch:
	cpufreq_disable_fast_switch(policy);

	pr_err("initialization failed (error %d)\n", ret);
	return ret;
}

static void sugov_exit(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	struct sugov_tunables *tunables = sg_policy->tunables;
	unsigned int count;

	mutex_lock(&global_tunables_lock);

	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
	policy->governor_data = NULL;
	if (!count)
		sugov_clear_global_tunables();

	mutex_unlock(&global_tunables_lock);

	sugov_kthread_stop(sg_policy);
	sugov_policy_free(sg_policy);
	cpufreq_disable_fast_switch(policy);
}

static int sugov_start(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	void (*uu)(struct update_util_data *data, u64 time, unsigned int flags);
	unsigned int cpu;

	sg_policy->freq_update_delay_ns		= sg_policy->tunables->rate_limit_us * NSEC_PER_USEC;
	sg_policy->last_freq_update_time	= 0;
	sg_policy->next_freq			= 0;
	sg_policy->work_in_progress		= false;
	sg_policy->limits_changed		= false;
	sg_policy->cached_raw_freq		= 0;

	sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		memset(sg_cpu, 0, sizeof(*sg_cpu));
		sg_cpu->cpu			= cpu;
		sg_cpu->sg_policy		= sg_policy;
	}

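	/*
	 * Select the utilization update hook: shared policies must aggregate
	 * all CPUs in sugov_update_shared(), while single-CPU policies can
	 * use the direct performance interface if the driver provides one.
	 */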
	if (policy_is_shared(policy))
		uu = sugov_update_shared;
	else if (policy->fast_switch_enabled && cpufreq_driver_has_adjust_perf())
		uu = sugov_update_single_perf;
	else
		uu = sugov_update_single_freq;

	for_each_cpu(cpu, policy->cpus) {
		struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);

		cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, uu);
	}
	return 0;
}

static void sugov_stop(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);

	synchronize_rcu();

	if (!policy->fast_switch_enabled) {
		irq_work_sync(&sg_policy->irq_work);
		kthread_cancel_work_sync(&sg_policy->work);
	}
}

static void sugov_limits(struct cpufreq_policy *policy)
{
	struct sugov_policy *sg_policy = policy->governor_data;

	if (!policy->fast_switch_enabled) {
		mutex_lock(&sg_policy->work_lock);
		cpufreq_policy_apply_limits(policy);
		mutex_unlock(&sg_policy->work_lock);
	}

	sg_policy->limits_changed = true;
}

struct cpufreq_governor schedutil_gov = {
	.name			= "schedutil",
	.owner			= THIS_MODULE,
	.flags			= CPUFREQ_GOV_DYNAMIC_SWITCHING,
	.init			= sugov_init,
	.exit			= sugov_exit,
	.start			= sugov_start,
	.stop			= sugov_stop,
	.limits			= sugov_limits,
};

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
struct cpufreq_governor *cpufreq_default_governor(void)
{
	return &schedutil_gov;
}
#endif

cpufreq_governor_init(schedutil_gov);

#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
	rebuild_sched_domains_energy();
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
 * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
 * on governor changes to make sure the scheduler knows about it.
 */
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
				   struct cpufreq_governor *old_gov)
{
	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
		/*
		 * When called from the cpufreq_register_driver() path, the
		 * cpu_hotplug_lock is already held, so use a work item to
		 * avoid nested locking in rebuild_sched_domains().
		 */
		schedule_work(&rebuild_sd_work);
	}
}
#endif