/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

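/*
 * Lower bound on the sampling interval, in microseconds: two scheduler
 * ticks. For example (illustrative values), with HZ=250 a tick is 4 ms, so
 * the minimum is 8000 us; with HZ=1000 it is 2000 us.
 */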
#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL	(2 * TICK_NSEC / NSEC_PER_USEC)

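/*
 * Per-CPU sampling state (previous idle, nice and update-time snapshots plus
 * the utilization-update hook), shared by all governors built on this code.
 */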
static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

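/*
 * Serializes updates of gov->gdbs_data, the tunables pointer shared across
 * policies when the governor does not use per-policy tunables.
 */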
static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate is not enough. For example, suppose the original
 * sampling rate was 1 second and the user requests 10 ms because an immediate
 * reaction from the ondemand governor is needed. Then the governor might not
 * pick up the new rate until the current 1-second interval expires. Thus,
 * when the sampling rate is being reduced, the new value needs to be made
 * effective immediately.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
			    size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(attr_set);
	struct policy_dbs_info *policy_dbs;
	unsigned int sampling_interval;
	int ret;

	ret = sscanf(buf, "%u", &sampling_interval);
	if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
		return -EINVAL;

	dbs_data->sampling_rate = sampling_interval;

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
		mutex_lock(&policy_dbs->update_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter. If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but it will be corrected next time a sample is
		 * taken, so it shouldn't be significant.
		 */
		gov_update_sample_delay(policy_dbs, 0);
		mutex_unlock(&policy_dbs->update_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or a bunch of them if governor tunables are
 * system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
	struct policy_dbs_info *policy_dbs;

	list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
		unsigned int j;

		for_each_cpu(j, policy_dbs->policy->cpus) {
			struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
								  dbs_data->io_is_busy);
			if (dbs_data->ignore_nice_load)
				j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
		}
	}
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);

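/*
 * dbs_update - compute the load since the last sample.
 *
 * Returns the maximum load, in percent, seen across the policy's CPUs over
 * the interval since the previous sample. As a side effect, records in
 * policy_dbs->idle_periods the number of whole sampling periods skipped while
 * idle (the minimum over the CPUs that were idle for more than two sampling
 * periods, or UINT_MAX if there were none).
 */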
unsigned int dbs_update(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0, idle_periods = UINT_MAX;
	unsigned int sampling_rate, io_busy, j;

	/*
	 * Sometimes governors may use an additional multiplier to increase
	 * sample delays temporarily. Apply that multiplier to sampling_rate
	 * so as to keep the wake-up-from-idle detection logic a bit
	 * conservative.
	 */
	sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
	/*
	 * For the purpose of ondemand, waiting for disk IO is an indication
	 * that you're performance critical, and not that the system is actually
	 * idle, so do not add the iowait time to the CPU idle time then.
	 */
	io_busy = dbs_data->io_is_busy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
		u64 update_time, cur_idle_time;
		unsigned int idle_time, time_elapsed;
		unsigned int load;

		cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

		time_elapsed = update_time - j_cdbs->prev_update_time;
		j_cdbs->prev_update_time = update_time;

		idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

			idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
			j_cdbs->prev_cpu_nice = cur_nice;
		}

		if (unlikely(!time_elapsed)) {
			/*
			 * That can only happen when this function is called
			 * twice in a row with a very short interval between the
			 * calls, so the previous load value can be used then.
			 */
			load = j_cdbs->prev_load;
		} else if (unlikely((int)idle_time > 2 * sampling_rate &&
				    j_cdbs->prev_load)) {
			/*
			 * If the CPU had gone completely idle and a task has
			 * just woken up on this CPU now, it would be unfair to
			 * calculate 'load' the usual way for this elapsed
			 * time-window, because it would show near-zero load,
			 * irrespective of how CPU intensive that task actually
			 * was. This is undesirable for latency-sensitive bursty
			 * workloads.
			 *
			 * To avoid this, reuse the 'load' from the previous
			 * time-window and give this task a chance to start with
			 * a reasonably high CPU frequency. However, that
			 * shouldn't be over-done, lest we get stuck at a high
			 * load (high frequency) for too long, even when the
			 * current system load has actually dropped down, so
			 * clear prev_load to guarantee that the load will be
			 * computed again next time.
			 *
			 * Detecting this situation is easy: an unusually large
			 * 'idle_time' (as compared to the sampling rate)
			 * indicates this scenario.
			 */
			load = j_cdbs->prev_load;
			j_cdbs->prev_load = 0;
		} else {
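			/*
			 * Normal case: load is the percentage of the elapsed
			 * time spent non-idle. For example, time_elapsed =
			 * 10000 us with idle_time = 2500 us gives
			 * load = 100 * (10000 - 2500) / 10000 = 75.
			 */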
			if (time_elapsed >= idle_time) {
				load = 100 * (time_elapsed - idle_time) / time_elapsed;
			} else {
				/*
				 * That can happen if idle_time is returned by
				 * get_cpu_idle_time_jiffy(). In that case
				 * idle_time is roughly equal to the difference
				 * between time_elapsed and "busy time" obtained
				 * from CPU statistics. Then, the "busy time"
				 * can end up being greater than time_elapsed
				 * (for example, if jiffies_64 and the CPU
				 * statistics are updated by different CPUs),
				 * so idle_time may in fact be negative. That
				 * means, though, that the CPU was busy all
				 * the time (on the rough average) during the
				 * last sampling interval and 100 can be
				 * returned as the load.
				 */
				load = (int)idle_time < 0 ? 100 : 0;
			}
			j_cdbs->prev_load = load;
		}

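		/*
		 * Count whole sampling periods this CPU spent idle; governors
		 * can use policy_dbs->idle_periods (e.g. conservative does) to
		 * drop the frequency faster after long idle stretches.
		 */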
		if (unlikely((int)idle_time > 2 * sampling_rate)) {
			unsigned int periods = idle_time / sampling_rate;

			if (periods < idle_periods)
				idle_periods = periods;
		}

		if (load > max_load)
			max_load = load;
	}

	policy_dbs->idle_periods = idle_periods;

	return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

	/*
	 * Make sure cpufreq_governor_limits() isn't evaluating load or the
	 * ondemand governor isn't updating the sampling rate in parallel.
	 */
	mutex_lock(&policy_dbs->update_mutex);
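	/* gov->gov_dbs_update() returns the delay to the next sample, in usecs. */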
	gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
	mutex_unlock(&policy_dbs->update_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

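/*
 * Runs in hard interrupt context; punt the actual governor work to process
 * context (a work item on the local CPU), where it is allowed to sleep.
 */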
static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned int flags)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns, lst;

	if (!cpufreq_this_cpu_can_update(policy_dbs->policy))
		return;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	lst = READ_ONCE(policy_dbs->last_sample_time);
	delta_ns = time - lst;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point. Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared) {
		if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
			return;

		/*
		 * If another CPU updated last_sample_time in the meantime, we
		 * shouldn't be here, so clear the work counter and bail out.
		 */
		if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
			atomic_set(&policy_dbs->work_count, 0);
			return;
		}
	}

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

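/*
 * Hook every CPU in the policy into the scheduler's utilization updates, so
 * that dbs_update_util_handler() gets called on each scheduler update for
 * those CPUs.
 */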
static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
				unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

		cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
					     dbs_update_util_handler);
	}
}

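/*
 * Unhook all CPUs and wait for handlers already in flight: after
 * synchronize_sched() returns, dbs_update_util_handler() is guaranteed not
 * to be running on any CPU.
 */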
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_remove_update_util_hook(i);

	synchronize_sched();
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for per-policy governor data. */
	policy_dbs = gov->alloc();
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->update_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = policy_dbs;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
				 struct dbs_governor *gov)
{
	int j;

	mutex_destroy(&policy_dbs->update_mutex);

	for_each_cpu(j, policy_dbs->policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	gov->free(policy_dbs);
}

int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data;
	struct policy_dbs_info *policy_dbs;
	int ret = 0;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

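	/*
	 * gov->gdbs_data is only set when tunables are system-wide, so a
	 * non-NULL value here means another policy has already created them
	 * and they just need to be reference-counted and reused.
	 */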
	dbs_data = gov->gdbs_data;
	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
		goto out;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

	ret = gov->init(dbs_data);
	if (ret)
		goto free_policy_dbs_info;

	/*
	 * The sampling interval should not be less than the transition latency
	 * of the CPU and it also cannot be too small for dbs_update() to work
	 * correctly.
	 */
	dbs_data->sampling_rate = max_t(unsigned int,
					CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
					cpufreq_policy_transition_delay_us(policy));

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy_dbs->dbs_data = dbs_data;
	policy->governor_data = policy_dbs;

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		goto out;

	/* Failure, so roll back. */
	pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy_dbs, gov);

out:
	mutex_unlock(&gov_dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);

void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int count;

	/* Protect gov->gdbs_data against concurrent updates. */
	mutex_lock(&gov_dbs_data_mutex);

	count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

	policy->governor_data = NULL;

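	/* Free the tunables only when the last policy using them goes away. */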
	if (!count) {
		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data);
		kfree(dbs_data);
	}

	free_policy_dbs_info(policy_dbs, gov);

	mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j;
	unsigned int io_busy;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);
	policy_dbs->rate_mult = 1;

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;
	io_busy = dbs_data->io_is_busy;

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

		j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
		/*
		 * Make the first invocation of dbs_update() compute the load.
		 */
		j_cdbs->prev_load = 0;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	gov->start(policy);

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

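	/*
	 * Detach the update hooks first so nothing new can be queued, then
	 * flush the irq_work and work item that may still be pending.
	 */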
	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->update_mutex);
	cpufreq_policy_apply_limits(policy);
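	/*
	 * A zero sample delay makes the next utilization update take a sample
	 * right away, so the new limits take effect without waiting out a
	 * full sampling interval.
	 */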
	gov_update_sample_delay(policy_dbs, 0);

	mutex_unlock(&policy_dbs->update_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);