/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update the sampling rate, effective immediately if needed.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate might not be appropriate. For example, suppose the
 * original sampling rate was 1 second and the user requests 10 ms because an
 * immediate reaction from the ondemand governor is needed. If the governor
 * only picked the new value up at the next sample, it could react up to
 * 1 second too late. Thus, when reducing the sampling rate, we need to make
 * the new value effective immediately.
 *
 * On the other hand, if the new rate is larger than the old one, we may
 * evaluate the load too soon, so it is worth updating sample_delay_ns in that
 * case as well.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * policy_dbs_list isn't safe.
 */
ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
			    size_t count)
{
	struct policy_dbs_info *policy_dbs;
	unsigned int rate;
	int ret;
	ret = sscanf(buf, "%u", &rate);
	if (ret != 1)
		return -EINVAL;

	dbs_data->sampling_rate = max(rate, dbs_data->min_sampling_rate);

	/*
	 * We are operating under dbs_data->mutex and so the list and its
	 * entries can't be freed concurrently.
	 */
	list_for_each_entry(policy_dbs, &dbs_data->policy_dbs_list, list) {
		mutex_lock(&policy_dbs->timer_mutex);
		/*
		 * On 32-bit architectures this may race with the
		 * sample_delay_ns read in dbs_update_util_handler(), but that
		 * really doesn't matter. If the read returns a value that's
		 * too big, the sample will be skipped, but the next invocation
		 * of dbs_update_util_handler() (when the update has been
		 * completed) will take a sample. If the returned value is too
		 * small, the sample will be taken immediately, but that isn't a
		 * problem, as we want the new rate to take effect immediately
		 * anyway.
		 *
		 * If this runs in parallel with dbs_work_handler(), we may end
		 * up overwriting the sample_delay_ns value that it has just
		 * written, but the difference should not be too big and it will
		 * be corrected next time a sample is taken, so it shouldn't be
		 * significant.
		 */
		gov_update_sample_delay(policy_dbs, dbs_data->sampling_rate);
		mutex_unlock(&policy_dbs->timer_mutex);
	}

	return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);

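/*
 * Illustration (added commentary, not from the original file): with global
 * tunables (!have_governor_per_policy()), the kobject created in
 * cpufreq_governor_init() below lands under the cpufreq sysfs directory, so
 * a write such as
 *
 *	# echo 10000 > /sys/devices/system/cpu/cpufreq/ondemand/sampling_rate
 *
 * reaches store_sampling_rate() via governor_store(). The value is in
 * microseconds and is clamped to min_sampling_rate; the exact path depends
 * on the platform and on how the governor was built.
 */
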
static inline struct dbs_data *to_dbs_data(struct kobject *kobj)
{
	return container_of(kobj, struct dbs_data, kobj);
}

static inline struct governor_attr *to_gov_attr(struct attribute *attr)
{
	return container_of(attr, struct governor_attr, attr);
}

static ssize_t governor_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	if (gattr->show)
		ret = gattr->show(dbs_data, buf);

	return ret;
}

static ssize_t governor_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct dbs_data *dbs_data = to_dbs_data(kobj);
	struct governor_attr *gattr = to_gov_attr(attr);
	int ret = -EIO;

	mutex_lock(&dbs_data->mutex);

	if (gattr->store)
		ret = gattr->store(dbs_data, buf, count);

	mutex_unlock(&dbs_data->mutex);

	return ret;
}

/*
 * Sysfs Ops for accessing governor attributes.
 *
 * All show/store invocations for governor-specific sysfs attributes first go
 * through the callbacks below; the attribute-specific callback is then
 * called from within them.
 */
static const struct sysfs_ops governor_sysfs_ops = {
	.show	= governor_show,
	.store	= governor_store,
};

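/*
 * Illustrative sketch (added commentary, not from the original file): an
 * attribute served by governor_sysfs_ops pairs show()/store() callbacks
 * taking a struct dbs_data pointer with an embedded struct attribute. The
 * tunable name below is hypothetical and assumes the governor_attr layout
 * from cpufreq_governor.h.
 */
#if 0
static ssize_t show_example(struct dbs_data *dbs_data, char *buf)
{
	/* Reads are not serialized by dbs_data->mutex; a plain read is OK. */
	return sprintf(buf, "%u\n", dbs_data->sampling_rate);
}

static struct governor_attr example =
	__ATTR(example, 0444, show_example, NULL);
#endif
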
void dbs_check_cpu(struct cpufreq_policy *policy)
{
	int cpu = policy->cpu;
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int sampling_rate = dbs_data->sampling_rate;
	unsigned int ignore_nice = dbs_data->ignore_nice_load;
	unsigned int max_load = 0;
	unsigned int j;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				gov->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate *= od_dbs_info->rate_mult;
	}

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = gov->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (gov->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		if (cur_idle_time < j_cdbs->prev_cpu_idle)
			cur_idle_time = j_cdbs->prev_cpu_idle;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
				   cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods is
			 * less than 2^32 jiffies on 32-bit systems.
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's utilization
		 * update handler would not have run during CPU-idle periods.
		 * Hence, an unusually large 'wall_time' (as compared to the
		 * sampling rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	gov->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

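/*
 * Worked example for the load computation above (illustrative numbers): with
 * wall_time = 10000 us and idle_time = 2500 us elapsed since the previous
 * sample, load = 100 * (10000 - 2500) / 10000 = 75, i.e. the CPU was busy
 * for roughly 75% of the window. max_load is the maximum of this value over
 * all CPUs sharing the policy and is what gov_check_cpu() acts on.
 */
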
void gov_set_update_util(struct policy_dbs_info *policy_dbs,
			 unsigned int delay_us)
{
	struct cpufreq_policy *policy = policy_dbs->policy;
	struct dbs_governor *gov = dbs_governor_of(policy);
	int cpu;

	gov_update_sample_delay(policy_dbs, delay_us);
	policy_dbs->last_sample_time = 0;

	for_each_cpu(cpu, policy->cpus) {
		struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

		cpufreq_set_update_util_data(cpu, &cdbs->update_util);
	}
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

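/*
 * Note (added commentary): gov_update_sample_delay() is a helper from
 * cpufreq_governor.h; it is assumed here to simply convert the microsecond
 * delay to nanoseconds, roughly:
 *
 *	policy_dbs->sample_delay_ns = delay_us * NSEC_PER_USEC;
 */
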
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
	int i;

	for_each_cpu(i, policy->cpus)
		cpufreq_set_update_util_data(i, NULL);

	synchronize_rcu();
}

static void gov_cancel_work(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	gov_clear_update_util(policy_dbs->policy);
	irq_work_sync(&policy_dbs->irq_work);
	cancel_work_sync(&policy_dbs->work);
	atomic_set(&policy_dbs->work_count, 0);
	policy_dbs->work_in_progress = false;
}

static void dbs_work_handler(struct work_struct *work)
{
	struct policy_dbs_info *policy_dbs;
	struct cpufreq_policy *policy;
	struct dbs_governor *gov;
	unsigned int delay;

	policy_dbs = container_of(work, struct policy_dbs_info, work);
	policy = policy_dbs->policy;
	gov = dbs_governor_of(policy);

Viresh Kumar70f43e52015-12-09 07:34:42 +0530321 /*
Rafael J. Wysocki9be4fd22016-02-10 16:53:50 +0100322 * Make sure cpufreq_governor_limits() isn't evaluating load or the
323 * ondemand governor isn't updating the sampling rate in parallel.
Viresh Kumar70f43e52015-12-09 07:34:42 +0530324 */
	mutex_lock(&policy_dbs->timer_mutex);
	delay = gov->gov_dbs_timer(policy);
	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
	mutex_unlock(&policy_dbs->timer_mutex);

	/* Allow the utilization update handler to queue up more work. */
	atomic_set(&policy_dbs->work_count, 0);
	/*
	 * If the update below is reordered with respect to the sample delay
	 * modification, the utilization update handler may end up using a stale
	 * sample delay value.
	 */
	smp_wmb();
	policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
	struct policy_dbs_info *policy_dbs;

	policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
	schedule_work(&policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
				    unsigned long util, unsigned long max)
{
	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	u64 delta_ns;

	/*
	 * The work may not be allowed to be queued up right now.
	 * Possible reasons:
	 * - Work has already been queued up or is in progress.
	 * - It is too early (too little time from the previous sample).
	 */
	if (policy_dbs->work_in_progress)
		return;

	/*
	 * If the reads below are reordered before the check above, the value
	 * of sample_delay_ns used in the computation may be stale.
	 */
	smp_rmb();
	delta_ns = time - policy_dbs->last_sample_time;
	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
		return;

	/*
	 * If the policy is not shared, the irq_work may be queued up right away
	 * at this point. Otherwise, we need to ensure that only one of the
	 * CPUs sharing the policy will do that.
	 */
	if (policy_dbs->is_shared &&
	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
		return;

	policy_dbs->last_sample_time = time;
	policy_dbs->work_in_progress = true;
	irq_work_queue(&policy_dbs->irq_work);
}

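/*
 * Note (added commentary): the smp_rmb() in dbs_update_util_handler() pairs
 * with the smp_wmb() in dbs_work_handler(). The writer publishes the new
 * sample_delay_ns before clearing work_in_progress; the reader checks
 * work_in_progress before loading sample_delay_ns, so a handler that
 * observes work_in_progress == false also observes the updated delay.
 */
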
static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
						     struct dbs_governor *gov)
{
	struct policy_dbs_info *policy_dbs;
	int j;

	/* Allocate memory for the common information for policy->cpus */
	policy_dbs = kzalloc(sizeof(*policy_dbs), GFP_KERNEL);
	if (!policy_dbs)
		return NULL;

	policy_dbs->policy = policy;
	mutex_init(&policy_dbs->timer_mutex);
	atomic_set(&policy_dbs->work_count, 0);
	init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
	INIT_WORK(&policy_dbs->work, dbs_work_handler);

	/* Set policy_dbs for all CPUs, online+offline */
	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = policy_dbs;
		j_cdbs->update_util.func = dbs_update_util_handler;
	}
	return policy_dbs;
}

static void free_policy_dbs_info(struct cpufreq_policy *policy,
				 struct dbs_governor *gov)
{
	struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
	int j;

	mutex_destroy(&policy_dbs->timer_mutex);

	for_each_cpu(j, policy->related_cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);

		j_cdbs->policy_dbs = NULL;
		j_cdbs->update_util.func = NULL;
	}
	kfree(policy_dbs);
}

static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct dbs_data *dbs_data = gov->gdbs_data;
	struct policy_dbs_info *policy_dbs;
	unsigned int latency;
	int ret;

	/* State should be equivalent to EXIT */
	if (policy->governor_data)
		return -EBUSY;

	policy_dbs = alloc_policy_dbs_info(policy, gov);
	if (!policy_dbs)
		return -ENOMEM;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy())) {
			ret = -EINVAL;
			goto free_policy_dbs_info;
		}
		policy_dbs->dbs_data = dbs_data;
		policy->governor_data = policy_dbs;

		mutex_lock(&dbs_data->mutex);
		dbs_data->usage_count++;
		list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);
		mutex_unlock(&dbs_data->mutex);

		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data) {
		ret = -ENOMEM;
		goto free_policy_dbs_info;
	}

	INIT_LIST_HEAD(&dbs_data->policy_dbs_list);
	mutex_init(&dbs_data->mutex);

	ret = gov->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_policy_dbs_info;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
				      LATENCY_MULTIPLIER * latency);

	if (!have_governor_per_policy())
		gov->gdbs_data = dbs_data;

	policy->governor_data = policy_dbs;

	policy_dbs->dbs_data = dbs_data;
	dbs_data->usage_count = 1;
	list_add(&policy_dbs->list, &dbs_data->policy_dbs_list);

	gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
	ret = kobject_init_and_add(&dbs_data->kobj, &gov->kobj_type,
				   get_governor_parent_kobj(policy),
				   "%s", gov->gov.name);
	if (!ret)
		return 0;

	/* Failure, so roll back. */
	pr_err("cpufreq: Governor initialization failed (dbs_data kobject init error %d)\n", ret);

	policy->governor_data = NULL;

	if (!have_governor_per_policy())
		gov->gdbs_data = NULL;
	gov->exit(dbs_data, !policy->governor->initialized);
	kfree(dbs_data);

free_policy_dbs_info:
	free_policy_dbs_info(policy, gov);
	return ret;
}

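/*
 * Worked example for the sampling-rate defaults above (illustrative
 * numbers): a driver reporting transition_latency = 10000 ns yields
 * latency = 10 us, so the default sampling_rate becomes
 * LATENCY_MULTIPLIER (1000) * 10 = 10000 us (10 ms), unless the
 * min_sampling_rate chosen by gov->init() is larger, in which case that
 * value wins.
 */
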
static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	int count;

	mutex_lock(&dbs_data->mutex);
	list_del(&policy_dbs->list);
	count = --dbs_data->usage_count;
	mutex_unlock(&dbs_data->mutex);

	if (!count) {
		kobject_put(&dbs_data->kobj);

		policy->governor_data = NULL;

		if (!have_governor_per_policy())
			gov->gdbs_data = NULL;

		gov->exit(dbs_data, policy->governor->initialized == 1);
		mutex_destroy(&dbs_data->mutex);
		kfree(dbs_data);
	} else {
		policy->governor_data = NULL;
	}

	free_policy_dbs_info(policy, gov);
	return 0;
}

static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
	struct dbs_governor *gov = dbs_governor_of(policy);
	struct policy_dbs_info *policy_dbs = policy->governor_data;
	struct dbs_data *dbs_data = policy_dbs->dbs_data;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	policy_dbs->is_shared = policy_is_shared(policy);

	sampling_rate = dbs_data->sampling_rate;
	ignore_nice = dbs_data->ignore_nice_load;

	if (gov->governor == GOV_ONDEMAND) {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}

	if (gov->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			gov->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = gov->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = gov->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	gov_set_update_util(policy_dbs, sampling_rate);
	return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
	gov_cancel_work(policy);

	return 0;
}

static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	struct policy_dbs_info *policy_dbs = policy->governor_data;

	mutex_lock(&policy_dbs->timer_mutex);
	if (policy->max < policy->cur)
		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
	else if (policy->min > policy->cur)
		__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
	dbs_check_cpu(policy);
	mutex_unlock(&policy_dbs->timer_mutex);

	return 0;
}

int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
	int ret = -EINVAL;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&dbs_data_mutex);

	if (event == CPUFREQ_GOV_POLICY_INIT) {
		ret = cpufreq_governor_init(policy);
	} else if (policy->governor_data) {
		switch (event) {
		case CPUFREQ_GOV_POLICY_EXIT:
			ret = cpufreq_governor_exit(policy);
			break;
		case CPUFREQ_GOV_START:
			ret = cpufreq_governor_start(policy);
			break;
		case CPUFREQ_GOV_STOP:
			ret = cpufreq_governor_stop(policy);
			break;
		case CPUFREQ_GOV_LIMITS:
			ret = cpufreq_governor_limits(policy);
			break;
		}
	}

	mutex_unlock(&dbs_data_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);
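
/*
 * Illustrative sketch (added commentary, not part of this file): a governor
 * built on this common code hooks cpufreq_governor_dbs() up as its
 * .governor callback, roughly along these lines (field set abridged):
 *
 *	static struct dbs_governor od_dbs_gov = {
 *		.gov = {
 *			.name = "ondemand",
 *			.governor = cpufreq_governor_dbs,
 *			.owner = THIS_MODULE,
 *		},
 *		...
 *	};
 */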