/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *           (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *           (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

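/*
 * Serializes governor state changes (INIT/EXIT/START/STOP/LIMITS) for all
 * policies; taken around every event in cpufreq_governor_dbs() below.
 */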
DEFINE_MUTEX(dbs_data_mutex);
EXPORT_SYMBOL_GPL(dbs_data_mutex);

static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
        if (have_governor_per_policy())
                return dbs_data->gov->attr_group_gov_pol;
        else
                return dbs_data->gov->attr_group_gov_sys;
}

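/*
 * dbs_check_cpu - compute the highest load among a policy's CPUs
 * @dbs_data: governor tunables and callbacks
 * @cpu: CPU that owns the policy being evaluated
 *
 * Samples idle and wall time for every CPU in the policy, derives a per-CPU
 * load percentage and hands the maximum to the governor's gov_check_cpu()
 * callback, which picks the next frequency.
 */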
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
        struct cpu_dbs_info *cdbs = dbs_data->gov->get_cpu_cdbs(cpu);
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        struct cpufreq_policy *policy = cdbs->shared->policy;
        unsigned int sampling_rate;
        unsigned int max_load = 0;
        unsigned int ignore_nice;
        unsigned int j;

        if (dbs_data->gov->governor == GOV_ONDEMAND) {
                struct od_cpu_dbs_info_s *od_dbs_info =
                        dbs_data->gov->get_cpu_dbs_info_s(cpu);

                /*
                 * Sometimes, the ondemand governor uses an additional
                 * multiplier to give long delays. So apply this multiplier to
                 * the 'sampling_rate', so as to keep the wake-up-from-idle
                 * detection logic a bit conservative.
                 */
                sampling_rate = od_tuners->sampling_rate;
                sampling_rate *= od_dbs_info->rate_mult;
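                /*
                 * For example, sampling_rate = 20000 us with rate_mult = 4
                 * yields an effective window of 80000 us.
                 */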

                ignore_nice = od_tuners->ignore_nice_load;
        } else {
                sampling_rate = cs_tuners->sampling_rate;
                ignore_nice = cs_tuners->ignore_nice_load;
        }

        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs;
                u64 cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load;
                int io_busy = 0;

                j_cdbs = dbs_data->gov->get_cpu_cdbs(j);

                /*
                 * For the purpose of ondemand, waiting for disk IO is
                 * an indication that you're performance critical, and
                 * not that the system is actually idle. So do not add
                 * the iowait time to the cpu idle time.
                 */
                if (dbs_data->gov->governor == GOV_ONDEMAND)
                        io_busy = od_tuners->io_is_busy;
                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

                wall_time = (unsigned int)
                        (cur_wall_time - j_cdbs->prev_cpu_wall);
                j_cdbs->prev_cpu_wall = cur_wall_time;

                if (cur_idle_time < j_cdbs->prev_cpu_idle)
                        cur_idle_time = j_cdbs->prev_cpu_idle;

                idle_time = (unsigned int)
                        (cur_idle_time - j_cdbs->prev_cpu_idle);
                j_cdbs->prev_cpu_idle = cur_idle_time;

                if (ignore_nice) {
                        u64 cur_nice;
                        unsigned long cur_nice_jiffies;

                        cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
                                        j_cdbs->prev_cpu_nice;
                        /*
                         * Assumption: nice time between sampling periods will
                         * be less than 2^32 jiffies on 32-bit systems.
                         */
                        cur_nice_jiffies = (unsigned long)
                                        cputime64_to_jiffies64(cur_nice);

                        j_cdbs->prev_cpu_nice =
                                        kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                /*
                 * If the CPU had gone completely idle, and a task just woke up
                 * on this CPU now, it would be unfair to calculate 'load' the
                 * usual way for this elapsed time-window, because it will show
                 * near-zero load, irrespective of how CPU intensive that task
                 * actually is. This is undesirable for latency-sensitive bursty
                 * workloads.
                 *
                 * To avoid this, we reuse the 'load' from the previous
                 * time-window and give this task a chance to start with a
                 * reasonably high CPU frequency. (However, we shouldn't over-do
                 * this copy, lest we get stuck at a high load (high frequency)
                 * for too long, even when the current system load has actually
                 * dropped down. So we perform the copy only once, upon the
                 * first wake-up from idle.)
                 *
                 * Detecting this situation is easy: the governor's utilization
                 * update handler would not have run during CPU-idle periods.
                 * Hence, an unusually large 'wall_time' (as compared to the
                 * sampling rate) indicates this scenario.
                 *
                 * prev_load can be zero in two cases and we must recalculate it
                 * for both cases:
                 * - during long idle intervals
                 * - explicitly set to zero
                 */
                if (unlikely(wall_time > (2 * sampling_rate) &&
                             j_cdbs->prev_load)) {
                        load = j_cdbs->prev_load;

                        /*
                         * Perform a destructive copy, to ensure that we copy
                         * the previous load only once, upon the first wake-up
                         * from idle.
                         */
                        j_cdbs->prev_load = 0;
                } else {
                        load = 100 * (wall_time - idle_time) / wall_time;
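                        /* E.g. wall_time = 20000 us, idle_time = 15000 us -> load = 25 */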
                        j_cdbs->prev_load = load;
                }

                if (load > max_load)
                        max_load = load;
        }

        dbs_data->gov->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

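/*
 * gov_set_update_util - install the governor's utilization update hooks
 * @shared: common per-policy governor data
 * @delay_us: minimum time between two consecutive samples, in microseconds
 *
 * Resets the sample delay and registers the per-CPU update_util hooks with
 * the scheduler, which (re)starts load sampling for the policy.
 */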
void gov_set_update_util(struct cpu_common_dbs_info *shared,
                         unsigned int delay_us)
{
        struct cpufreq_policy *policy = shared->policy;
        struct dbs_data *dbs_data = policy->governor_data;
        int cpu;

        gov_update_sample_delay(shared, delay_us);
        shared->last_sample_time = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct cpu_dbs_info *cdbs = dbs_data->gov->get_cpu_cdbs(cpu);

                cpufreq_set_update_util_data(cpu, &cdbs->update_util);
        }
}
EXPORT_SYMBOL_GPL(gov_set_update_util);

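/*
 * Unregister the update_util hooks for all of the policy's CPUs and wait
 * (via synchronize_rcu()) until no CPU can still be inside the handler.
 */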
static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
        int i;

        for_each_cpu(i, policy->cpus)
                cpufreq_set_update_util_data(i, NULL);

        synchronize_rcu();
}

static void gov_cancel_work(struct cpu_common_dbs_info *shared)
{
        /* Tell dbs_update_util_handler() to skip queuing up work items. */
        atomic_inc(&shared->skip_work);
        /*
         * If dbs_update_util_handler() is already running, it may not notice
         * the incremented skip_work, so wait for it to complete to prevent its
         * work item from being queued up after the cancel_work_sync() below.
         */
        gov_clear_update_util(shared->policy);
        irq_work_sync(&shared->irq_work);
        cancel_work_sync(&shared->work);
        atomic_set(&shared->skip_work, 0);
}

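/*
 * Process-context part of a sample: evaluates the load through the
 * governor's gov_dbs_timer() callback (which may change the frequency)
 * and computes the delay until the next sample.
 */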
static void dbs_work_handler(struct work_struct *work)
{
        struct cpu_common_dbs_info *shared = container_of(work, struct
                                        cpu_common_dbs_info, work);
        struct cpufreq_policy *policy;
        struct dbs_data *dbs_data;
        unsigned int delay;

        policy = shared->policy;
        dbs_data = policy->governor_data;

        /*
         * Make sure cpufreq_governor_limits() isn't evaluating load and that
         * the ondemand governor isn't updating the sampling rate in parallel.
         */
        mutex_lock(&shared->timer_mutex);
        delay = dbs_data->gov->gov_dbs_timer(policy);
        shared->sample_delay_ns = jiffies_to_nsecs(delay);
        mutex_unlock(&shared->timer_mutex);

        /*
         * If the atomic operation below is reordered with respect to the
         * sample delay modification, the utilization update handler may end
         * up using a stale sample delay value.
         */
        smp_mb__before_atomic();
        atomic_dec(&shared->skip_work);
}

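/*
 * irq_work handler: runs in hard interrupt context and only punts the
 * sample to process context via schedule_work().
 */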
static void dbs_irq_work(struct irq_work *irq_work)
{
        struct cpu_common_dbs_info *shared;

        shared = container_of(irq_work, struct cpu_common_dbs_info, irq_work);
        schedule_work(&shared->work);
}

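/*
 * Queue the irq_work on the CPU that ran the utilization update handler;
 * on UP builds irq_work_queue() targets the local CPU anyway.
 */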
static inline void gov_queue_irq_work(struct cpu_common_dbs_info *shared)
{
#ifdef CONFIG_SMP
        irq_work_queue_on(&shared->irq_work, smp_processor_id());
#else
        irq_work_queue(&shared->irq_work);
#endif
}

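/*
 * Invoked from scheduler paths on every utilization update. skip_work
 * doubles as a "work in flight" count and a stop flag: atomic_inc_return()
 * yielding 1 means no other CPU's callback, no work item and no
 * gov_cancel_work() holds a reference, so a new sample may be queued once
 * sample_delay_ns has elapsed; any other value means this update must be
 * skipped and the reference dropped again.
 */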
static void dbs_update_util_handler(struct update_util_data *data, u64 time,
                                    unsigned long util, unsigned long max)
{
        struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
        struct cpu_common_dbs_info *shared = cdbs->shared;

        /*
         * The work may not be allowed to be queued up right now.
         * Possible reasons:
         * - Work has already been queued up or is in progress.
         * - The governor is being stopped.
         * - It is too early (too little time from the previous sample).
         */
        if (atomic_inc_return(&shared->skip_work) == 1) {
                u64 delta_ns;

                delta_ns = time - shared->last_sample_time;
                if ((s64)delta_ns >= shared->sample_delay_ns) {
                        shared->last_sample_time = time;
                        gov_queue_irq_work(shared);
                        return;
                }
        }
        atomic_dec(&shared->skip_work);
}

static void set_sampling_rate(struct dbs_data *dbs_data,
                              unsigned int sampling_rate)
{
        if (dbs_data->gov->governor == GOV_CONSERVATIVE) {
                struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
                cs_tuners->sampling_rate = sampling_rate;
        } else {
                struct od_dbs_tuners *od_tuners = dbs_data->tuners;
                od_tuners->sampling_rate = sampling_rate;
        }
}

static int alloc_common_dbs_info(struct cpufreq_policy *policy,
                                 struct dbs_governor *gov)
{
        struct cpu_common_dbs_info *shared;
        int j;

        /* Allocate memory for the common information for policy->cpus */
        shared = kzalloc(sizeof(*shared), GFP_KERNEL);
        if (!shared)
                return -ENOMEM;

        /* Set shared for all CPUs, online+offline */
        for_each_cpu(j, policy->related_cpus)
                gov->get_cpu_cdbs(j)->shared = shared;

        mutex_init(&shared->timer_mutex);
        atomic_set(&shared->skip_work, 0);
        init_irq_work(&shared->irq_work, dbs_irq_work);
        INIT_WORK(&shared->work, dbs_work_handler);
        return 0;
}

static void free_common_dbs_info(struct cpufreq_policy *policy,
                                 struct dbs_governor *gov)
{
        struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);
        struct cpu_common_dbs_info *shared = cdbs->shared;
        int j;

        mutex_destroy(&shared->timer_mutex);

        for_each_cpu(j, policy->cpus)
                gov->get_cpu_cdbs(j)->shared = NULL;

        kfree(shared);
}

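/*
 * Allocate and set up the governor tunables (dbs_data) for a policy. With
 * per-policy governors each policy gets its own instance; otherwise a single
 * instance is shared and reference-counted through usage_count.
 */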
static int cpufreq_governor_init(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = container_of(policy->governor,
                                                struct dbs_governor, gov);
        struct dbs_data *dbs_data = gov->gdbs_data;
        unsigned int latency;
        int ret;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        if (dbs_data) {
                if (WARN_ON(have_governor_per_policy()))
                        return -EINVAL;

                ret = alloc_common_dbs_info(policy, gov);
                if (ret)
                        return ret;

                dbs_data->usage_count++;
                policy->governor_data = dbs_data;
                return 0;
        }

        dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
        if (!dbs_data)
                return -ENOMEM;

        ret = alloc_common_dbs_info(policy, gov);
        if (ret)
                goto free_dbs_data;

        dbs_data->gov = gov;
        dbs_data->usage_count = 1;

        ret = gov->init(dbs_data, !policy->governor->initialized);
        if (ret)
                goto free_common_dbs_info;

        /* policy latency is in ns. Convert it to us first */
        latency = policy->cpuinfo.transition_latency / 1000;
        if (latency == 0)
                latency = 1;

        /* Bring kernel and HW constraints together */
        dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
                                          MIN_LATENCY_MULTIPLIER * latency);
        set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
                                        latency * LATENCY_MULTIPLIER));

        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;

        policy->governor_data = dbs_data;

        ret = sysfs_create_group(get_governor_parent_kobj(policy),
                                 get_sysfs_attr(dbs_data));
        if (ret)
                goto reset_gdbs_data;

        return 0;

reset_gdbs_data:
        policy->governor_data = NULL;

        if (!have_governor_per_policy())
                gov->gdbs_data = NULL;
        gov->exit(dbs_data, !policy->governor->initialized);
free_common_dbs_info:
        free_common_dbs_info(policy, gov);
free_dbs_data:
        kfree(dbs_data);
        return ret;
}

static int cpufreq_governor_exit(struct cpufreq_policy *policy)
{
        struct dbs_data *dbs_data = policy->governor_data;
        struct dbs_governor *gov = dbs_data->gov;
        struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(policy->cpu);

        /* State should be equivalent to INIT */
        if (!cdbs->shared || cdbs->shared->policy)
                return -EBUSY;

        if (!--dbs_data->usage_count) {
                sysfs_remove_group(get_governor_parent_kobj(policy),
                                   get_sysfs_attr(dbs_data));

                policy->governor_data = NULL;

                if (!have_governor_per_policy())
                        gov->gdbs_data = NULL;

                gov->exit(dbs_data, policy->governor->initialized == 1);
                kfree(dbs_data);
        } else {
                policy->governor_data = NULL;
        }

        free_common_dbs_info(policy, gov);
        return 0;
}

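/*
 * Prime each CPU's idle/wall counters and prev_load so the first sample in
 * dbs_check_cpu() has a baseline, then install the update_util hooks.
 */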
static int cpufreq_governor_start(struct cpufreq_policy *policy)
{
        struct dbs_data *dbs_data = policy->governor_data;
        struct dbs_governor *gov = dbs_data->gov;
        unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
        struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);
        struct cpu_common_dbs_info *shared = cdbs->shared;
        int io_busy = 0;

        if (!policy->cur)
                return -EINVAL;

        /* State should be equivalent to INIT */
        if (!shared || shared->policy)
                return -EBUSY;

        if (gov->governor == GOV_CONSERVATIVE) {
                struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

                sampling_rate = cs_tuners->sampling_rate;
                ignore_nice = cs_tuners->ignore_nice_load;
        } else {
                struct od_dbs_tuners *od_tuners = dbs_data->tuners;

                sampling_rate = od_tuners->sampling_rate;
                ignore_nice = od_tuners->ignore_nice_load;
                io_busy = od_tuners->io_is_busy;
        }

        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = gov->get_cpu_cdbs(j);
                unsigned int prev_load;

                j_cdbs->prev_cpu_idle =
                        get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

                prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
                                           j_cdbs->prev_cpu_idle);
                j_cdbs->prev_load = 100 * prev_load /
                                    (unsigned int)j_cdbs->prev_cpu_wall;

                if (ignore_nice)
                        j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

                j_cdbs->update_util.func = dbs_update_util_handler;
        }
        shared->policy = policy;

        if (gov->governor == GOV_CONSERVATIVE) {
                struct cs_cpu_dbs_info_s *cs_dbs_info =
                        gov->get_cpu_dbs_info_s(cpu);

                cs_dbs_info->down_skip = 0;
                cs_dbs_info->requested_freq = policy->cur;
        } else {
                struct od_ops *od_ops = gov->gov_ops;
                struct od_cpu_dbs_info_s *od_dbs_info = gov->get_cpu_dbs_info_s(cpu);

                od_dbs_info->rate_mult = 1;
                od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
                od_ops->powersave_bias_init_cpu(cpu);
        }

        gov_set_update_util(shared, sampling_rate);
        return 0;
}

static int cpufreq_governor_stop(struct cpufreq_policy *policy)
{
        struct dbs_data *dbs_data = policy->governor_data;
        struct cpu_dbs_info *cdbs = dbs_data->gov->get_cpu_cdbs(policy->cpu);
        struct cpu_common_dbs_info *shared = cdbs->shared;

        /* State should be equivalent to START */
        if (!shared || !shared->policy)
                return -EBUSY;

        gov_cancel_work(shared);
        shared->policy = NULL;

        return 0;
}

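/*
 * Clamp the current frequency into the new policy limits and re-evaluate the
 * load, all under timer_mutex to keep out a concurrent dbs_work_handler().
 */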
static int cpufreq_governor_limits(struct cpufreq_policy *policy)
{
        struct dbs_data *dbs_data = policy->governor_data;
        struct dbs_governor *gov = dbs_data->gov;
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info *cdbs = gov->get_cpu_cdbs(cpu);

        /* State should be equivalent to START */
        if (!cdbs->shared || !cdbs->shared->policy)
                return -EBUSY;

        mutex_lock(&cdbs->shared->timer_mutex);
        if (policy->max < cdbs->shared->policy->cur)
                __cpufreq_driver_target(cdbs->shared->policy, policy->max,
                                        CPUFREQ_RELATION_H);
        else if (policy->min > cdbs->shared->policy->cur)
                __cpufreq_driver_target(cdbs->shared->policy, policy->min,
                                        CPUFREQ_RELATION_L);
        dbs_check_cpu(dbs_data, cpu);
        mutex_unlock(&cdbs->shared->timer_mutex);

        return 0;
}

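/*
 * cpufreq_governor_dbs - common entry point for cpufreq governor events
 * @policy: policy the event applies to
 * @event: CPUFREQ_GOV_POLICY_{INIT,EXIT} or CPUFREQ_GOV_{START,STOP,LIMITS}
 *
 * Dispatches the event to the handlers above under dbs_data_mutex.
 */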
int cpufreq_governor_dbs(struct cpufreq_policy *policy, unsigned int event)
{
        int ret = -EINVAL;

        /* Lock governor to block concurrent state transitions */
        mutex_lock(&dbs_data_mutex);

        if (event == CPUFREQ_GOV_POLICY_INIT) {
                ret = cpufreq_governor_init(policy);
        } else if (policy->governor_data) {
                switch (event) {
                case CPUFREQ_GOV_POLICY_EXIT:
                        ret = cpufreq_governor_exit(policy);
                        break;
                case CPUFREQ_GOV_START:
                        ret = cpufreq_governor_start(policy);
                        break;
                case CPUFREQ_GOV_STOP:
                        ret = cpufreq_governor_stop(policy);
                        break;
                case CPUFREQ_GOV_LIMITS:
                        ret = cpufreq_governor_limits(policy);
                        break;
                }
        }

        mutex_unlock(&dbs_data_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);