/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright (C) 2001 Russell King
 *           (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *           (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *           (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *           (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

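/*
 * Return the sysfs attribute group matching the tunables scope: the
 * per-policy group when the driver asks for governor tunables per policy,
 * otherwise the single system-wide group.
 */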
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
        if (have_governor_per_policy())
                return dbs_data->cdata->attr_group_gov_pol;
        else
                return dbs_data->cdata->attr_group_gov_sys;
}

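/*
 * Compute the absolute load of every CPU in the policy over the last
 * sampling window and hand the maximum to the governor-specific
 * gov_check_cpu() callback, which picks the next frequency.
 */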
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
        struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        struct cpufreq_policy *policy = cdbs->shared->policy;
        unsigned int sampling_rate;
        unsigned int max_load = 0;
        unsigned int ignore_nice;
        unsigned int j;

        if (dbs_data->cdata->governor == GOV_ONDEMAND) {
                struct od_cpu_dbs_info_s *od_dbs_info =
                                dbs_data->cdata->get_cpu_dbs_info_s(cpu);

                /*
                 * Sometimes, the ondemand governor uses an additional
                 * multiplier to give long delays. So apply this multiplier to
                 * the 'sampling_rate', so as to keep the wake-up-from-idle
                 * detection logic a bit conservative.
                 */
                sampling_rate = od_tuners->sampling_rate;
                sampling_rate *= od_dbs_info->rate_mult;

                ignore_nice = od_tuners->ignore_nice_load;
        } else {
                sampling_rate = cs_tuners->sampling_rate;
                ignore_nice = cs_tuners->ignore_nice_load;
        }

        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs;
                u64 cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load;
                int io_busy = 0;

                j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

                /*
                 * For the purpose of ondemand, waiting for disk IO is
                 * an indication that you're performance critical, and
                 * not that the system is actually idle. So do not add
                 * the iowait time to the cpu idle time.
                 */
                if (dbs_data->cdata->governor == GOV_ONDEMAND)
                        io_busy = od_tuners->io_is_busy;
                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

                wall_time = (unsigned int)
                        (cur_wall_time - j_cdbs->prev_cpu_wall);
                j_cdbs->prev_cpu_wall = cur_wall_time;

                idle_time = (unsigned int)
                        (cur_idle_time - j_cdbs->prev_cpu_idle);
                j_cdbs->prev_cpu_idle = cur_idle_time;

                if (ignore_nice) {
                        u64 cur_nice;
                        unsigned long cur_nice_jiffies;

                        cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
                                        cdbs->prev_cpu_nice;
                        /*
                         * Assumption: nice time between sampling periods will
                         * be less than 2^32 jiffies for 32 bit sys
                         */
                        cur_nice_jiffies = (unsigned long)
                                        cputime64_to_jiffies64(cur_nice);

                        cdbs->prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
                }

                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                /*
                 * If the CPU had gone completely idle, and a task just woke up
                 * on this CPU now, it would be unfair to calculate 'load' the
                 * usual way for this elapsed time-window, because it will show
                 * near-zero load, irrespective of how CPU intensive that task
                 * actually is. This is undesirable for latency-sensitive bursty
                 * workloads.
                 *
                 * To avoid this, we reuse the 'load' from the previous
                 * time-window and give this task a chance to start with a
                 * reasonably high CPU frequency. (However, we shouldn't over-do
                 * this copy, lest we get stuck at a high load (high frequency)
                 * for too long, even when the current system load has actually
                 * dropped down. So we perform the copy only once, upon the
                 * first wake-up from idle.)
                 *
                 * Detecting this situation is easy: the governor's deferrable
                 * timer would not have fired during CPU-idle periods. Hence
                 * an unusually large 'wall_time' (as compared to the sampling
                 * rate) indicates this scenario.
                 *
                 * prev_load can be zero in two cases and we must recalculate it
                 * for both cases:
                 * - during long idle intervals
                 * - explicitly set to zero
                 */
                if (unlikely(wall_time > (2 * sampling_rate) &&
                             j_cdbs->prev_load)) {
                        load = j_cdbs->prev_load;

                        /*
                         * Perform a destructive copy, to ensure that we copy
                         * the previous load only once, upon the first wake-up
                         * from idle.
                         */
                        j_cdbs->prev_load = 0;
                } else {
                        load = 100 * (wall_time - idle_time) / wall_time;
                        j_cdbs->prev_load = load;
                }

                if (load > max_load)
                        max_load = load;
        }

        dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

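/* Queue (or re-arm) the governor's deferrable sampling work on one CPU. */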
static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
                unsigned int delay)
{
        struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

        mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
}

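/*
 * Queue the sampling work for a policy, unless the governor has already been
 * disabled. With all_cpus set, the work is queued on every CPU of the policy;
 * otherwise only on the CPU currently running the governor's work handler.
 */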
void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
                unsigned int delay, bool all_cpus)
{
        int i;

        mutex_lock(&cpufreq_governor_lock);
        if (!policy->governor_enabled)
                goto out_unlock;

        if (!all_cpus) {
                /*
                 * Use raw_smp_processor_id() to avoid preemptible warnings.
                 * We know that this is only called with all_cpus == false from
                 * works that have been queued with *_work_on() functions and
                 * those works are canceled during CPU_DOWN_PREPARE so they
                 * can't possibly run on any other CPU.
                 */
                __gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
        } else {
                for_each_cpu(i, policy->cpus)
                        __gov_queue_work(i, dbs_data, delay);
        }

out_unlock:
        mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);

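/* Cancel the queued sampling work on every CPU of the policy and wait for it. */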
static inline void gov_cancel_work(struct dbs_data *dbs_data,
                struct cpufreq_policy *policy)
{
        struct cpu_dbs_info *cdbs;
        int i;

        for_each_cpu(i, policy->cpus) {
                cdbs = dbs_data->cdata->get_cpu_cdbs(i);
                cancel_delayed_work_sync(&cdbs->dwork);
        }
}

/* Return true if the CPU load needs to be evaluated again, false otherwise */
bool need_load_eval(struct cpu_common_dbs_info *shared,
                    unsigned int sampling_rate)
{
        if (policy_is_shared(shared->policy)) {
                ktime_t time_now = ktime_get();
                s64 delta_us = ktime_us_delta(time_now, shared->time_stamp);

                /* Do nothing if we have sampled recently */
                if (delta_us < (s64)(sampling_rate / 2))
                        return false;
                else
                        shared->time_stamp = time_now;
        }

        return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);

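/*
 * Store the sampling rate in the tuners structure matching the governor type
 * (conservative or ondemand).
 */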
static void set_sampling_rate(struct dbs_data *dbs_data,
                unsigned int sampling_rate)
{
        if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
                struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
                cs_tuners->sampling_rate = sampling_rate;
        } else {
                struct od_dbs_tuners *od_tuners = dbs_data->tuners;
                od_tuners->sampling_rate = sampling_rate;
        }
}

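/*
 * Allocate the cpu_common_dbs_info shared by all CPUs of the policy and link
 * it from each per-CPU cpu_dbs_info, including currently offline CPUs.
 */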
static int alloc_common_dbs_info(struct cpufreq_policy *policy,
                                 struct common_dbs_data *cdata)
{
        struct cpu_common_dbs_info *shared;
        int j;

        /* Allocate memory for the common information for policy->cpus */
        shared = kzalloc(sizeof(*shared), GFP_KERNEL);
        if (!shared)
                return -ENOMEM;

        /* Set shared for all CPUs, online+offline */
        for_each_cpu(j, policy->related_cpus)
                cdata->get_cpu_cdbs(j)->shared = shared;

        return 0;
}

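/* Unlink and free the cpu_common_dbs_info shared by the CPUs of the policy. */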
static void free_common_dbs_info(struct cpufreq_policy *policy,
                                 struct common_dbs_data *cdata)
{
        struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(policy->cpu);
        struct cpu_common_dbs_info *shared = cdbs->shared;
        int j;

        for_each_cpu(j, policy->cpus)
                cdata->get_cpu_cdbs(j)->shared = NULL;

        kfree(shared);
}

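/*
 * CPUFREQ_GOV_POLICY_INIT: create (or reuse) the dbs_data for this policy,
 * let the governor allocate its tuners, derive the default sampling rate from
 * the transition latency and expose the tunables through sysfs.
 */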
static int cpufreq_governor_init(struct cpufreq_policy *policy,
                                 struct dbs_data *dbs_data,
                                 struct common_dbs_data *cdata)
{
        unsigned int latency;
        int ret;

        if (dbs_data) {
                if (WARN_ON(have_governor_per_policy()))
                        return -EINVAL;

                ret = alloc_common_dbs_info(policy, cdata);
                if (ret)
                        return ret;

                dbs_data->usage_count++;
                policy->governor_data = dbs_data;
                return 0;
        }

        dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
        if (!dbs_data)
                return -ENOMEM;

        ret = alloc_common_dbs_info(policy, cdata);
        if (ret)
                goto free_dbs_data;

        dbs_data->cdata = cdata;
        dbs_data->usage_count = 1;

        ret = cdata->init(dbs_data, !policy->governor->initialized);
        if (ret)
                goto free_common_dbs_info;

        /* policy latency is in ns. Convert it to us first */
        latency = policy->cpuinfo.transition_latency / 1000;
        if (latency == 0)
                latency = 1;

        /* Bring kernel and HW constraints together */
        dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
                                          MIN_LATENCY_MULTIPLIER * latency);
        set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
                                        latency * LATENCY_MULTIPLIER));

        if (!have_governor_per_policy()) {
                if (WARN_ON(cpufreq_get_global_kobject())) {
                        ret = -EINVAL;
                        goto cdata_exit;
                }
                cdata->gdbs_data = dbs_data;
        }

        ret = sysfs_create_group(get_governor_parent_kobj(policy),
                                 get_sysfs_attr(dbs_data));
        if (ret)
                goto put_kobj;

        policy->governor_data = dbs_data;

        return 0;

put_kobj:
        if (!have_governor_per_policy()) {
                cdata->gdbs_data = NULL;
                cpufreq_put_global_kobject();
        }
cdata_exit:
        cdata->exit(dbs_data, !policy->governor->initialized);
free_common_dbs_info:
        free_common_dbs_info(policy, cdata);
free_dbs_data:
        kfree(dbs_data);
        return ret;
}

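/*
 * CPUFREQ_GOV_POLICY_EXIT: drop one usage count on the dbs_data; when it
 * reaches zero, remove the sysfs group and free the governor tuners. The
 * per-policy common data is released in any case.
 */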
static void cpufreq_governor_exit(struct cpufreq_policy *policy,
                                  struct dbs_data *dbs_data)
{
        struct common_dbs_data *cdata = dbs_data->cdata;

        policy->governor_data = NULL;
        if (!--dbs_data->usage_count) {
                sysfs_remove_group(get_governor_parent_kobj(policy),
                                   get_sysfs_attr(dbs_data));

                if (!have_governor_per_policy()) {
                        cdata->gdbs_data = NULL;
                        cpufreq_put_global_kobject();
                }

                cdata->exit(dbs_data, policy->governor->initialized == 1);
                kfree(dbs_data);
        }

        free_common_dbs_info(policy, cdata);
}

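/*
 * CPUFREQ_GOV_START: take the initial idle/wall-time samples for each CPU,
 * set up governor-specific state and queue the sampling work on all CPUs of
 * the policy.
 */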
static int cpufreq_governor_start(struct cpufreq_policy *policy,
                                  struct dbs_data *dbs_data)
{
        struct common_dbs_data *cdata = dbs_data->cdata;
        unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
        struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
        struct cpu_common_dbs_info *shared = cdbs->shared;
        int io_busy = 0;

        if (!policy->cur)
                return -EINVAL;

        if (cdata->governor == GOV_CONSERVATIVE) {
                struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

                sampling_rate = cs_tuners->sampling_rate;
                ignore_nice = cs_tuners->ignore_nice_load;
        } else {
                struct od_dbs_tuners *od_tuners = dbs_data->tuners;

                sampling_rate = od_tuners->sampling_rate;
                ignore_nice = od_tuners->ignore_nice_load;
                io_busy = od_tuners->io_is_busy;
        }

        shared->policy = policy;
        shared->time_stamp = ktime_get();
        mutex_init(&shared->timer_mutex);

        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
                unsigned int prev_load;

                j_cdbs->prev_cpu_idle =
                        get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

                prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
                                           j_cdbs->prev_cpu_idle);
                j_cdbs->prev_load = 100 * prev_load /
                                    (unsigned int)j_cdbs->prev_cpu_wall;

                if (ignore_nice)
                        j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

                INIT_DEFERRABLE_WORK(&j_cdbs->dwork, cdata->gov_dbs_timer);
        }

        if (cdata->governor == GOV_CONSERVATIVE) {
                struct cs_cpu_dbs_info_s *cs_dbs_info =
                        cdata->get_cpu_dbs_info_s(cpu);

                cs_dbs_info->down_skip = 0;
                cs_dbs_info->enable = 1;
                cs_dbs_info->requested_freq = policy->cur;
        } else {
                struct od_ops *od_ops = cdata->gov_ops;
                struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);

                od_dbs_info->rate_mult = 1;
                od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
                od_ops->powersave_bias_init_cpu(cpu);
        }

        gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
                       true);
        return 0;
}

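/*
 * CPUFREQ_GOV_STOP: cancel the sampling work on all policy CPUs and detach
 * the policy from the shared per-policy data.
 */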
static void cpufreq_governor_stop(struct cpufreq_policy *policy,
                                  struct dbs_data *dbs_data)
{
        struct common_dbs_data *cdata = dbs_data->cdata;
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
        struct cpu_common_dbs_info *shared = cdbs->shared;

        gov_cancel_work(dbs_data, policy);

        if (cdata->governor == GOV_CONSERVATIVE) {
                struct cs_cpu_dbs_info_s *cs_dbs_info =
                        cdata->get_cpu_dbs_info_s(cpu);

                cs_dbs_info->enable = 0;
        }

        shared->policy = NULL;
        mutex_destroy(&shared->timer_mutex);
}

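/*
 * CPUFREQ_GOV_LIMITS: clamp the current frequency into the new policy limits
 * and re-evaluate the load, all under the per-policy timer_mutex.
 */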
static void cpufreq_governor_limits(struct cpufreq_policy *policy,
                                    struct dbs_data *dbs_data)
{
        struct common_dbs_data *cdata = dbs_data->cdata;
        unsigned int cpu = policy->cpu;
        struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);

        if (!cdbs->shared || !cdbs->shared->policy)
                return;

        mutex_lock(&cdbs->shared->timer_mutex);
        if (policy->max < cdbs->shared->policy->cur)
                __cpufreq_driver_target(cdbs->shared->policy, policy->max,
                                        CPUFREQ_RELATION_H);
        else if (policy->min > cdbs->shared->policy->cur)
                __cpufreq_driver_target(cdbs->shared->policy, policy->min,
                                        CPUFREQ_RELATION_L);
        dbs_check_cpu(dbs_data, cpu);
        mutex_unlock(&cdbs->shared->timer_mutex);
}

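/*
 * Common governor entry point called by the cpufreq core; dispatches the
 * INIT/EXIT/START/STOP/LIMITS events under cdata->mutex.
 */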
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                         struct common_dbs_data *cdata, unsigned int event)
{
        struct dbs_data *dbs_data;
        int ret = 0;

        /* Lock governor to block concurrent initialization of governor */
        mutex_lock(&cdata->mutex);

        if (have_governor_per_policy())
                dbs_data = policy->governor_data;
        else
                dbs_data = cdata->gdbs_data;

        if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
                ret = -EINVAL;
                goto unlock;
        }

        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:
                ret = cpufreq_governor_init(policy, dbs_data, cdata);
                break;
        case CPUFREQ_GOV_POLICY_EXIT:
                cpufreq_governor_exit(policy, dbs_data);
                break;
        case CPUFREQ_GOV_START:
                ret = cpufreq_governor_start(policy, dbs_data);
                break;
        case CPUFREQ_GOV_STOP:
                cpufreq_governor_stop(policy, dbs_data);
                break;
        case CPUFREQ_GOV_LIMITS:
                cpufreq_governor_limits(policy, dbs_data);
                break;
        }

unlock:
        mutex_unlock(&cdata->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);