/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright	(C) 2001 Russell King
 *		(C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *		(C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *		(C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *		(c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

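/*
 * Pick the sysfs attribute group matching the current layout: the per-policy
 * group when each policy has its own governor, the global one otherwise.
 */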
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
	if (have_governor_per_policy())
		return dbs_data->cdata->attr_group_gov_pol;
	else
		return dbs_data->cdata->attr_group_gov_sys;
}

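/*
 * Evaluate the load of every CPU in the policy and hand the maximum over to
 * the governor-specific gov_check_cpu() callback for frequency selection.
 */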
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
	struct cpufreq_policy *policy;
	unsigned int sampling_rate;
	unsigned int max_load = 0;
	unsigned int ignore_nice;
	unsigned int j;

	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
		struct od_cpu_dbs_info_s *od_dbs_info =
				dbs_data->cdata->get_cpu_dbs_info_s(cpu);

		/*
		 * Sometimes, the ondemand governor uses an additional
		 * multiplier to give long delays. So apply this multiplier to
		 * the 'sampling_rate', so as to keep the wake-up-from-idle
		 * detection logic a bit conservative.
		 */
		sampling_rate = od_tuners->sampling_rate;
		sampling_rate *= od_dbs_info->rate_mult;

		ignore_nice = od_tuners->ignore_nice_load;
	} else {
		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	}

	policy = cdbs->cur_policy;

	/* Get Absolute Load */
	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs;
		u64 cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;
		unsigned int load;
		int io_busy = 0;

		j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

		/*
		 * For the purpose of ondemand, waiting for disk IO is
		 * an indication that you're performance critical, and
		 * not that the system is actually idle. So do not add
		 * the iowait time to the cpu idle time.
		 */
		if (dbs_data->cdata->governor == GOV_ONDEMAND)
			io_busy = od_tuners->io_is_busy;
		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

		wall_time = (unsigned int)
			(cur_wall_time - j_cdbs->prev_cpu_wall);
		j_cdbs->prev_cpu_wall = cur_wall_time;

		idle_time = (unsigned int)
			(cur_idle_time - j_cdbs->prev_cpu_idle);
		j_cdbs->prev_cpu_idle = cur_idle_time;

		if (ignore_nice) {
			u64 cur_nice;
			unsigned long cur_nice_jiffies;

			cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
					 cdbs->prev_cpu_nice;
			/*
			 * Assumption: nice time between sampling periods will
			 * be less than 2^32 jiffies for 32 bit sys
			 */
			cur_nice_jiffies = (unsigned long)
					cputime64_to_jiffies64(cur_nice);

			cdbs->prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
			idle_time += jiffies_to_usecs(cur_nice_jiffies);
		}

		if (unlikely(!wall_time || wall_time < idle_time))
			continue;

		/*
		 * If the CPU had gone completely idle, and a task just woke up
		 * on this CPU now, it would be unfair to calculate 'load' the
		 * usual way for this elapsed time-window, because it will show
		 * near-zero load, irrespective of how CPU intensive that task
		 * actually is. This is undesirable for latency-sensitive bursty
		 * workloads.
		 *
		 * To avoid this, we reuse the 'load' from the previous
		 * time-window and give this task a chance to start with a
		 * reasonably high CPU frequency. (However, we shouldn't over-do
		 * this copy, lest we get stuck at a high load (high frequency)
		 * for too long, even when the current system load has actually
		 * dropped down. So we perform the copy only once, upon the
		 * first wake-up from idle.)
		 *
		 * Detecting this situation is easy: the governor's deferrable
		 * timer would not have fired during CPU-idle periods. Hence
		 * an unusually large 'wall_time' (as compared to the sampling
		 * rate) indicates this scenario.
		 *
		 * prev_load can be zero in two cases and we must recalculate it
		 * for both cases:
		 * - during long idle intervals
		 * - explicitly set to zero
		 */
		if (unlikely(wall_time > (2 * sampling_rate) &&
			     j_cdbs->prev_load)) {
			load = j_cdbs->prev_load;

			/*
			 * Perform a destructive copy, to ensure that we copy
			 * the previous load only once, upon the first wake-up
			 * from idle.
			 */
			j_cdbs->prev_load = 0;
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			j_cdbs->prev_load = load;
		}

		if (load > max_load)
			max_load = load;
	}

	dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

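/* (Re)arm the governor's deferrable sampling work on a single CPU. */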
static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
		unsigned int delay)
{
	struct cpu_dbs_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

	mod_delayed_work_on(cpu, system_wq, &cdbs->dwork, delay);
}

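/*
 * Queue the sampling work either on the local CPU only or on every CPU of
 * the policy, unless the governor has already been disabled for this policy.
 */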
void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
		unsigned int delay, bool all_cpus)
{
	int i;

	mutex_lock(&cpufreq_governor_lock);
	if (!policy->governor_enabled)
		goto out_unlock;

	if (!all_cpus) {
		/*
		 * Use raw_smp_processor_id() to avoid preemptible warnings.
		 * We know that this is only called with all_cpus == false from
		 * works that have been queued with *_work_on() functions and
		 * those works are canceled during CPU_DOWN_PREPARE so they
		 * can't possibly run on any other CPU.
		 */
		__gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
	} else {
		for_each_cpu(i, policy->cpus)
			__gov_queue_work(i, dbs_data, delay);
	}

out_unlock:
	mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);

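/* Cancel the queued sampling work on every CPU belonging to the policy. */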
static inline void gov_cancel_work(struct dbs_data *dbs_data,
		struct cpufreq_policy *policy)
{
	struct cpu_dbs_info *cdbs;
	int i;

	for_each_cpu(i, policy->cpus) {
		cdbs = dbs_data->cdata->get_cpu_cdbs(i);
		cancel_delayed_work_sync(&cdbs->dwork);
	}
}

/* Return true if the CPU load needs to be evaluated again, false otherwise */
bool need_load_eval(struct cpu_dbs_info *cdbs, unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* Do nothing if we have sampled recently */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;
		else
			cdbs->time_stamp = time_now;
	}

	return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);

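/* Store the sampling rate in the tuners of whichever governor is in use. */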
static void set_sampling_rate(struct dbs_data *dbs_data,
		unsigned int sampling_rate)
{
	if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
		cs_tuners->sampling_rate = sampling_rate;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
		od_tuners->sampling_rate = sampling_rate;
	}
}

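/*
 * Allocate and set up the governor's dbs_data (tuners, sampling rate limits,
 * sysfs attributes), or take an extra reference when a global instance
 * already exists.
 */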
static int cpufreq_governor_init(struct cpufreq_policy *policy,
				 struct dbs_data *dbs_data,
				 struct common_dbs_data *cdata)
{
	unsigned int latency;
	int ret;

	if (dbs_data) {
		if (WARN_ON(have_governor_per_policy()))
			return -EINVAL;
		dbs_data->usage_count++;
		policy->governor_data = dbs_data;
		return 0;
	}

	dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
	if (!dbs_data)
		return -ENOMEM;

	dbs_data->cdata = cdata;
	dbs_data->usage_count = 1;

	ret = cdata->init(dbs_data, !policy->governor->initialized);
	if (ret)
		goto free_dbs_data;

	/* policy latency is in ns. Convert it to us first */
	latency = policy->cpuinfo.transition_latency / 1000;
	if (latency == 0)
		latency = 1;

	/* Bring kernel and HW constraints together */
	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
					  MIN_LATENCY_MULTIPLIER * latency);
	set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
					latency * LATENCY_MULTIPLIER));

	if (!have_governor_per_policy()) {
		if (WARN_ON(cpufreq_get_global_kobject())) {
			ret = -EINVAL;
			goto cdata_exit;
		}
		cdata->gdbs_data = dbs_data;
	}

	ret = sysfs_create_group(get_governor_parent_kobj(policy),
				 get_sysfs_attr(dbs_data));
	if (ret)
		goto put_kobj;

	policy->governor_data = dbs_data;

	return 0;

put_kobj:
	if (!have_governor_per_policy()) {
		cdata->gdbs_data = NULL;
		cpufreq_put_global_kobject();
	}
cdata_exit:
	cdata->exit(dbs_data, !policy->governor->initialized);
free_dbs_data:
	kfree(dbs_data);
	return ret;
}

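/*
 * Drop the policy's reference to dbs_data; on the last reference, remove the
 * sysfs group and free the governor data.
 */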
static void cpufreq_governor_exit(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;

	policy->governor_data = NULL;
	if (!--dbs_data->usage_count) {
		sysfs_remove_group(get_governor_parent_kobj(policy),
				   get_sysfs_attr(dbs_data));

		if (!have_governor_per_policy()) {
			cdata->gdbs_data = NULL;
			cpufreq_put_global_kobject();
		}

		cdata->exit(dbs_data, policy->governor->initialized == 1);
		kfree(dbs_data);
	}
}

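/*
 * Snapshot the initial idle/wall statistics for every CPU in the policy,
 * initialize the governor-specific per-CPU state and queue the first round
 * of sampling work.
 */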
static int cpufreq_governor_start(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int sampling_rate, ignore_nice, j, cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);
	int io_busy = 0;

	if (!policy->cur)
		return -EINVAL;

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;

		sampling_rate = cs_tuners->sampling_rate;
		ignore_nice = cs_tuners->ignore_nice_load;
	} else {
		struct od_dbs_tuners *od_tuners = dbs_data->tuners;

		sampling_rate = od_tuners->sampling_rate;
		ignore_nice = od_tuners->ignore_nice_load;
		io_busy = od_tuners->io_is_busy;
	}

	for_each_cpu(j, policy->cpus) {
		struct cpu_dbs_info *j_cdbs = cdata->get_cpu_cdbs(j);
		unsigned int prev_load;

		j_cdbs->cur_policy = policy;
		j_cdbs->prev_cpu_idle =
			get_cpu_idle_time(j, &j_cdbs->prev_cpu_wall, io_busy);

		prev_load = (unsigned int)(j_cdbs->prev_cpu_wall -
					   j_cdbs->prev_cpu_idle);
		j_cdbs->prev_load = 100 * prev_load /
				    (unsigned int)j_cdbs->prev_cpu_wall;

		if (ignore_nice)
			j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		mutex_init(&j_cdbs->timer_mutex);
		INIT_DEFERRABLE_WORK(&j_cdbs->dwork, cdata->gov_dbs_timer);
	}

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->down_skip = 0;
		cs_dbs_info->enable = 1;
		cs_dbs_info->requested_freq = policy->cur;
	} else {
		struct od_ops *od_ops = cdata->gov_ops;
		struct od_cpu_dbs_info_s *od_dbs_info = cdata->get_cpu_dbs_info_s(cpu);

		od_dbs_info->rate_mult = 1;
		od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
		od_ops->powersave_bias_init_cpu(cpu);
	}

	/* Initialize the timer time stamp */
	cdbs->time_stamp = ktime_get();

	gov_queue_work(dbs_data, policy, delay_for_sampling_rate(sampling_rate),
		       true);
	return 0;
}

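/* Cancel the sampling work for the policy and detach it from its cpu_dbs_info. */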
static void cpufreq_governor_stop(struct cpufreq_policy *policy,
				  struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);

	if (cdata->governor == GOV_CONSERVATIVE) {
		struct cs_cpu_dbs_info_s *cs_dbs_info =
			cdata->get_cpu_dbs_info_s(cpu);

		cs_dbs_info->enable = 0;
	}

	gov_cancel_work(dbs_data, policy);

	mutex_destroy(&cdbs->timer_mutex);
	cdbs->cur_policy = NULL;
}

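/*
 * Clamp the current frequency to the new policy limits, then re-evaluate the
 * load so the governor reacts to the new constraints immediately.
 */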
static void cpufreq_governor_limits(struct cpufreq_policy *policy,
				    struct dbs_data *dbs_data)
{
	struct common_dbs_data *cdata = dbs_data->cdata;
	unsigned int cpu = policy->cpu;
	struct cpu_dbs_info *cdbs = cdata->get_cpu_cdbs(cpu);

	if (!cdbs->cur_policy)
		return;

	mutex_lock(&cdbs->timer_mutex);
	if (policy->max < cdbs->cur_policy->cur)
		__cpufreq_driver_target(cdbs->cur_policy, policy->max,
					CPUFREQ_RELATION_H);
	else if (policy->min > cdbs->cur_policy->cur)
		__cpufreq_driver_target(cdbs->cur_policy, policy->min,
					CPUFREQ_RELATION_L);
	dbs_check_cpu(dbs_data, cpu);
	mutex_unlock(&cdbs->timer_mutex);
}

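/*
 * Common governor callback: dispatch the cpufreq core's INIT/EXIT/START/STOP/
 * LIMITS events to the handlers above, serialized by the per-governor mutex.
 */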
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
			 struct common_dbs_data *cdata, unsigned int event)
{
	struct dbs_data *dbs_data;
	int ret = 0;

	/* Lock governor to block concurrent initialization of governor */
	mutex_lock(&cdata->mutex);

	if (have_governor_per_policy())
		dbs_data = policy->governor_data;
	else
		dbs_data = cdata->gdbs_data;

	if (WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT))) {
		ret = -EINVAL;
		goto unlock;
	}

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		ret = cpufreq_governor_init(policy, dbs_data, cdata);
		break;
	case CPUFREQ_GOV_POLICY_EXIT:
		cpufreq_governor_exit(policy, dbs_data);
		break;
	case CPUFREQ_GOV_START:
		ret = cpufreq_governor_start(policy, dbs_data);
		break;
	case CPUFREQ_GOV_STOP:
		cpufreq_governor_stop(policy, dbs_data);
		break;
	case CPUFREQ_GOV_LIMITS:
		cpufreq_governor_limits(policy, dbs_data);
		break;
	}

unlock:
	mutex_unlock(&cdata->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);