blob: f033adf06eaf8060b4aab2c4fe6c70dea161029c [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/drivers/cpufreq/cpufreq.c
3 *
4 * Copyright (C) 2001 Russell King
5 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
Viresh Kumarbb176f72013-06-19 14:19:33 +05306 * (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Ashok Rajc32b6b82005-10-30 14:59:54 -08008 * Oct 2005 - Ashok Raj <ashok.raj@intel.com>
Dave Jones32ee8c32006-02-28 00:43:23 -05009 * Added handling for CPU hotplug
Dave Jones8ff69732006-03-05 03:37:23 -050010 * Feb 2006 - Jacob Shin <jacob.shin@amd.com>
11 * Fix handling for CPU hotplug -- affected CPUs
Ashok Rajc32b6b82005-10-30 14:59:54 -080012 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
Linus Torvalds1da177e2005-04-16 15:20:36 -070016 */
17
Viresh Kumardb701152012-10-23 01:29:03 +020018#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
Viresh Kumar5ff0a262013-08-06 22:53:03 +053020#include <linux/cpu.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070021#include <linux/cpufreq.h>
22#include <linux/delay.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/device.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053024#include <linux/init.h>
25#include <linux/kernel_stat.h>
26#include <linux/module.h>
akpm@osdl.org3fc54d32006-01-13 15:54:22 -080027#include <linux/mutex.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053028#include <linux/slab.h>
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +010029#include <linux/syscore_ops.h>
Viresh Kumar5ff0a262013-08-06 22:53:03 +053030#include <linux/tick.h>
Thomas Renninger6f4f2722010-04-20 13:17:36 +020031#include <trace/events/power.h>
32
Linus Torvalds1da177e2005-04-16 15:20:36 -070033/**
Dave Jonescd878472006-08-11 17:59:28 -040034 * The "cpufreq driver" - the arch- or hardware-dependent low
Linus Torvalds1da177e2005-04-16 15:20:36 -070035 * level driver of CPUFreq support, and its spinlock. This lock
36 * also protects the cpufreq_cpu_data array.
37 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +020038static struct cpufreq_driver *cpufreq_driver;
Mike Travis7a6aedf2008-03-25 15:06:53 -070039static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +053040static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
Viresh Kumarbb176f72013-06-19 14:19:33 +053041static DEFINE_RWLOCK(cpufreq_driver_lock);
42static DEFINE_MUTEX(cpufreq_governor_lock);
Lukasz Majewskic88a1f82013-08-06 22:53:08 +053043static LIST_HEAD(cpufreq_policy_list);
Viresh Kumarbb176f72013-06-19 14:19:33 +053044
Thomas Renninger084f3492007-07-09 11:35:28 -070045#ifdef CONFIG_HOTPLUG_CPU
46/* This one keeps track of the previously set governor of a removed CPU */
Dmitry Monakhove77b89f2009-10-05 00:38:55 +040047static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
Thomas Renninger084f3492007-07-09 11:35:28 -070048#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080050/*
51 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
52 * all cpufreq/hotplug/workqueue/etc related lock issues.
53 *
54 * The rules for this semaphore:
55 * - Any routine that wants to read from the policy structure will
56 * do a down_read on this semaphore.
57 * - Any routine that will write to the policy structure and/or may take away
58 * the policy altogether (eg. CPU hotplug), will hold this lock in write
59 * mode before doing so.
60 *
61 * Additional rules:
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080062 * - Governor routines that can be called in cpufreq hotplug path should not
63 * take this sem as top level hotplug notifier handler takes this.
Mathieu Desnoyers395913d2009-06-08 13:17:31 -040064 * - Lock should not be held across
65 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080066 */
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080067static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
68
69#define lock_policy_rwsem(mode, cpu) \
Viresh Kumar1b750e32013-10-02 14:13:09 +053070static void lock_policy_rwsem_##mode(int cpu) \
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080071{ \
Viresh Kumar474deff2013-08-20 12:08:25 +053072 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
73 BUG_ON(!policy); \
74 down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080075}
76
77lock_policy_rwsem(read, cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080078lock_policy_rwsem(write, cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080079
Viresh Kumarfa1d8af2013-02-07 15:38:42 +053080#define unlock_policy_rwsem(mode, cpu) \
81static void unlock_policy_rwsem_##mode(int cpu) \
82{ \
Viresh Kumar474deff2013-08-20 12:08:25 +053083 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
84 BUG_ON(!policy); \
85 up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080086}
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080087
Viresh Kumarfa1d8af2013-02-07 15:38:42 +053088unlock_policy_rwsem(read, cpu);
89unlock_policy_rwsem(write, cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -080090
Viresh Kumar6eed9402013-08-06 22:53:11 +053091/*
92 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
93 * sections
94 */
95static DECLARE_RWSEM(cpufreq_rwsem);
96
Linus Torvalds1da177e2005-04-16 15:20:36 -070097/* internal prototypes */
Dave Jones29464f22009-01-18 01:37:11 -050098static int __cpufreq_governor(struct cpufreq_policy *policy,
99 unsigned int event);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800100static unsigned int __cpufreq_get(unsigned int cpu);
David Howells65f27f32006-11-22 14:55:48 +0000101static void handle_update(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700102
103/**
Dave Jones32ee8c32006-02-28 00:43:23 -0500104 * Two notifier lists: the "policy" list is involved in the
105 * validation process for a new CPU frequency policy; the
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106 * "transition" list for kernel code that needs to handle
107 * changes to devices when the CPU clock speed changes.
108 * The mutex locks both lists.
109 */
Alan Sterne041c682006-03-27 01:16:30 -0800110static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700111static struct srcu_notifier_head cpufreq_transition_notifier_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -0200113static bool init_cpufreq_transition_notifier_list_called;
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700114static int __init init_cpufreq_transition_notifier_list(void)
115{
116 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -0200117 init_cpufreq_transition_notifier_list_called = true;
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700118 return 0;
119}
Linus Torvaldsb3438f82006-11-20 11:47:18 -0800120pure_initcall(init_cpufreq_transition_notifier_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700121
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -0400122static int off __read_mostly;
Viresh Kumarda584452012-10-26 00:51:32 +0200123static int cpufreq_disabled(void)
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -0400124{
125 return off;
126}
127void disable_cpufreq(void)
128{
129 off = 1;
130}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700131static LIST_HEAD(cpufreq_governor_list);
Dave Jones29464f22009-01-18 01:37:11 -0500132static DEFINE_MUTEX(cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000134bool have_governor_per_policy(void)
135{
Viresh Kumar0b981e72013-10-02 14:13:18 +0530136 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000137}
Viresh Kumar3f869d62013-05-16 05:09:56 +0000138EXPORT_SYMBOL_GPL(have_governor_per_policy);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +0000139
Viresh Kumar944e9a02013-05-16 05:09:57 +0000140struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
141{
142 if (have_governor_per_policy())
143 return &policy->kobj;
144 else
145 return cpufreq_global_kobject;
146}
147EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
148
Viresh Kumar72a4ce32013-05-17 11:26:32 +0000149static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
150{
151 u64 idle_time;
152 u64 cur_wall_time;
153 u64 busy_time;
154
155 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
156
157 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
158 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
159 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
160 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
161 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
162 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
163
164 idle_time = cur_wall_time - busy_time;
165 if (wall)
166 *wall = cputime_to_usecs(cur_wall_time);
167
168 return cputime_to_usecs(idle_time);
169}
170
171u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
172{
173 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
174
175 if (idle_time == -1ULL)
176 return get_cpu_idle_time_jiffy(cpu, wall);
177 else if (!io_busy)
178 idle_time += get_cpu_iowait_time_us(cpu, wall);
179
180 return idle_time;
181}
182EXPORT_SYMBOL_GPL(get_cpu_idle_time);
183
Viresh Kumar6eed9402013-08-06 22:53:11 +0530184struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185{
Viresh Kumar6eed9402013-08-06 22:53:11 +0530186 struct cpufreq_policy *policy = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187 unsigned long flags;
188
Viresh Kumar6eed9402013-08-06 22:53:11 +0530189 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
190 return NULL;
191
192 if (!down_read_trylock(&cpufreq_rwsem))
193 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194
195 /* get the cpufreq driver */
Nathan Zimmer0d1857a2013-02-22 16:24:34 +0000196 read_lock_irqsave(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197
Viresh Kumar6eed9402013-08-06 22:53:11 +0530198 if (cpufreq_driver) {
199 /* get the CPU */
200 policy = per_cpu(cpufreq_cpu_data, cpu);
201 if (policy)
202 kobject_get(&policy->kobj);
203 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200204
Viresh Kumar6eed9402013-08-06 22:53:11 +0530205 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530207 if (!policy)
Viresh Kumar6eed9402013-08-06 22:53:11 +0530208 up_read(&cpufreq_rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530210 return policy;
Stephen Boyda9144432012-07-20 18:14:38 +0000211}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
213
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530214void cpufreq_cpu_put(struct cpufreq_policy *policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215{
Dirk Brandewied5aaffa2013-01-17 16:22:21 +0000216 if (cpufreq_disabled())
217 return;
218
Viresh Kumar6eed9402013-08-06 22:53:11 +0530219 kobject_put(&policy->kobj);
220 up_read(&cpufreq_rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221}
222EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
223
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224/*********************************************************************
Linus Torvalds1da177e2005-04-16 15:20:36 -0700225 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
226 *********************************************************************/
227
228/**
229 * adjust_jiffies - adjust the system "loops_per_jiffy"
230 *
231 * This function alters the system "loops_per_jiffy" for the clock
232 * speed change. Note that loops_per_jiffy cannot be updated on SMP
Dave Jones32ee8c32006-02-28 00:43:23 -0500233 * systems as each CPU might be scaled differently. So, use the arch
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234 * per-CPU loops_per_jiffy value wherever possible.
235 */
236#ifndef CONFIG_SMP
237static unsigned long l_p_j_ref;
Viresh Kumarbb176f72013-06-19 14:19:33 +0530238static unsigned int l_p_j_ref_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239
Arjan van de Ven858119e2006-01-14 13:20:43 -0800240static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241{
242 if (ci->flags & CPUFREQ_CONST_LOOPS)
243 return;
244
245 if (!l_p_j_ref_freq) {
246 l_p_j_ref = loops_per_jiffy;
247 l_p_j_ref_freq = ci->old;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200248 pr_debug("saving %lu as reference value for loops_per_jiffy; "
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530249 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250 }
Viresh Kumarbb176f72013-06-19 14:19:33 +0530251 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -0700252 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530253 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
254 ci->new);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200255 pr_debug("scaling loops_per_jiffy to %lu "
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530256 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257 }
258}
259#else
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530260static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
261{
262 return;
263}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264#endif
265
Viresh Kumar0956df9c2013-06-19 14:19:34 +0530266static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
Viresh Kumarb43a7ff2013-03-24 11:56:43 +0530267 struct cpufreq_freqs *freqs, unsigned int state)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268{
269 BUG_ON(irqs_disabled());
270
Dirk Brandewied5aaffa2013-01-17 16:22:21 +0000271 if (cpufreq_disabled())
272 return;
273
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200274 freqs->flags = cpufreq_driver->flags;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200275 pr_debug("notification %u of frequency transition to %u kHz\n",
Dave Jonese4472cb2006-01-31 15:53:55 -0800276 state, freqs->new);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278 switch (state) {
Dave Jonese4472cb2006-01-31 15:53:55 -0800279
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280 case CPUFREQ_PRECHANGE:
Dave Jones32ee8c32006-02-28 00:43:23 -0500281 /* detect if the driver reported a value as "old frequency"
Dave Jonese4472cb2006-01-31 15:53:55 -0800282 * which is not equal to what the cpufreq core thinks is
283 * "old frequency".
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200285 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Dave Jonese4472cb2006-01-31 15:53:55 -0800286 if ((policy) && (policy->cpu == freqs->cpu) &&
287 (policy->cur) && (policy->cur != freqs->old)) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200288 pr_debug("Warning: CPU frequency is"
Dave Jonese4472cb2006-01-31 15:53:55 -0800289 " %u, cpufreq assumed %u kHz.\n",
290 freqs->old, policy->cur);
291 freqs->old = policy->cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700292 }
293 }
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700294 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
Alan Sterne041c682006-03-27 01:16:30 -0800295 CPUFREQ_PRECHANGE, freqs);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
297 break;
Dave Jonese4472cb2006-01-31 15:53:55 -0800298
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299 case CPUFREQ_POSTCHANGE:
300 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200301 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
Thomas Renninger6f4f2722010-04-20 13:17:36 +0200302 (unsigned long)freqs->cpu);
Thomas Renninger25e41932011-01-03 17:50:44 +0100303 trace_cpu_frequency(freqs->new, freqs->cpu);
Alan Sternb4dfdbb2006-10-04 02:17:06 -0700304 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
Alan Sterne041c682006-03-27 01:16:30 -0800305 CPUFREQ_POSTCHANGE, freqs);
Dave Jonese4472cb2006-01-31 15:53:55 -0800306 if (likely(policy) && likely(policy->cpu == freqs->cpu))
307 policy->cur = freqs->new;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308 break;
309 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310}
Viresh Kumarbb176f72013-06-19 14:19:33 +0530311
Viresh Kumarb43a7ff2013-03-24 11:56:43 +0530312/**
313 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
314 * on frequency transition.
315 *
316 * This function calls the transition notifiers and the "adjust_jiffies"
317 * function. It is called twice on all CPU frequency changes that have
318 * external effects.
319 */
320void cpufreq_notify_transition(struct cpufreq_policy *policy,
321 struct cpufreq_freqs *freqs, unsigned int state)
322{
323 for_each_cpu(freqs->cpu, policy->cpus)
324 __cpufreq_notify_transition(policy, freqs, state);
325}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
327
328
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329/*********************************************************************
330 * SYSFS INTERFACE *
331 *********************************************************************/
332
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700333static struct cpufreq_governor *__find_governor(const char *str_governor)
334{
335 struct cpufreq_governor *t;
336
337 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
Dave Jones29464f22009-01-18 01:37:11 -0500338 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700339 return t;
340
341 return NULL;
342}
343
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344/**
345 * cpufreq_parse_governor - parse a governor string
346 */
Dave Jones905d77c2008-03-05 14:28:32 -0500347static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 struct cpufreq_governor **governor)
349{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700350 int err = -EINVAL;
351
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200352 if (!cpufreq_driver)
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700353 goto out;
354
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200355 if (cpufreq_driver->setpolicy) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
357 *policy = CPUFREQ_POLICY_PERFORMANCE;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700358 err = 0;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530359 } else if (!strnicmp(str_governor, "powersave",
360 CPUFREQ_NAME_LEN)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361 *policy = CPUFREQ_POLICY_POWERSAVE;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700362 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200364 } else if (cpufreq_driver->target) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365 struct cpufreq_governor *t;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700366
akpm@osdl.org3fc54d32006-01-13 15:54:22 -0800367 mutex_lock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700368
369 t = __find_governor(str_governor);
370
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700371 if (t == NULL) {
Kees Cook1a8e1462011-05-04 08:38:56 -0700372 int ret;
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700373
Kees Cook1a8e1462011-05-04 08:38:56 -0700374 mutex_unlock(&cpufreq_governor_mutex);
375 ret = request_module("cpufreq_%s", str_governor);
376 mutex_lock(&cpufreq_governor_mutex);
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700377
Kees Cook1a8e1462011-05-04 08:38:56 -0700378 if (ret == 0)
379 t = __find_governor(str_governor);
Jeremy Fitzhardingeea714972006-07-06 12:32:01 -0700380 }
381
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700382 if (t != NULL) {
383 *governor = t;
384 err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385 }
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700386
akpm@osdl.org3fc54d32006-01-13 15:54:22 -0800387 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388 }
Dave Jones29464f22009-01-18 01:37:11 -0500389out:
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -0700390 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393/**
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530394 * cpufreq_per_cpu_attr_read() / show_##file_name() -
395 * print out cpufreq information
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396 *
397 * Write out information from cpufreq_driver->policy[cpu]; object must be
398 * "unsigned int".
399 */
400
Dave Jones32ee8c32006-02-28 00:43:23 -0500401#define show_one(file_name, object) \
402static ssize_t show_##file_name \
Dave Jones905d77c2008-03-05 14:28:32 -0500403(struct cpufreq_policy *policy, char *buf) \
Dave Jones32ee8c32006-02-28 00:43:23 -0500404{ \
Dave Jones29464f22009-01-18 01:37:11 -0500405 return sprintf(buf, "%u\n", policy->object); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406}
407
408show_one(cpuinfo_min_freq, cpuinfo.min_freq);
409show_one(cpuinfo_max_freq, cpuinfo.max_freq);
Thomas Renningered129782009-02-04 01:17:41 +0100410show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411show_one(scaling_min_freq, min);
412show_one(scaling_max_freq, max);
413show_one(scaling_cur_freq, cur);
414
Viresh Kumar037ce832013-10-02 14:13:16 +0530415static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +0530416 struct cpufreq_policy *new_policy);
Thomas Renninger7970e082006-04-13 15:14:04 +0200417
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418/**
419 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
420 */
421#define store_one(file_name, object) \
422static ssize_t store_##file_name \
Dave Jones905d77c2008-03-05 14:28:32 -0500423(struct cpufreq_policy *policy, const char *buf, size_t count) \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424{ \
Srivatsa S. Bhat5136fa52013-09-07 01:24:06 +0530425 int ret; \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 struct cpufreq_policy new_policy; \
427 \
428 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
429 if (ret) \
430 return -EINVAL; \
431 \
Dave Jones29464f22009-01-18 01:37:11 -0500432 ret = sscanf(buf, "%u", &new_policy.object); \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433 if (ret != 1) \
434 return -EINVAL; \
435 \
Viresh Kumar037ce832013-10-02 14:13:16 +0530436 ret = cpufreq_set_policy(policy, &new_policy); \
Thomas Renninger7970e082006-04-13 15:14:04 +0200437 policy->user_policy.object = policy->object; \
Linus Torvalds1da177e2005-04-16 15:20:36 -0700438 \
439 return ret ? ret : count; \
440}
441
Dave Jones29464f22009-01-18 01:37:11 -0500442store_one(scaling_min_freq, min);
443store_one(scaling_max_freq, max);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700444
445/**
446 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
447 */
Dave Jones905d77c2008-03-05 14:28:32 -0500448static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
449 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700450{
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800451 unsigned int cur_freq = __cpufreq_get(policy->cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452 if (!cur_freq)
453 return sprintf(buf, "<unknown>");
454 return sprintf(buf, "%u\n", cur_freq);
455}
456
Linus Torvalds1da177e2005-04-16 15:20:36 -0700457/**
458 * show_scaling_governor - show the current policy for the specified CPU
459 */
Dave Jones905d77c2008-03-05 14:28:32 -0500460static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700461{
Dave Jones29464f22009-01-18 01:37:11 -0500462 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463 return sprintf(buf, "powersave\n");
464 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
465 return sprintf(buf, "performance\n");
466 else if (policy->governor)
viresh kumar4b972f02012-10-23 01:23:43 +0200467 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
Dave Jones29464f22009-01-18 01:37:11 -0500468 policy->governor->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469 return -EINVAL;
470}
471
Linus Torvalds1da177e2005-04-16 15:20:36 -0700472/**
473 * store_scaling_governor - store policy for the specified CPU
474 */
Dave Jones905d77c2008-03-05 14:28:32 -0500475static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
476 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700477{
Srivatsa S. Bhat5136fa52013-09-07 01:24:06 +0530478 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 char str_governor[16];
480 struct cpufreq_policy new_policy;
481
482 ret = cpufreq_get_policy(&new_policy, policy->cpu);
483 if (ret)
484 return ret;
485
Dave Jones29464f22009-01-18 01:37:11 -0500486 ret = sscanf(buf, "%15s", str_governor);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487 if (ret != 1)
488 return -EINVAL;
489
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530490 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
491 &new_policy.governor))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492 return -EINVAL;
493
Viresh Kumar037ce832013-10-02 14:13:16 +0530494 ret = cpufreq_set_policy(policy, &new_policy);
Thomas Renninger7970e082006-04-13 15:14:04 +0200495
496 policy->user_policy.policy = policy->policy;
497 policy->user_policy.governor = policy->governor;
Thomas Renninger7970e082006-04-13 15:14:04 +0200498
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530499 if (ret)
500 return ret;
501 else
502 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503}
504
505/**
506 * show_scaling_driver - show the cpufreq driver currently loaded
507 */
Dave Jones905d77c2008-03-05 14:28:32 -0500508static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200510 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511}
512
513/**
514 * show_scaling_available_governors - show the available CPUfreq governors
515 */
Dave Jones905d77c2008-03-05 14:28:32 -0500516static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
517 char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518{
519 ssize_t i = 0;
520 struct cpufreq_governor *t;
521
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200522 if (!cpufreq_driver->target) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523 i += sprintf(buf, "performance powersave");
524 goto out;
525 }
526
527 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
Dave Jones29464f22009-01-18 01:37:11 -0500528 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
529 - (CPUFREQ_NAME_LEN + 2)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530 goto out;
viresh kumar4b972f02012-10-23 01:23:43 +0200531 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 }
Dave Jones7d5e3502006-02-02 17:03:42 -0500533out:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534 i += sprintf(&buf[i], "\n");
535 return i;
536}
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700537
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800538ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539{
540 ssize_t i = 0;
541 unsigned int cpu;
542
Rusty Russell835481d2009-01-04 05:18:06 -0800543 for_each_cpu(cpu, mask) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700544 if (i)
545 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
546 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
547 if (i >= (PAGE_SIZE - 5))
Dave Jones29464f22009-01-18 01:37:11 -0500548 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549 }
550 i += sprintf(&buf[i], "\n");
551 return i;
552}
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800553EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700555/**
556 * show_related_cpus - show the CPUs affected by each transition even if
557 * hw coordination is in use
558 */
559static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
560{
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800561 return cpufreq_show_cpus(policy->related_cpus, buf);
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700562}
563
564/**
565 * show_affected_cpus - show the CPUs affected by each transition
566 */
567static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
568{
Lan Tianyuf4fd3792013-06-27 15:08:54 +0800569 return cpufreq_show_cpus(policy->cpus, buf);
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700570}
571
Venki Pallipadi9e769882007-10-26 10:18:21 -0700572static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
Dave Jones905d77c2008-03-05 14:28:32 -0500573 const char *buf, size_t count)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700574{
575 unsigned int freq = 0;
576 unsigned int ret;
577
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700578 if (!policy->governor || !policy->governor->store_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700579 return -EINVAL;
580
581 ret = sscanf(buf, "%u", &freq);
582 if (ret != 1)
583 return -EINVAL;
584
585 policy->governor->store_setspeed(policy, freq);
586
587 return count;
588}
589
590static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
591{
CHIKAMA masaki879000f2008-06-05 22:46:33 -0700592 if (!policy->governor || !policy->governor->show_setspeed)
Venki Pallipadi9e769882007-10-26 10:18:21 -0700593 return sprintf(buf, "<unsupported>\n");
594
595 return policy->governor->show_setspeed(policy, buf);
596}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597
Thomas Renningere2f74f32009-11-19 12:31:01 +0100598/**
viresh kumar8bf1ac72012-10-23 01:23:33 +0200599 * show_bios_limit - show the current cpufreq HW/BIOS limitation
Thomas Renningere2f74f32009-11-19 12:31:01 +0100600 */
601static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
602{
603 unsigned int limit;
604 int ret;
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200605 if (cpufreq_driver->bios_limit) {
606 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
Thomas Renningere2f74f32009-11-19 12:31:01 +0100607 if (!ret)
608 return sprintf(buf, "%u\n", limit);
609 }
610 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
611}
612
Borislav Petkov6dad2a22010-03-31 21:56:46 +0200613cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
614cpufreq_freq_attr_ro(cpuinfo_min_freq);
615cpufreq_freq_attr_ro(cpuinfo_max_freq);
616cpufreq_freq_attr_ro(cpuinfo_transition_latency);
617cpufreq_freq_attr_ro(scaling_available_governors);
618cpufreq_freq_attr_ro(scaling_driver);
619cpufreq_freq_attr_ro(scaling_cur_freq);
620cpufreq_freq_attr_ro(bios_limit);
621cpufreq_freq_attr_ro(related_cpus);
622cpufreq_freq_attr_ro(affected_cpus);
623cpufreq_freq_attr_rw(scaling_min_freq);
624cpufreq_freq_attr_rw(scaling_max_freq);
625cpufreq_freq_attr_rw(scaling_governor);
626cpufreq_freq_attr_rw(scaling_setspeed);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700627
Dave Jones905d77c2008-03-05 14:28:32 -0500628static struct attribute *default_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700629 &cpuinfo_min_freq.attr,
630 &cpuinfo_max_freq.attr,
Thomas Renningered129782009-02-04 01:17:41 +0100631 &cpuinfo_transition_latency.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632 &scaling_min_freq.attr,
633 &scaling_max_freq.attr,
634 &affected_cpus.attr,
Darrick J. Wonge8628dd2008-04-18 13:31:12 -0700635 &related_cpus.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636 &scaling_governor.attr,
637 &scaling_driver.attr,
638 &scaling_available_governors.attr,
Venki Pallipadi9e769882007-10-26 10:18:21 -0700639 &scaling_setspeed.attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640 NULL
641};
642
Dave Jones29464f22009-01-18 01:37:11 -0500643#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
644#define to_attr(a) container_of(a, struct freq_attr, attr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645
Dave Jones29464f22009-01-18 01:37:11 -0500646static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700647{
Dave Jones905d77c2008-03-05 14:28:32 -0500648 struct cpufreq_policy *policy = to_policy(kobj);
649 struct freq_attr *fattr = to_attr(attr);
Viresh Kumar1b750e32013-10-02 14:13:09 +0530650 ssize_t ret;
Viresh Kumar6eed9402013-08-06 22:53:11 +0530651
652 if (!down_read_trylock(&cpufreq_rwsem))
Viresh Kumar1b750e32013-10-02 14:13:09 +0530653 return -EINVAL;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800654
Viresh Kumar1b750e32013-10-02 14:13:09 +0530655 lock_policy_rwsem_read(policy->cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800656
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530657 if (fattr->show)
658 ret = fattr->show(policy, buf);
659 else
660 ret = -EIO;
661
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800662 unlock_policy_rwsem_read(policy->cpu);
Viresh Kumar6eed9402013-08-06 22:53:11 +0530663 up_read(&cpufreq_rwsem);
Viresh Kumar1b750e32013-10-02 14:13:09 +0530664
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 return ret;
666}
667
Dave Jones905d77c2008-03-05 14:28:32 -0500668static ssize_t store(struct kobject *kobj, struct attribute *attr,
669 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670{
Dave Jones905d77c2008-03-05 14:28:32 -0500671 struct cpufreq_policy *policy = to_policy(kobj);
672 struct freq_attr *fattr = to_attr(attr);
Dave Jonesa07530b2008-03-05 14:22:25 -0500673 ssize_t ret = -EINVAL;
Viresh Kumar6eed9402013-08-06 22:53:11 +0530674
Srivatsa S. Bhat4f750c92013-09-07 01:23:43 +0530675 get_online_cpus();
676
677 if (!cpu_online(policy->cpu))
678 goto unlock;
679
Viresh Kumar6eed9402013-08-06 22:53:11 +0530680 if (!down_read_trylock(&cpufreq_rwsem))
Srivatsa S. Bhat4f750c92013-09-07 01:23:43 +0530681 goto unlock;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800682
Viresh Kumar1b750e32013-10-02 14:13:09 +0530683 lock_policy_rwsem_write(policy->cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800684
Gautham R Shenoye08f5f52006-10-26 16:20:58 +0530685 if (fattr->store)
686 ret = fattr->store(policy, buf, count);
687 else
688 ret = -EIO;
689
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -0800690 unlock_policy_rwsem_write(policy->cpu);
Viresh Kumar6eed9402013-08-06 22:53:11 +0530691
Viresh Kumar6eed9402013-08-06 22:53:11 +0530692 up_read(&cpufreq_rwsem);
Srivatsa S. Bhat4f750c92013-09-07 01:23:43 +0530693unlock:
694 put_online_cpus();
695
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696 return ret;
697}
698
Dave Jones905d77c2008-03-05 14:28:32 -0500699static void cpufreq_sysfs_release(struct kobject *kobj)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700{
Dave Jones905d77c2008-03-05 14:28:32 -0500701 struct cpufreq_policy *policy = to_policy(kobj);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200702 pr_debug("last reference is dropped\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 complete(&policy->kobj_unregister);
704}
705
Emese Revfy52cf25d2010-01-19 02:58:23 +0100706static const struct sysfs_ops sysfs_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700707 .show = show,
708 .store = store,
709};
710
711static struct kobj_type ktype_cpufreq = {
712 .sysfs_ops = &sysfs_ops,
713 .default_attrs = default_attrs,
714 .release = cpufreq_sysfs_release,
715};
716
Viresh Kumar2361be22013-05-17 16:09:09 +0530717struct kobject *cpufreq_global_kobject;
718EXPORT_SYMBOL(cpufreq_global_kobject);
719
720static int cpufreq_global_kobject_usage;
721
722int cpufreq_get_global_kobject(void)
723{
724 if (!cpufreq_global_kobject_usage++)
725 return kobject_add(cpufreq_global_kobject,
726 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
727
728 return 0;
729}
730EXPORT_SYMBOL(cpufreq_get_global_kobject);
731
732void cpufreq_put_global_kobject(void)
733{
734 if (!--cpufreq_global_kobject_usage)
735 kobject_del(cpufreq_global_kobject);
736}
737EXPORT_SYMBOL(cpufreq_put_global_kobject);
738
739int cpufreq_sysfs_create_file(const struct attribute *attr)
740{
741 int ret = cpufreq_get_global_kobject();
742
743 if (!ret) {
744 ret = sysfs_create_file(cpufreq_global_kobject, attr);
745 if (ret)
746 cpufreq_put_global_kobject();
747 }
748
749 return ret;
750}
751EXPORT_SYMBOL(cpufreq_sysfs_create_file);
752
753void cpufreq_sysfs_remove_file(const struct attribute *attr)
754{
755 sysfs_remove_file(cpufreq_global_kobject, attr);
756 cpufreq_put_global_kobject();
757}
758EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
759
Dave Jones19d6f7e2009-07-08 17:35:39 -0400760/* symlink affected CPUs */
Viresh Kumar308b60e2013-07-31 14:35:14 +0200761static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
Dave Jones19d6f7e2009-07-08 17:35:39 -0400762{
763 unsigned int j;
764 int ret = 0;
765
766 for_each_cpu(j, policy->cpus) {
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800767 struct device *cpu_dev;
Dave Jones19d6f7e2009-07-08 17:35:39 -0400768
Viresh Kumar308b60e2013-07-31 14:35:14 +0200769 if (j == policy->cpu)
Dave Jones19d6f7e2009-07-08 17:35:39 -0400770 continue;
Dave Jones19d6f7e2009-07-08 17:35:39 -0400771
Viresh Kumare8fdde12013-07-31 14:31:33 +0200772 pr_debug("Adding link for CPU: %u\n", j);
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800773 cpu_dev = get_cpu_device(j);
774 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
Dave Jones19d6f7e2009-07-08 17:35:39 -0400775 "cpufreq");
Rafael J. Wysocki71c34612013-08-04 01:19:34 +0200776 if (ret)
777 break;
Dave Jones19d6f7e2009-07-08 17:35:39 -0400778 }
779 return ret;
780}
781
Viresh Kumar308b60e2013-07-31 14:35:14 +0200782static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800783 struct device *dev)
Dave Jones909a6942009-07-08 18:05:42 -0400784{
785 struct freq_attr **drv_attr;
Dave Jones909a6942009-07-08 18:05:42 -0400786 int ret = 0;
Dave Jones909a6942009-07-08 18:05:42 -0400787
788 /* prepare interface data */
789 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
Kay Sievers8a25a2f2011-12-21 14:29:42 -0800790 &dev->kobj, "cpufreq");
Dave Jones909a6942009-07-08 18:05:42 -0400791 if (ret)
792 return ret;
793
794 /* set up files for this cpu device */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200795 drv_attr = cpufreq_driver->attr;
Dave Jones909a6942009-07-08 18:05:42 -0400796 while ((drv_attr) && (*drv_attr)) {
797 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
798 if (ret)
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200799 goto err_out_kobj_put;
Dave Jones909a6942009-07-08 18:05:42 -0400800 drv_attr++;
801 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200802 if (cpufreq_driver->get) {
Dave Jones909a6942009-07-08 18:05:42 -0400803 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
804 if (ret)
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200805 goto err_out_kobj_put;
Dave Jones909a6942009-07-08 18:05:42 -0400806 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200807 if (cpufreq_driver->target) {
Dave Jones909a6942009-07-08 18:05:42 -0400808 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
809 if (ret)
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200810 goto err_out_kobj_put;
Dave Jones909a6942009-07-08 18:05:42 -0400811 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200812 if (cpufreq_driver->bios_limit) {
Thomas Renningere2f74f32009-11-19 12:31:01 +0100813 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
814 if (ret)
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200815 goto err_out_kobj_put;
Thomas Renningere2f74f32009-11-19 12:31:01 +0100816 }
Dave Jones909a6942009-07-08 18:05:42 -0400817
Viresh Kumar308b60e2013-07-31 14:35:14 +0200818 ret = cpufreq_add_dev_symlink(policy);
Dave Jonesecf7e462009-07-08 18:48:47 -0400819 if (ret)
820 goto err_out_kobj_put;
821
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +0530822 return ret;
823
824err_out_kobj_put:
825 kobject_put(&policy->kobj);
826 wait_for_completion(&policy->kobj_unregister);
827 return ret;
828}
829
830static void cpufreq_init_policy(struct cpufreq_policy *policy)
831{
832 struct cpufreq_policy new_policy;
833 int ret = 0;
834
Viresh Kumard5b73cd2013-08-06 22:53:06 +0530835 memcpy(&new_policy, policy, sizeof(*policy));
Viresh Kumar037ce832013-10-02 14:13:16 +0530836 /* assure that the starting sequence is run in cpufreq_set_policy */
Dave Jonesecf7e462009-07-08 18:48:47 -0400837 policy->governor = NULL;
838
839 /* set default policy */
Viresh Kumar037ce832013-10-02 14:13:16 +0530840 ret = cpufreq_set_policy(policy, &new_policy);
Dave Jonesecf7e462009-07-08 18:48:47 -0400841 policy->user_policy.policy = policy->policy;
842 policy->user_policy.governor = policy->governor;
843
844 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200845 pr_debug("setting policy failed\n");
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200846 if (cpufreq_driver->exit)
847 cpufreq_driver->exit(policy);
Dave Jonesecf7e462009-07-08 18:48:47 -0400848 }
Dave Jones909a6942009-07-08 18:05:42 -0400849}
850
Viresh Kumarfcf80582013-01-29 14:39:08 +0000851#ifdef CONFIG_HOTPLUG_CPU
Viresh Kumard8d3b472013-08-04 01:20:07 +0200852static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
853 unsigned int cpu, struct device *dev,
854 bool frozen)
Viresh Kumarfcf80582013-01-29 14:39:08 +0000855{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +0200856 int ret = 0, has_target = !!cpufreq_driver->target;
Viresh Kumarfcf80582013-01-29 14:39:08 +0000857 unsigned long flags;
858
Viresh Kumar3de9bde2013-08-06 22:53:13 +0530859 if (has_target) {
860 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
861 if (ret) {
862 pr_err("%s: Failed to stop governor\n", __func__);
863 return ret;
864 }
865 }
Viresh Kumarfcf80582013-01-29 14:39:08 +0000866
Viresh Kumard8d3b472013-08-04 01:20:07 +0200867 lock_policy_rwsem_write(policy->cpu);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +0530868
Nathan Zimmer0d1857a2013-02-22 16:24:34 +0000869 write_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +0530870
Viresh Kumarfcf80582013-01-29 14:39:08 +0000871 cpumask_set_cpu(cpu, policy->cpus);
872 per_cpu(cpufreq_cpu_data, cpu) = policy;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +0000873 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Viresh Kumarfcf80582013-01-29 14:39:08 +0000874
Viresh Kumard8d3b472013-08-04 01:20:07 +0200875 unlock_policy_rwsem_write(policy->cpu);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +0530876
Viresh Kumar820c6ca2013-04-22 00:48:03 +0200877 if (has_target) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +0530878 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
879 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
880 pr_err("%s: Failed to start governor\n", __func__);
881 return ret;
882 }
Viresh Kumar820c6ca2013-04-22 00:48:03 +0200883 }
Viresh Kumarfcf80582013-01-29 14:39:08 +0000884
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +0530885 /* Don't touch sysfs links during light-weight init */
Rafael J. Wysocki71c34612013-08-04 01:19:34 +0200886 if (!frozen)
887 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
Viresh Kumarfcf80582013-01-29 14:39:08 +0000888
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +0530889 return ret;
Viresh Kumarfcf80582013-01-29 14:39:08 +0000890}
891#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892
Srivatsa S. Bhat84148092013-07-30 04:25:10 +0530893static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
894{
895 struct cpufreq_policy *policy;
896 unsigned long flags;
897
Lan Tianyu44871c92013-09-11 15:05:05 +0800898 read_lock_irqsave(&cpufreq_driver_lock, flags);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +0530899
900 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
901
Lan Tianyu44871c92013-09-11 15:05:05 +0800902 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Srivatsa S. Bhat84148092013-07-30 04:25:10 +0530903
904 return policy;
905}
906
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +0530907static struct cpufreq_policy *cpufreq_policy_alloc(void)
908{
909 struct cpufreq_policy *policy;
910
911 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
912 if (!policy)
913 return NULL;
914
915 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
916 goto err_free_policy;
917
918 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
919 goto err_free_cpumask;
920
Lukasz Majewskic88a1f82013-08-06 22:53:08 +0530921 INIT_LIST_HEAD(&policy->policy_list);
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +0530922 return policy;
923
924err_free_cpumask:
925 free_cpumask_var(policy->cpus);
926err_free_policy:
927 kfree(policy);
928
929 return NULL;
930}
931
932static void cpufreq_policy_free(struct cpufreq_policy *policy)
933{
934 free_cpumask_var(policy->related_cpus);
935 free_cpumask_var(policy->cpus);
936 kfree(policy);
937}
938
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +0530939static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
940{
Srivatsa S. Bhatcb38ed52013-09-12 01:43:42 +0530941 if (cpu == policy->cpu)
942 return;
943
Viresh Kumar8efd5762013-09-17 10:22:11 +0530944 /*
945 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
946 * Also lock for last cpu is enough here as contention will happen only
947 * after policy->cpu is changed and after it is changed, other threads
948 * will try to acquire lock for new cpu. And policy is already updated
949 * by then.
950 */
951 down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
952
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +0530953 policy->last_cpu = policy->cpu;
954 policy->cpu = cpu;
955
Viresh Kumar8efd5762013-09-17 10:22:11 +0530956 up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu));
957
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +0530958#ifdef CONFIG_CPU_FREQ_TABLE
959 cpufreq_frequency_table_update_policy_cpu(policy);
960#endif
961 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
962 CPUFREQ_UPDATE_POLICY_CPU, policy);
963}
964
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +0530965static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
966 bool frozen)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967{
Viresh Kumarfcf80582013-01-29 14:39:08 +0000968 unsigned int j, cpu = dev->id;
Viresh Kumar65922462013-02-07 10:56:03 +0530969 int ret = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700970 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971 unsigned long flags;
Prarit Bhargava90e41ba2009-11-12 09:18:46 -0500972#ifdef CONFIG_HOTPLUG_CPU
Viresh Kumar1b274292013-08-20 12:08:26 +0530973 struct cpufreq_policy *tpolicy;
Viresh Kumarfcf80582013-01-29 14:39:08 +0000974 struct cpufreq_governor *gov;
Prarit Bhargava90e41ba2009-11-12 09:18:46 -0500975#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700976
Ashok Rajc32b6b82005-10-30 14:59:54 -0800977 if (cpu_is_offline(cpu))
978 return 0;
979
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +0200980 pr_debug("adding CPU %u\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981
982#ifdef CONFIG_SMP
983 /* check whether a different CPU already registered this
984 * CPU because it is in the same boat. */
985 policy = cpufreq_cpu_get(cpu);
986 if (unlikely(policy)) {
Dave Jones8ff69732006-03-05 03:37:23 -0500987 cpufreq_cpu_put(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988 return 0;
989 }
Li Zhong5025d622013-08-21 01:31:08 +0200990#endif
Viresh Kumarfcf80582013-01-29 14:39:08 +0000991
Viresh Kumar6eed9402013-08-06 22:53:11 +0530992 if (!down_read_trylock(&cpufreq_rwsem))
993 return 0;
994
Viresh Kumarfcf80582013-01-29 14:39:08 +0000995#ifdef CONFIG_HOTPLUG_CPU
996 /* Check if this cpu was hot-unplugged earlier and has siblings */
Nathan Zimmer0d1857a2013-02-22 16:24:34 +0000997 read_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumar1b274292013-08-20 12:08:26 +0530998 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
999 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001000 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Viresh Kumar1b274292013-08-20 12:08:26 +05301001 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
Viresh Kumar6eed9402013-08-06 22:53:11 +05301002 up_read(&cpufreq_rwsem);
1003 return ret;
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301004 }
Viresh Kumarfcf80582013-01-29 14:39:08 +00001005 }
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001006 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
Viresh Kumarfcf80582013-01-29 14:39:08 +00001007#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301009 if (frozen)
1010 /* Restore the saved policy when doing light-weight init */
1011 policy = cpufreq_policy_restore(cpu);
1012 else
1013 policy = cpufreq_policy_alloc();
1014
Dave Jones059019a2009-07-08 16:30:03 -04001015 if (!policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016 goto nomem_out;
Dave Jones059019a2009-07-08 16:30:03 -04001017
Srivatsa S. Bhat0d66b912013-09-12 01:42:59 +05301018
1019 /*
1020 * In the resume path, since we restore a saved policy, the assignment
1021 * to policy->cpu is like an update of the existing policy, rather than
1022 * the creation of a brand new one. So we need to perform this update
1023 * by invoking update_policy_cpu().
1024 */
1025 if (frozen && cpu != policy->cpu)
1026 update_policy_cpu(policy, cpu);
1027 else
1028 policy->cpu = cpu;
1029
Viresh Kumar65922462013-02-07 10:56:03 +05301030 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
Rusty Russell835481d2009-01-04 05:18:06 -08001031 cpumask_copy(policy->cpus, cpumask_of(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032
Linus Torvalds1da177e2005-04-16 15:20:36 -07001033 init_completion(&policy->kobj_unregister);
David Howells65f27f32006-11-22 14:55:48 +00001034 INIT_WORK(&policy->update, handle_update);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035
1036 /* call driver. From then on the cpufreq must be able
1037 * to accept all calls to ->verify and ->setpolicy for this CPU
1038 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001039 ret = cpufreq_driver->init(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001040 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001041 pr_debug("initialization failed\n");
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301042 goto err_set_policy_cpu;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001043 }
Viresh Kumar643ae6e2013-01-12 05:14:38 +00001044
Viresh Kumarda60ce92013-10-03 20:28:30 +05301045 if (cpufreq_driver->get) {
1046 policy->cur = cpufreq_driver->get(policy->cpu);
1047 if (!policy->cur) {
1048 pr_err("%s: ->get() failed\n", __func__);
1049 goto err_get_freq;
1050 }
1051 }
1052
Viresh Kumarfcf80582013-01-29 14:39:08 +00001053 /* related cpus should atleast have policy->cpus */
1054 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1055
Viresh Kumar643ae6e2013-01-12 05:14:38 +00001056 /*
1057 * affected cpus must always be the one, which are online. We aren't
1058 * managing offline cpus here.
1059 */
1060 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1061
Mike Chan187d9f42008-12-04 12:19:17 -08001062 policy->user_policy.min = policy->min;
1063 policy->user_policy.max = policy->max;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001064
Thomas Renningera1531ac2008-07-29 22:32:58 -07001065 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1066 CPUFREQ_START, policy);
1067
Viresh Kumarfcf80582013-01-29 14:39:08 +00001068#ifdef CONFIG_HOTPLUG_CPU
1069 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1070 if (gov) {
1071 policy->governor = gov;
1072 pr_debug("Restoring governor %s for cpu %d\n",
1073 policy->governor->name, cpu);
Thomas Renninger4bfa0422009-07-24 15:25:03 +02001074 }
Viresh Kumarfcf80582013-01-29 14:39:08 +00001075#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301077 write_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumar474deff2013-08-20 12:08:25 +05301078 for_each_cpu(j, policy->cpus)
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301079 per_cpu(cpufreq_cpu_data, j) = policy;
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301080 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1081
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301082 if (!frozen) {
Viresh Kumar308b60e2013-07-31 14:35:14 +02001083 ret = cpufreq_add_dev_interface(policy, dev);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301084 if (ret)
1085 goto err_out_unregister;
1086 }
Dave Jones8ff69732006-03-05 03:37:23 -05001087
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301088 write_lock_irqsave(&cpufreq_driver_lock, flags);
1089 list_add(&policy->policy_list, &cpufreq_policy_list);
1090 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1091
Srivatsa S. Bhate18f1682013-07-30 04:24:23 +05301092 cpufreq_init_policy(policy);
1093
Greg Kroah-Hartman038c5b32007-12-17 15:54:39 -04001094 kobject_uevent(&policy->kobj, KOBJ_ADD);
Viresh Kumar6eed9402013-08-06 22:53:11 +05301095 up_read(&cpufreq_rwsem);
1096
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001097 pr_debug("initialization complete\n");
Dave Jones87c32272006-03-29 01:48:37 -05001098
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 return 0;
1100
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101err_out_unregister:
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001102 write_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumar474deff2013-08-20 12:08:25 +05301103 for_each_cpu(j, policy->cpus)
Mike Travis7a6aedf2008-03-25 15:06:53 -07001104 per_cpu(cpufreq_cpu_data, j) = NULL;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001105 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106
Viresh Kumarda60ce92013-10-03 20:28:30 +05301107err_get_freq:
1108 if (cpufreq_driver->exit)
1109 cpufreq_driver->exit(policy);
Viresh Kumar2eaa3e22013-02-07 10:55:00 +05301110err_set_policy_cpu:
Srivatsa S. Bhate9698cc2013-07-30 04:24:11 +05301111 cpufreq_policy_free(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001112nomem_out:
Viresh Kumar6eed9402013-08-06 22:53:11 +05301113 up_read(&cpufreq_rwsem);
1114
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115 return ret;
1116}
1117
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301118/**
1119 * cpufreq_add_dev - add a CPU device
1120 *
1121 * Adds the cpufreq interface for a CPU device.
1122 *
1123 * The Oracle says: try running cpufreq registration/unregistration concurrently
1124 * with with cpu hotplugging and all hell will break loose. Tried to clean this
1125 * mess up, but more thorough testing is needed. - Mathieu
1126 */
1127static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1128{
1129 return __cpufreq_add_dev(dev, sif, false);
1130}
1131
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301132static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301133 unsigned int old_cpu, bool frozen)
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301134{
1135 struct device *cpu_dev;
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301136 int ret;
1137
1138 /* first sibling now owns the new sysfs dir */
Viresh Kumar9c8f1ee2013-09-12 17:06:33 +05301139 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301140
1141 /* Don't touch sysfs files during light-weight tear-down */
1142 if (frozen)
1143 return cpu_dev->id;
1144
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301145 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301146 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301147 if (ret) {
1148 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1149
Viresh Kumar1b750e32013-10-02 14:13:09 +05301150 lock_policy_rwsem_write(old_cpu);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301151 cpumask_set_cpu(old_cpu, policy->cpus);
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301152 unlock_policy_rwsem_write(old_cpu);
1153
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301154 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301155 "cpufreq");
1156
1157 return -EINVAL;
1158 }
1159
1160 return cpu_dev->id;
1161}
1162
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301163static int __cpufreq_remove_dev_prepare(struct device *dev,
1164 struct subsys_interface *sif,
1165 bool frozen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166{
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301167 unsigned int cpu = dev->id, cpus;
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301168 int new_cpu, ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 unsigned long flags;
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301170 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001172 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001174 write_lock_irqsave(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301176 policy = per_cpu(cpufreq_cpu_data, cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301178 /* Save the policy somewhere when doing a light-weight tear-down */
1179 if (frozen)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301180 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301181
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00001182 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301184 if (!policy) {
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001185 pr_debug("%s: No cpu_data found\n", __func__);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301189 if (cpufreq_driver->target) {
1190 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1191 if (ret) {
1192 pr_err("%s: Failed to stop governor\n", __func__);
1193 return ret;
1194 }
1195 }
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001196
Jacob Shin27ecddc2011-04-27 13:32:11 -05001197#ifdef CONFIG_HOTPLUG_CPU
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001198 if (!cpufreq_driver->setpolicy)
Dirk Brandewiefa69e332013-02-06 09:02:11 -08001199 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301200 policy->governor->name, CPUFREQ_NAME_LEN);
Jacob Shin27ecddc2011-04-27 13:32:11 -05001201#endif
1202
Viresh Kumar9c8f1ee2013-09-12 17:06:33 +05301203 lock_policy_rwsem_read(cpu);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301204 cpus = cpumask_weight(policy->cpus);
Viresh Kumar9c8f1ee2013-09-12 17:06:33 +05301205 unlock_policy_rwsem_read(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
Srivatsa S. Bhat61173f22013-09-12 01:43:25 +05301207 if (cpu != policy->cpu) {
1208 if (!frozen)
1209 sysfs_remove_link(&dev->kobj, "cpufreq");
Viresh Kumar73bf0fc2013-02-05 22:21:14 +01001210 } else if (cpus > 1) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301211 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
Srivatsa S. Bhatf9ba6802013-07-30 04:24:36 +05301212 if (new_cpu >= 0) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301213 update_policy_cpu(policy, new_cpu);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301214
1215 if (!frozen) {
Viresh Kumar75949c92013-10-02 14:13:13 +05301216 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1217 __func__, new_cpu, cpu);
Srivatsa S. Bhata82fab22013-07-30 04:24:49 +05301218 }
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001219 }
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001220 }
Venki Pallipadiec282972007-03-26 12:03:19 -07001221
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301222 return 0;
1223}
1224
1225static int __cpufreq_remove_dev_finish(struct device *dev,
1226 struct subsys_interface *sif,
1227 bool frozen)
1228{
1229 unsigned int cpu = dev->id, cpus;
1230 int ret;
1231 unsigned long flags;
1232 struct cpufreq_policy *policy;
1233 struct kobject *kobj;
1234 struct completion *cmp;
1235
1236 read_lock_irqsave(&cpufreq_driver_lock, flags);
1237 policy = per_cpu(cpufreq_cpu_data, cpu);
1238 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1239
1240 if (!policy) {
1241 pr_debug("%s: No cpu_data found\n", __func__);
1242 return -EINVAL;
1243 }
1244
Viresh Kumar1b750e32013-10-02 14:13:09 +05301245 lock_policy_rwsem_write(cpu);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301246 cpus = cpumask_weight(policy->cpus);
Viresh Kumar9c8f1ee2013-09-12 17:06:33 +05301247
1248 if (cpus > 1)
1249 cpumask_clear_cpu(cpu, policy->cpus);
1250 unlock_policy_rwsem_write(cpu);
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301251
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001252 /* If cpu is last user of policy, free policy */
1253 if (cpus == 1) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301254 if (cpufreq_driver->target) {
1255 ret = __cpufreq_governor(policy,
1256 CPUFREQ_GOV_POLICY_EXIT);
1257 if (ret) {
1258 pr_err("%s: Failed to exit governor\n",
1259 __func__);
1260 return ret;
1261 }
Viresh Kumaredab2fb2013-08-20 12:08:22 +05301262 }
Rafael J. Wysocki2a998592013-07-30 00:32:00 +02001263
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301264 if (!frozen) {
1265 lock_policy_rwsem_read(cpu);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301266 kobj = &policy->kobj;
1267 cmp = &policy->kobj_unregister;
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301268 unlock_policy_rwsem_read(cpu);
1269 kobject_put(kobj);
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001270
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301271 /*
1272 * We need to make sure that the underlying kobj is
1273 * actually not referenced anymore by anybody before we
1274 * proceed with unloading.
1275 */
1276 pr_debug("waiting for dropping of refcount\n");
1277 wait_for_completion(cmp);
1278 pr_debug("wait complete\n");
1279 }
1280
1281 /*
1282 * Perform the ->exit() even during light-weight tear-down,
1283 * since this is a core component, and is essential for the
1284 * subsequent light-weight ->init() to succeed.
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001285 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001286 if (cpufreq_driver->exit)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301287 cpufreq_driver->exit(policy);
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001288
Viresh Kumar9515f4d2013-08-20 12:08:23 +05301289 /* Remove policy from list of active policies */
1290 write_lock_irqsave(&cpufreq_driver_lock, flags);
1291 list_del(&policy->policy_list);
1292 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1293
Srivatsa S. Bhat84148092013-07-30 04:25:10 +05301294 if (!frozen)
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301295 cpufreq_policy_free(policy);
Rafael J. Wysocki2a998592013-07-30 00:32:00 +02001296 } else {
Rafael J. Wysocki2a998592013-07-30 00:32:00 +02001297 if (cpufreq_driver->target) {
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301298 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1299 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1300 pr_err("%s: Failed to start governor\n",
1301 __func__);
1302 return ret;
1303 }
Rafael J. Wysocki2a998592013-07-30 00:32:00 +02001304 }
Viresh Kumarb8eed8a2013-01-14 13:23:03 +00001305 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001306
Viresh Kumar474deff2013-08-20 12:08:25 +05301307 per_cpu(cpufreq_cpu_data, cpu) = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308 return 0;
1309}
1310
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301311/**
Viresh Kumar27a862e2013-10-02 14:13:14 +05301312 * cpufreq_remove_dev - remove a CPU device
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301313 *
1314 * Removes the cpufreq interface for a CPU device.
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05301315 */
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001316static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001317{
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001318 unsigned int cpu = dev->id;
Viresh Kumar27a862e2013-10-02 14:13:14 +05301319 int ret;
Venki Pallipadiec282972007-03-26 12:03:19 -07001320
1321 if (cpu_is_offline(cpu))
1322 return 0;
1323
Viresh Kumar27a862e2013-10-02 14:13:14 +05301324 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1325
1326 if (!ret)
1327 ret = __cpufreq_remove_dev_finish(dev, sif, false);
1328
1329 return ret;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001330}
1331
David Howells65f27f32006-11-22 14:55:48 +00001332static void handle_update(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333{
David Howells65f27f32006-11-22 14:55:48 +00001334 struct cpufreq_policy *policy =
1335 container_of(work, struct cpufreq_policy, update);
1336 unsigned int cpu = policy->cpu;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001337 pr_debug("handle_update for cpu %u called\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 cpufreq_update_policy(cpu);
1339}
1340
1341/**
Viresh Kumarbb176f72013-06-19 14:19:33 +05301342 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1343 * in deep trouble.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344 * @cpu: cpu number
1345 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1346 * @new_freq: CPU frequency the CPU actually runs at
1347 *
Dave Jones29464f22009-01-18 01:37:11 -05001348 * We adjust to the current frequency first, and need to clean up later.
1349 * So either call cpufreq_update_policy() or schedule handle_update().
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 */
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301351static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1352 unsigned int new_freq)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353{
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301354 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355 struct cpufreq_freqs freqs;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301356 unsigned long flags;
1357
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001358	pr_debug("Warning: CPU frequency out of sync: cpufreq thinks it is "
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359		"%u kHz, hardware reports %u kHz\n", old_freq, new_freq);
1360
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361 freqs.old = old_freq;
1362 freqs.new = new_freq;
Viresh Kumarb43a7ff2013-03-24 11:56:43 +05301363
1364 read_lock_irqsave(&cpufreq_driver_lock, flags);
1365 policy = per_cpu(cpufreq_cpu_data, cpu);
1366 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1367
1368 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1369 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370}
1371
Dave Jones32ee8c32006-02-28 00:43:23 -05001372/**
Dhaval Giani4ab70df2006-12-13 14:49:15 +05301373 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001374 * @cpu: CPU number
1375 *
1376 * This is the last known freq, without actually getting it from the driver.
1377 * Return value will be the same as what is shown in scaling_cur_freq in sysfs.
1378 */
1379unsigned int cpufreq_quick_get(unsigned int cpu)
1380{
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001381 struct cpufreq_policy *policy;
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301382 unsigned int ret_freq = 0;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001383
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001384 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1385 return cpufreq_driver->get(cpu);
Dirk Brandewie9e21ba82013-02-06 09:02:08 -08001386
1387 policy = cpufreq_cpu_get(cpu);
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001388 if (policy) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301389 ret_freq = policy->cur;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001390 cpufreq_cpu_put(policy);
1391 }
1392
Dave Jones4d34a672008-02-07 16:33:49 -05001393 return ret_freq;
Venkatesh Pallipadi95235ca2005-12-02 10:43:20 -08001394}
1395EXPORT_SYMBOL(cpufreq_quick_get);
1396
Jesse Barnes3d737102011-06-28 10:59:12 -07001397/**
1398 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1399 * @cpu: CPU number
1400 *
1401 * Just return the max possible frequency for a given CPU.
1402 */
1403unsigned int cpufreq_quick_get_max(unsigned int cpu)
1404{
1405 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1406 unsigned int ret_freq = 0;
1407
1408 if (policy) {
1409 ret_freq = policy->max;
1410 cpufreq_cpu_put(policy);
1411 }
1412
1413 return ret_freq;
1414}
1415EXPORT_SYMBOL(cpufreq_quick_get_max);
1416
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001417static unsigned int __cpufreq_get(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418{
Mike Travis7a6aedf2008-03-25 15:06:53 -07001419 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301420 unsigned int ret_freq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001422 if (!cpufreq_driver->get)
Dave Jones4d34a672008-02-07 16:33:49 -05001423 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001425 ret_freq = cpufreq_driver->get(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301427 if (ret_freq && policy->cur &&
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001428 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301429 /* verify no discrepancy between actual and
1430 saved value exists */
1431 if (unlikely(ret_freq != policy->cur)) {
1432 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 schedule_work(&policy->update);
1434 }
1435 }
1436
Dave Jones4d34a672008-02-07 16:33:49 -05001437 return ret_freq;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001438}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001440/**
1441 * cpufreq_get - get the current CPU frequency (in kHz)
1442 * @cpu: CPU number
1443 *
1444 * Get the current frequency of the CPU, as reported by the cpufreq driver
1445 */
1446unsigned int cpufreq_get(unsigned int cpu)
1447{
1448 unsigned int ret_freq = 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001449
Viresh Kumar26ca8692013-09-20 22:37:31 +05301450 if (cpufreq_disabled() || !cpufreq_driver)
1451 return -ENOENT;
1452
Viresh Kumar6eed9402013-08-06 22:53:11 +05301453 if (!down_read_trylock(&cpufreq_rwsem))
1454 return 0;
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001455
Viresh Kumar1b750e32013-10-02 14:13:09 +05301456 lock_policy_rwsem_read(cpu);
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001457
1458 ret_freq = __cpufreq_get(cpu);
1459
1460 unlock_policy_rwsem_read(cpu);
Viresh Kumar6eed9402013-08-06 22:53:11 +05301461 up_read(&cpufreq_rwsem);
1462
Dave Jones4d34a672008-02-07 16:33:49 -05001463 return ret_freq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464}
1465EXPORT_SYMBOL(cpufreq_get);
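
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * consumer of the query helpers above. cpufreq_quick_get() returns the
 * cached policy->cur without calling into the driver, cpufreq_get() asks
 * the hardware driver, and both return 0 when no policy exists for the
 * CPU. The function name below is made up for the example.
 */
static void example_report_cpu_freq(unsigned int cpu)
{
	unsigned int cached_khz = cpufreq_quick_get(cpu);
	unsigned int max_khz = cpufreq_quick_get_max(cpu);
	unsigned int cur_khz = cpufreq_get(cpu);

	pr_info("cpu%u: cached %u kHz, current %u kHz, max %u kHz\n",
		cpu, cached_khz, cur_khz, max_khz);
}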
1466
Kay Sievers8a25a2f2011-12-21 14:29:42 -08001467static struct subsys_interface cpufreq_interface = {
1468 .name = "cpufreq",
1469 .subsys = &cpu_subsys,
1470 .add_dev = cpufreq_add_dev,
1471 .remove_dev = cpufreq_remove_dev,
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001472};
1473
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474/**
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001475 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1476 *
1477 * This function is only executed for the boot processor. The other CPUs
1478 * have been put offline by means of CPU hotplug.
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001479 */
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001480static int cpufreq_bp_suspend(void)
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001481{
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301482 int ret = 0;
Dave Jones4bc5d342009-08-04 14:03:25 -04001483
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001484 int cpu = smp_processor_id();
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301485 struct cpufreq_policy *policy;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001486
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001487 pr_debug("suspending cpu %u\n", cpu);
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001488
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001489 /* If there's no policy for the boot CPU, we have nothing to do. */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301490 policy = cpufreq_cpu_get(cpu);
1491 if (!policy)
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001492 return 0;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001493
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001494 if (cpufreq_driver->suspend) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301495 ret = cpufreq_driver->suspend(policy);
Dominik Brodowskice6c3992009-08-07 22:58:51 +02001496 if (ret)
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001497 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301498 "step on CPU %u\n", policy->cpu);
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001499 }
1500
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301501 cpufreq_cpu_put(policy);
Dave Jonesc9060492008-02-07 16:32:18 -05001502 return ret;
Benjamin Herrenschmidt42d4dc32005-04-29 07:40:12 -07001503}
1504
1505/**
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001506 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 *
1508 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
Dominik Brodowskice6c3992009-08-07 22:58:51 +02001509 * 2.) schedule a call to cpufreq_update_policy() ASAP as interrupts are
1510 * restored. It will verify that the current freq is in sync with
1511 * what we believe it to be. This is a bit later than when it
1512 * should be, but nonetheless it's better than calling
1513 * cpufreq_driver->get() here which might re-enable interrupts...
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001514 *
1515 * This function is only executed for the boot CPU. The other CPUs have not
1516 * been turned on yet.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517 */
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001518static void cpufreq_bp_resume(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519{
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301520 int ret = 0;
Dave Jones4bc5d342009-08-04 14:03:25 -04001521
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001522 int cpu = smp_processor_id();
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301523 struct cpufreq_policy *policy;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001525 pr_debug("resuming cpu %u\n", cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001527 /* If there's no policy for the boot CPU, we have nothing to do. */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301528 policy = cpufreq_cpu_get(cpu);
1529 if (!policy)
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001530 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001532 if (cpufreq_driver->resume) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301533 ret = cpufreq_driver->resume(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 if (ret) {
1535 printk(KERN_ERR "cpufreq: resume failed in ->resume "
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301536 "step on CPU %u\n", policy->cpu);
Dave Jonesc9060492008-02-07 16:32:18 -05001537 goto fail;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 }
1539 }
1540
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301541 schedule_work(&policy->update);
Dominik Brodowskice6c3992009-08-07 22:58:51 +02001542
Dave Jonesc9060492008-02-07 16:32:18 -05001543fail:
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301544 cpufreq_cpu_put(policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545}
1546
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01001547static struct syscore_ops cpufreq_syscore_ops = {
1548 .suspend = cpufreq_bp_suspend,
1549 .resume = cpufreq_bp_resume,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550};
1551
Borislav Petkov9d950462013-01-20 10:24:28 +00001552/**
1553 * cpufreq_get_current_driver - return current driver's name
1554 *
1555 * Return the name string of the currently loaded cpufreq driver
1556 * or NULL, if none.
1557 */
1558const char *cpufreq_get_current_driver(void)
1559{
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001560 if (cpufreq_driver)
1561 return cpufreq_driver->name;
1562
1563 return NULL;
Borislav Petkov9d950462013-01-20 10:24:28 +00001564}
1565EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566
1567/*********************************************************************
1568 * NOTIFIER LISTS INTERFACE *
1569 *********************************************************************/
1570
1571/**
1572 * cpufreq_register_notifier - register a driver with cpufreq
1573 * @nb: notifier function to register
1574 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1575 *
Dave Jones32ee8c32006-02-28 00:43:23 -05001576 * Add a driver to one of two lists: either a list of drivers that
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 * are notified about clock rate changes (once before and once after
1578 * the transition), or a list of drivers that are notified about
1579 * changes in cpufreq policy.
1580 *
1581 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001582 * blocking_notifier_chain_register.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 */
1584int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1585{
1586 int ret;
1587
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001588 if (cpufreq_disabled())
1589 return -EINVAL;
1590
Cesar Eduardo Barros74212ca2008-02-16 08:41:24 -02001591 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1592
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 switch (list) {
1594 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001595 ret = srcu_notifier_chain_register(
Alan Sterne041c682006-03-27 01:16:30 -08001596 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 break;
1598 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001599 ret = blocking_notifier_chain_register(
1600 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 break;
1602 default:
1603 ret = -EINVAL;
1604 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605
1606 return ret;
1607}
1608EXPORT_SYMBOL(cpufreq_register_notifier);
1609
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610/**
1611 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1612 * @nb: notifier block to be unregistered
Viresh Kumarbb176f72013-06-19 14:19:33 +05301613 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
Linus Torvalds1da177e2005-04-16 15:20:36 -07001614 *
1615 * Remove a driver from the CPU frequency notifier list.
1616 *
1617 * This function may sleep, and has the same return conditions as
Alan Sterne041c682006-03-27 01:16:30 -08001618 * blocking_notifier_chain_unregister.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001619 */
1620int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1621{
1622 int ret;
1623
Dirk Brandewied5aaffa2013-01-17 16:22:21 +00001624 if (cpufreq_disabled())
1625 return -EINVAL;
1626
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 switch (list) {
1628 case CPUFREQ_TRANSITION_NOTIFIER:
Alan Sternb4dfdbb2006-10-04 02:17:06 -07001629 ret = srcu_notifier_chain_unregister(
Alan Sterne041c682006-03-27 01:16:30 -08001630 &cpufreq_transition_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 break;
1632 case CPUFREQ_POLICY_NOTIFIER:
Alan Sterne041c682006-03-27 01:16:30 -08001633 ret = blocking_notifier_chain_unregister(
1634 &cpufreq_policy_notifier_list, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 break;
1636 default:
1637 ret = -EINVAL;
1638 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639
1640 return ret;
1641}
1642EXPORT_SYMBOL(cpufreq_unregister_notifier);
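
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * module registering for frequency-transition notifications with the API
 * documented above. All example_* names are made up; the callback runs
 * once before (CPUFREQ_PRECHANGE) and once after (CPUFREQ_POSTCHANGE)
 * each transition, with a struct cpufreq_freqs as its data argument.
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_info("cpu%u switched from %u to %u kHz\n",
			freqs->cpu, freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_transition_nb = {
	.notifier_call = example_transition_cb,
};

static int __init example_notifier_init(void)
{
	return cpufreq_register_notifier(&example_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}

static void __exit example_notifier_exit(void)
{
	cpufreq_unregister_notifier(&example_transition_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);
}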
1643
1644
1645/*********************************************************************
1646 * GOVERNORS *
1647 *********************************************************************/
1648
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649int __cpufreq_driver_target(struct cpufreq_policy *policy,
1650 unsigned int target_freq,
1651 unsigned int relation)
1652{
1653 int retval = -EINVAL;
Viresh Kumar72499242012-10-31 01:28:21 +01001654 unsigned int old_target_freq = target_freq;
Ashok Rajc32b6b82005-10-30 14:59:54 -08001655
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04001656 if (cpufreq_disabled())
1657 return -ENODEV;
1658
Viresh Kumar72499242012-10-31 01:28:21 +01001659 /* Make sure that target_freq is within supported range */
1660 if (target_freq > policy->max)
1661 target_freq = policy->max;
1662 if (target_freq < policy->min)
1663 target_freq = policy->min;
1664
1665 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1666 policy->cpu, target_freq, relation, old_target_freq);
Viresh Kumar5a1c0222012-10-31 01:28:15 +01001667
1668 if (target_freq == policy->cur)
1669 return 0;
1670
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001671 if (cpufreq_driver->target)
1672 retval = cpufreq_driver->target(policy, target_freq, relation);
Ashok Raj90d45d12005-11-08 21:34:24 -08001673
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 return retval;
1675}
1676EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1677
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678int cpufreq_driver_target(struct cpufreq_policy *policy,
1679 unsigned int target_freq,
1680 unsigned int relation)
1681{
Julia Lawallf1829e42008-07-25 22:44:53 +02001682 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683
Viresh Kumar1b750e32013-10-02 14:13:09 +05301684 lock_policy_rwsem_write(policy->cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685
1686 ret = __cpufreq_driver_target(policy, target_freq, relation);
1687
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08001688 unlock_policy_rwsem_write(policy->cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 return ret;
1691}
1692EXPORT_SYMBOL_GPL(cpufreq_driver_target);
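
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * caller asking the core to change frequency. cpufreq_driver_target()
 * takes the policy rwsem itself, so the caller must not already hold it;
 * the core clamps the request to policy->min/max before invoking the
 * driver. CPUFREQ_RELATION_H selects the highest supported frequency at
 * or below the requested target.
 */
static int example_request_max_freq(struct cpufreq_policy *policy)
{
	return cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}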
1693
Arjan van de Ven153d7f32006-07-26 15:40:07 +02001694/*
Arjan van de Ven153d7f32006-07-26 15:40:07 +02001695 * Dispatch governor events; "event" is CPUFREQ_GOV_LIMITS when the policy limits change.
1696 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301698static int __cpufreq_governor(struct cpufreq_policy *policy,
1699 unsigned int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700{
Dave Jonescc993ca2005-07-28 09:43:56 -07001701 int ret;
Thomas Renninger6afde102007-10-02 13:28:13 -07001702
1703	/* The fallback governor only needs to be defined when the default
1704	   governor is known to have latency restrictions, e.g. conservative
1705	   or ondemand. Kconfig already ensures that this is the case.
1706 */
1707#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1708 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1709#else
1710 struct cpufreq_governor *gov = NULL;
1711#endif
Thomas Renninger1c256242007-10-02 13:28:12 -07001712
1713 if (policy->governor->max_transition_latency &&
1714 policy->cpuinfo.transition_latency >
1715 policy->governor->max_transition_latency) {
Thomas Renninger6afde102007-10-02 13:28:13 -07001716 if (!gov)
1717 return -EINVAL;
1718 else {
1719 printk(KERN_WARNING "%s governor failed, too long"
1720 " transition latency of HW, fallback"
1721 " to %s governor\n",
1722 policy->governor->name,
1723 gov->name);
1724 policy->governor = gov;
1725 }
Thomas Renninger1c256242007-10-02 13:28:12 -07001726 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727
Viresh Kumarfe492f32013-08-06 22:53:10 +05301728 if (event == CPUFREQ_GOV_POLICY_INIT)
1729 if (!try_module_get(policy->governor->owner))
1730 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001732 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301733 policy->cpu, event);
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08001734
1735 mutex_lock(&cpufreq_governor_lock);
Srivatsa S. Bhat56d07db2013-09-07 01:23:55 +05301736 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
Viresh Kumarf73d3932013-08-31 17:53:40 +05301737 || (!policy->governor_enabled
1738 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08001739 mutex_unlock(&cpufreq_governor_lock);
1740 return -EBUSY;
1741 }
1742
1743 if (event == CPUFREQ_GOV_STOP)
1744 policy->governor_enabled = false;
1745 else if (event == CPUFREQ_GOV_START)
1746 policy->governor_enabled = true;
1747
1748 mutex_unlock(&cpufreq_governor_lock);
1749
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 ret = policy->governor->governor(policy, event);
1751
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00001752 if (!ret) {
1753 if (event == CPUFREQ_GOV_POLICY_INIT)
1754 policy->governor->initialized++;
1755 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1756 policy->governor->initialized--;
Xiaoguang Chen95731eb2013-06-19 15:00:07 +08001757 } else {
1758 /* Restore original values */
1759 mutex_lock(&cpufreq_governor_lock);
1760 if (event == CPUFREQ_GOV_STOP)
1761 policy->governor_enabled = true;
1762 else if (event == CPUFREQ_GOV_START)
1763 policy->governor_enabled = false;
1764 mutex_unlock(&cpufreq_governor_lock);
Viresh Kumar4d5dcc42013-03-27 15:58:58 +00001765 }
Viresh Kumarb3940582013-02-01 05:42:58 +00001766
Viresh Kumarfe492f32013-08-06 22:53:10 +05301767 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1768 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 module_put(policy->governor->owner);
1770
1771 return ret;
1772}
1773
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774int cpufreq_register_governor(struct cpufreq_governor *governor)
1775{
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07001776 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777
1778 if (!governor)
1779 return -EINVAL;
1780
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04001781 if (cpufreq_disabled())
1782 return -ENODEV;
1783
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08001784 mutex_lock(&cpufreq_governor_mutex);
Dave Jones32ee8c32006-02-28 00:43:23 -05001785
Viresh Kumarb3940582013-02-01 05:42:58 +00001786 governor->initialized = 0;
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07001787 err = -EBUSY;
1788 if (__find_governor(governor->name) == NULL) {
1789 err = 0;
1790 list_add(&governor->governor_list, &cpufreq_governor_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792
Dave Jones32ee8c32006-02-28 00:43:23 -05001793 mutex_unlock(&cpufreq_governor_mutex);
Jeremy Fitzhardinge3bcb09a2006-07-06 12:30:26 -07001794 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795}
1796EXPORT_SYMBOL_GPL(cpufreq_register_governor);
1797
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1799{
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05001800#ifdef CONFIG_HOTPLUG_CPU
1801 int cpu;
1802#endif
1803
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 if (!governor)
1805 return;
1806
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04001807 if (cpufreq_disabled())
1808 return;
1809
Prarit Bhargava90e41ba2009-11-12 09:18:46 -05001810#ifdef CONFIG_HOTPLUG_CPU
1811 for_each_present_cpu(cpu) {
1812 if (cpu_online(cpu))
1813 continue;
1814 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1815 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1816 }
1817#endif
1818
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08001819 mutex_lock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 list_del(&governor->governor_list);
akpm@osdl.org3fc54d32006-01-13 15:54:22 -08001821 mutex_unlock(&cpufreq_governor_mutex);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 return;
1823}
1824EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
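
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * governor (all example_* names are made up) wired up through
 * cpufreq_register_governor(). The ->governor() callback receives the
 * INIT/EXIT/START/STOP/LIMITS events dispatched by __cpufreq_governor()
 * above; this trivial variant simply pins the CPU at policy->min, much
 * like a powersave-style governor.
 */
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		return __cpufreq_driver_target(policy, policy->min,
					       CPUFREQ_RELATION_L);
	default:
		return 0;
	}
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};

static int __init example_governor_init(void)
{
	return cpufreq_register_governor(&example_governor);
}

static void __exit example_governor_exit(void)
{
	cpufreq_unregister_governor(&example_governor);
}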
1825
1826
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827/*********************************************************************
1828 * POLICY INTERFACE *
1829 *********************************************************************/
1830
1831/**
1832 * cpufreq_get_policy - get the current cpufreq_policy
Dave Jones29464f22009-01-18 01:37:11 -05001833 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1834 * is written
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 *
1836 * Reads the current cpufreq policy.
1837 */
1838int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1839{
1840 struct cpufreq_policy *cpu_policy;
1841 if (!policy)
1842 return -EINVAL;
1843
1844 cpu_policy = cpufreq_cpu_get(cpu);
1845 if (!cpu_policy)
1846 return -EINVAL;
1847
Viresh Kumard5b73cd2013-08-06 22:53:06 +05301848 memcpy(policy, cpu_policy, sizeof(*policy));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
1850 cpufreq_cpu_put(cpu_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 return 0;
1852}
1853EXPORT_SYMBOL(cpufreq_get_policy);
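
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * reader of the current limits. cpufreq_get_policy() copies the live
 * policy into a caller-provided buffer, so no reference needs to be
 * dropped afterwards.
 */
static void example_show_limits(unsigned int cpu)
{
	struct cpufreq_policy policy;

	if (cpufreq_get_policy(&policy, cpu))
		return;

	pr_info("cpu%u: %u..%u kHz, governor %s\n", cpu, policy.min,
		policy.max, policy.governor ? policy.governor->name : "none");
}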
1854
Arjan van de Ven153d7f32006-07-26 15:40:07 +02001855/*
Viresh Kumar037ce832013-10-02 14:13:16 +05301856 * policy : current policy.
1857 * new_policy: policy to be set.
Arjan van de Ven153d7f32006-07-26 15:40:07 +02001858 */
Viresh Kumar037ce832013-10-02 14:13:16 +05301859static int cpufreq_set_policy(struct cpufreq_policy *policy,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301860 struct cpufreq_policy *new_policy)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861{
Viresh Kumar7bd353a2013-03-27 15:58:57 +00001862 int ret = 0, failed = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301864 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1865 new_policy->min, new_policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866
Viresh Kumard5b73cd2013-08-06 22:53:06 +05301867 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301869 if (new_policy->min > policy->max || new_policy->max < policy->min) {
Mattia Dongili9c9a43e2006-07-05 23:12:20 +02001870 ret = -EINVAL;
1871 goto error_out;
1872 }
1873
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 /* verify the cpu speed can be set within this limit */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301875 ret = cpufreq_driver->verify(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 if (ret)
1877 goto error_out;
1878
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 /* adjust if necessary - all reasons */
Alan Sterne041c682006-03-27 01:16:30 -08001880 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301881 CPUFREQ_ADJUST, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
1883 /* adjust if necessary - hardware incompatibility*/
Alan Sterne041c682006-03-27 01:16:30 -08001884 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301885 CPUFREQ_INCOMPATIBLE, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886
Viresh Kumarbb176f72013-06-19 14:19:33 +05301887 /*
1888 * verify the cpu speed can be set within this limit, which might be
1889 * different to the first one
1890 */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301891 ret = cpufreq_driver->verify(new_policy);
Alan Sterne041c682006-03-27 01:16:30 -08001892 if (ret)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 goto error_out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894
1895 /* notification of the new policy */
Alan Sterne041c682006-03-27 01:16:30 -08001896 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301897 CPUFREQ_NOTIFY, new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301899 policy->min = new_policy->min;
1900 policy->max = new_policy->max;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001902 pr_debug("new min and max freqs are %u - %u kHz\n",
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301903 policy->min, policy->max);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001905 if (cpufreq_driver->setpolicy) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301906 policy->policy = new_policy->policy;
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001907 pr_debug("setting range\n");
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301908 ret = cpufreq_driver->setpolicy(new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 } else {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301910 if (new_policy->governor != policy->governor) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 /* save old, working values */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301912 struct cpufreq_governor *old_gov = policy->governor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001914 pr_debug("governor switch\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
1916 /* end old governor */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301917 if (policy->governor) {
1918 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1919 unlock_policy_rwsem_write(new_policy->cpu);
1920 __cpufreq_governor(policy,
Viresh Kumar7bd353a2013-03-27 15:58:57 +00001921 CPUFREQ_GOV_POLICY_EXIT);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301922 lock_policy_rwsem_write(new_policy->cpu);
Viresh Kumar7bd353a2013-03-27 15:58:57 +00001923 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924
1925 /* start new governor */
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301926 policy->governor = new_policy->governor;
1927 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
1928 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
Viresh Kumar7bd353a2013-03-27 15:58:57 +00001929 failed = 0;
Viresh Kumar955ef482013-05-16 05:09:58 +00001930 } else {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301931 unlock_policy_rwsem_write(new_policy->cpu);
1932 __cpufreq_governor(policy,
Viresh Kumar7bd353a2013-03-27 15:58:57 +00001933 CPUFREQ_GOV_POLICY_EXIT);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301934 lock_policy_rwsem_write(new_policy->cpu);
Viresh Kumar955ef482013-05-16 05:09:58 +00001935 }
Viresh Kumar7bd353a2013-03-27 15:58:57 +00001936 }
1937
1938 if (failed) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 /* new governor failed, so re-start old one */
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001940 pr_debug("starting governor %s failed\n",
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301941 policy->governor->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 if (old_gov) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301943 policy->governor = old_gov;
1944 __cpufreq_governor(policy,
Viresh Kumar7bd353a2013-03-27 15:58:57 +00001945 CPUFREQ_GOV_POLICY_INIT);
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301946 __cpufreq_governor(policy,
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05301947 CPUFREQ_GOV_START);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 }
1949 ret = -EINVAL;
1950 goto error_out;
1951 }
1952 /* might be a policy change, too, so fall through */
1953 }
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001954 pr_debug("governor: change or update limits\n");
Viresh Kumar3de9bde2013-08-06 22:53:13 +05301955 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 }
1957
Dave Jones7d5e3502006-02-02 17:03:42 -05001958error_out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 return ret;
1960}
1961
1962/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
1964 * @cpu: CPU which shall be re-evaluated
1965 *
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001966 * Useful for policy notifiers which have different requirements
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967 * at different times.
1968 */
1969int cpufreq_update_policy(unsigned int cpu)
1970{
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301971 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1972 struct cpufreq_policy new_policy;
Julia Lawallf1829e42008-07-25 22:44:53 +02001973 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301975 if (!policy) {
Julia Lawallf1829e42008-07-25 22:44:53 +02001976 ret = -ENODEV;
1977 goto no_policy;
1978 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
Viresh Kumar1b750e32013-10-02 14:13:09 +05301980 lock_policy_rwsem_write(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001982 pr_debug("updating policy for CPU %u\n", cpu);
Viresh Kumard5b73cd2013-08-06 22:53:06 +05301983 memcpy(&new_policy, policy, sizeof(*policy));
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301984 new_policy.min = policy->user_policy.min;
1985 new_policy.max = policy->user_policy.max;
1986 new_policy.policy = policy->user_policy.policy;
1987 new_policy.governor = policy->user_policy.governor;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
Viresh Kumarbb176f72013-06-19 14:19:33 +05301989 /*
1990 * BIOS might change freq behind our back
1991 * -> ask driver for current freq and notify governors about a change
1992 */
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02001993 if (cpufreq_driver->get) {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301994 new_policy.cur = cpufreq_driver->get(cpu);
1995 if (!policy->cur) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02001996 pr_debug("Driver did not initialize current freq");
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301997 policy->cur = new_policy.cur;
Thomas Renningera85f7bd2006-02-01 11:36:04 +01001998 } else {
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05301999 if (policy->cur != new_policy.cur && cpufreq_driver->target)
2000 cpufreq_out_of_sync(cpu, policy->cur,
2001 new_policy.cur);
Thomas Renningera85f7bd2006-02-01 11:36:04 +01002002 }
Thomas Renninger0961dd02006-01-26 18:46:33 +01002003 }
2004
Viresh Kumar037ce832013-10-02 14:13:16 +05302005 ret = cpufreq_set_policy(policy, &new_policy);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002007 unlock_policy_rwsem_write(cpu);
2008
Viresh Kumar3a3e9e02013-08-06 22:53:05 +05302009 cpufreq_cpu_put(policy);
Julia Lawallf1829e42008-07-25 22:44:53 +02002010no_policy:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 return ret;
2012}
2013EXPORT_SYMBOL(cpufreq_update_policy);
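
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * handler for a firmware event that may have changed the allowed
 * frequency range, asking the core to re-evaluate the policy of every
 * online CPU. The function name is made up for the example.
 */
static void example_reevaluate_all_policies(void)
{
	unsigned int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);
	put_online_cpus();
}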
2014
Paul Gortmaker27609842013-06-19 13:54:04 -04002015static int cpufreq_cpu_callback(struct notifier_block *nfb,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002016 unsigned long action, void *hcpu)
2017{
2018 unsigned int cpu = (unsigned long)hcpu;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002019 struct device *dev;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302020 bool frozen = false;
Ashok Rajc32b6b82005-10-30 14:59:54 -08002021
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002022 dev = get_cpu_device(cpu);
2023 if (dev) {
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302024
2025 if (action & CPU_TASKS_FROZEN)
2026 frozen = true;
2027
2028 switch (action & ~CPU_TASKS_FROZEN) {
Ashok Rajc32b6b82005-10-30 14:59:54 -08002029 case CPU_ONLINE:
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302030 __cpufreq_add_dev(dev, NULL, frozen);
Srivatsa S. Bhat23d328992013-07-30 04:23:56 +05302031 cpufreq_update_policy(cpu);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002032 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302033
Ashok Rajc32b6b82005-10-30 14:59:54 -08002034 case CPU_DOWN_PREPARE:
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05302035 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
Srivatsa S. Bhat1aee40a2013-09-07 01:23:27 +05302036 break;
2037
2038 case CPU_POST_DEAD:
Srivatsa S. Bhatcedb70a2013-09-07 01:23:09 +05302039 __cpufreq_remove_dev_finish(dev, NULL, frozen);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002040 break;
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302041
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002042 case CPU_DOWN_FAILED:
Srivatsa S. Bhat5302c3f2013-07-30 04:25:25 +05302043 __cpufreq_add_dev(dev, NULL, frozen);
Ashok Rajc32b6b82005-10-30 14:59:54 -08002044 break;
2045 }
2046 }
2047 return NOTIFY_OK;
2048}
2049
Neal Buckendahl9c36f742010-06-22 22:02:44 -05002050static struct notifier_block __refdata cpufreq_cpu_notifier = {
Viresh Kumarbb176f72013-06-19 14:19:33 +05302051 .notifier_call = cpufreq_cpu_callback,
Ashok Rajc32b6b82005-10-30 14:59:54 -08002052};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053
2054/*********************************************************************
2055 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2056 *********************************************************************/
2057
2058/**
2059 * cpufreq_register_driver - register a CPU Frequency driver
2060 * @driver_data: A struct cpufreq_driver containing the values
2061 * submitted by the CPU Frequency driver.
2062 *
Viresh Kumarbb176f72013-06-19 14:19:33 +05302063 * Registers a CPU Frequency driver to this core code. This code
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 * returns zero on success, -EEXIST when another driver got here first
Dave Jones32ee8c32006-02-28 00:43:23 -05002065 * (and isn't unregistered in the meantime).
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 *
2067 */
Linus Torvalds221dee22007-02-26 14:55:48 -08002068int cpufreq_register_driver(struct cpufreq_driver *driver_data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069{
2070 unsigned long flags;
2071 int ret;
2072
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002073 if (cpufreq_disabled())
2074 return -ENODEV;
2075
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 if (!driver_data || !driver_data->verify || !driver_data->init ||
2077 ((!driver_data->setpolicy) && (!driver_data->target)))
2078 return -EINVAL;
2079
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002080 pr_debug("trying to register driver %s\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
2082 if (driver_data->setpolicy)
2083 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2084
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002085 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002086 if (cpufreq_driver) {
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002087 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Yinghai Lu4dea5802013-09-18 21:05:20 -07002088 return -EEXIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 }
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002090 cpufreq_driver = driver_data;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002091 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002093 ret = subsys_interface_register(&cpufreq_interface);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002094 if (ret)
2095 goto err_null_driver;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002097 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 int i;
2099 ret = -ENODEV;
2100
2101 /* check for at least one working CPU */
Mike Travis7a6aedf2008-03-25 15:06:53 -07002102 for (i = 0; i < nr_cpu_ids; i++)
2103 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 ret = 0;
Mike Travis7a6aedf2008-03-25 15:06:53 -07002105 break;
2106 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107
2108 /* if all ->init() calls failed, unregister */
2109 if (ret) {
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002110 pr_debug("no CPU initialized for driver %s\n",
Gautham R Shenoye08f5f52006-10-26 16:20:58 +05302111 driver_data->name);
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002112 goto err_if_unreg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 }
2114 }
2115
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002116 register_hotcpu_notifier(&cpufreq_cpu_notifier);
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002117 pr_debug("driver %s up and running\n", driver_data->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002119 return 0;
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002120err_if_unreg:
2121 subsys_interface_unregister(&cpufreq_interface);
Jiri Slaby8f5bc2a2011-03-01 17:41:10 +01002122err_null_driver:
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002123 write_lock_irqsave(&cpufreq_driver_lock, flags);
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002124 cpufreq_driver = NULL;
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002125 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Dave Jones4d34a672008-02-07 16:33:49 -05002126 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127}
2128EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2129
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130/**
2131 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2132 *
Viresh Kumarbb176f72013-06-19 14:19:33 +05302133 * Unregister the current CPUFreq driver. Only call this if you have
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 * the right to do so, i.e. if you have succeeded in initialising before!
2135 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2136 * currently not initialised.
2137 */
Linus Torvalds221dee22007-02-26 14:55:48 -08002138int cpufreq_unregister_driver(struct cpufreq_driver *driver)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139{
2140 unsigned long flags;
2141
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002142 if (!cpufreq_driver || (driver != cpufreq_driver))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
Dominik Brodowski2d06d8c2011-03-27 15:04:46 +02002145 pr_debug("unregistering driver %s\n", driver->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146
Kay Sievers8a25a2f2011-12-21 14:29:42 -08002147 subsys_interface_unregister(&cpufreq_interface);
Chandra Seetharaman65edc682006-06-27 02:54:08 -07002148 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149
Viresh Kumar6eed9402013-08-06 22:53:11 +05302150 down_write(&cpufreq_rwsem);
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002151 write_lock_irqsave(&cpufreq_driver_lock, flags);
Viresh Kumar6eed9402013-08-06 22:53:11 +05302152
Rafael J. Wysocki1c3d85d2013-04-29 00:08:16 +02002153 cpufreq_driver = NULL;
Viresh Kumar6eed9402013-08-06 22:53:11 +05302154
Nathan Zimmer0d1857a2013-02-22 16:24:34 +00002155 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
Viresh Kumar6eed9402013-08-06 22:53:11 +05302156 up_write(&cpufreq_rwsem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157
2158 return 0;
2159}
2160EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
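
/*
 * Illustrative sketch, not part of the original file: the skeleton of a
 * hypothetical platform driver built around the register/unregister calls
 * above. All example_* names and the frequency values are made up;
 * ->verify and ->init are mandatory, and one of ->target or ->setpolicy
 * must be provided, as checked in cpufreq_register_driver().
 */
static int example_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
				     policy->cpuinfo.max_freq);
	return 0;
}

static int example_target(struct cpufreq_policy *policy,
			  unsigned int target_freq, unsigned int relation)
{
	return 0;	/* a real driver would program the hardware here */
}

static int example_cpu_init(struct cpufreq_policy *policy)
{
	policy->cpuinfo.min_freq = 200000;		/* kHz, made-up */
	policy->cpuinfo.max_freq = 1000000;		/* kHz, made-up */
	policy->cpuinfo.transition_latency = 100000;	/* ns, made-up */
	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	policy->cur = policy->max;
	return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
	.name	= "example",
	.verify	= example_verify,
	.target	= example_target,
	.init	= example_cpu_init,
};

static int __init example_driver_init(void)
{
	return cpufreq_register_driver(&example_cpufreq_driver);
}

static void __exit example_driver_exit(void)
{
	cpufreq_unregister_driver(&example_cpufreq_driver);
}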
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002161
2162static int __init cpufreq_core_init(void)
2163{
2164 int cpu;
2165
Konrad Rzeszutek Wilka7b422c2012-03-13 19:18:39 -04002166 if (cpufreq_disabled())
2167 return -ENODEV;
2168
Viresh Kumar474deff2013-08-20 12:08:25 +05302169 for_each_possible_cpu(cpu)
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002170 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
Thomas Renninger8aa84ad2009-07-24 15:25:05 +02002171
Viresh Kumar2361be22013-05-17 16:09:09 +05302172 cpufreq_global_kobject = kobject_create();
Thomas Renninger8aa84ad2009-07-24 15:25:05 +02002173 BUG_ON(!cpufreq_global_kobject);
Rafael J. Wysockie00e56d2011-03-23 22:16:32 +01002174 register_syscore_ops(&cpufreq_syscore_ops);
Thomas Renninger8aa84ad2009-07-24 15:25:05 +02002175
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002176 return 0;
2177}
Venkatesh Pallipadi5a01f2e2007-02-05 16:12:44 -08002178core_initcall(cpufreq_core_init);