/*
 * Generic OPP helper interface for CPU device
 *
 * Copyright (C) 2009-2014 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "opp.h"

#ifdef CONFIG_CPU_FREQ

/**
 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	Cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * OPP table is already initialized and ready for usage.
 *
 * This function allocates the required memory for the cpufreq table. The
 * caller is expected to free the table, e.g. via
 * dev_pm_opp_free_cpufreq_table(), once it is no longer needed.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENODATA if the device has no OPPs, -ENOMEM if no memory is available for
 * the operation (table is not populated), and 0 if successful (table is
 * populated).
 *
 * WARNING: Callers must refresh their copy of the table if the device's set
 * of OPPs changes (e.g. via dev_pm_opp_add() or dev_pm_opp_remove()) after
 * this function has been invoked.
 */
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = NULL;
	int i, max_opps, ret = 0;
	unsigned long rate;

	max_opps = dev_pm_opp_get_opp_count(dev);
	if (max_opps <= 0)
		return max_opps ? max_opps : -ENODATA;

	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
	if (!freq_table)
		return -ENOMEM;

	for (i = 0, rate = 0; i < max_opps; i++, rate++) {
		/* find the next rate at or above the current one */
		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			goto out;
		}
		freq_table[i].driver_data = i;
		freq_table[i].frequency = rate / 1000; /* OPP rate is in Hz, cpufreq wants kHz */

		/* Is this a boost/turbo OPP? */
		if (dev_pm_opp_is_turbo(opp))
			freq_table[i].flags = CPUFREQ_BOOST_FREQ;

		dev_pm_opp_put(opp);
	}

	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

out:
	if (ret)
		kfree(freq_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
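
/*
 * A minimal usage sketch (illustrative only, not built as part of this
 * file): a cpufreq driver's ->init() callback could construct its frequency
 * table from the device's OPPs roughly as below. The "foo_cpufreq_init"
 * name and surrounding driver are hypothetical.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		struct device *cpu_dev = get_cpu_device(policy->cpu);
 *		struct cpufreq_frequency_table *freq_table;
 *		int ret;
 *
 *		if (!cpu_dev)
 *			return -ENODEV;
 *
 *		ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 *		if (ret)
 *			return ret;
 *
 *		policy->freq_table = freq_table;
 *		return 0;
 *	}
 */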

/**
 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by dev_pm_opp_init_cpufreq_table().
 */
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
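
/*
 * Continuing the sketch above (hypothetical "foo" driver): the matching
 * ->exit() callback releases the table allocated in ->init().
 *
 *	static int foo_cpufreq_exit(struct cpufreq_policy *policy)
 *	{
 *		dev_pm_opp_free_cpufreq_table(get_cpu_device(policy->cpu),
 *					      &policy->freq_table);
 *		return 0;
 *	}
 */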
#endif	/* CONFIG_CPU_FREQ */

/* Remove the OPP tables (DT-created or dynamically added) for @cpumask. */
void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
{
	struct device *cpu_dev;
	int cpu;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		if (of)
			dev_pm_opp_of_remove_table(cpu_dev);
		else
			dev_pm_opp_remove_table(cpu_dev);
	}
}

/**
 * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for the CPUs present in @cpumask.
 * This should be used to remove all the OPP entries associated with
 * the CPUs in @cpumask.
 *
 * Locking: The internal opp_table and opp structures are protected by the
 * global opp_table_lock mutex, which is taken internally, so this function
 * must not be called from atomic context.
 */
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
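
/*
 * A minimal teardown sketch (illustrative only): a driver that earlier
 * created OPP tables for a group of CPUs can drop them all in one call.
 * The "policy" argument and the hypothetical helper are assumptions.
 *
 *	static void foo_remove_opp_tables(struct cpufreq_policy *policy)
 *	{
 *		dev_pm_opp_cpumask_remove_table(policy->related_cpus);
 *	}
 */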

/**
 * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by multiple CPUs
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask of the CPUs which share the OPP table with @cpu_dev
 *
 * This marks the OPP table of @cpu_dev as shared by the CPUs present in
 * @cpumask.
 *
 * Returns -ENODEV if the OPP table isn't already present.
 *
 * Locking: The internal opp_table and opp structures are protected by the
 * global opp_table_lock mutex, which this function takes, so it must not be
 * called from atomic context.
 */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
				const struct cpumask *cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	struct device *dev;
	int cpu, ret = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table)) {
		ret = PTR_ERR(opp_table);
		goto unlock;
	}

	for_each_cpu(cpu, cpumask) {
		if (cpu == cpu_dev->id)
			continue;

		dev = get_cpu_device(cpu);
		if (!dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			continue;
		}

		opp_dev = _add_opp_dev(dev, opp_table);
		if (!opp_dev) {
			dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
				__func__, cpu);
			continue;
		}

		/* Mark the opp-table as shared now that multiple CPUs use it */
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	}
unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
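
/*
 * A minimal sketch (illustrative only): a platform driver that knows its
 * CPUs share a clock/voltage rail could mark the dynamically created OPP
 * table as shared. "foo_mark_shared" is hypothetical.
 *
 *	static int foo_mark_shared(struct cpufreq_policy *policy)
 *	{
 *		struct device *cpu_dev = get_cpu_device(policy->cpu);
 *
 *		if (!cpu_dev)
 *			return -ENODEV;
 *
 *		return dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
 *	}
 */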

/**
 * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENODEV if the OPP table isn't already present and -EINVAL if the
 * OPP table's status is access-unknown.
 *
 * Locking: The internal opp_table and opp structures are protected by the
 * global opp_table_lock mutex, which this function takes, so it must not be
 * called from atomic context.
 */
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	int ret = 0;

	mutex_lock(&opp_table_lock);

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table)) {
		ret = PTR_ERR(opp_table);
		goto unlock;
	}

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
		ret = -EINVAL;
		goto unlock;
	}

	cpumask_clear(cpumask);

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
			cpumask_set_cpu(opp_dev->dev->id, cpumask);
	} else {
		cpumask_set_cpu(cpu_dev->id, cpumask);
	}

unlock:
	mutex_unlock(&opp_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
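
/*
 * A minimal sketch (illustrative only): a cpufreq driver's ->init() callback
 * could populate policy->cpus with the CPUs sharing OPPs with the policy
 * CPU. The "foo" naming is hypothetical.
 *
 *	static int foo_get_siblings(struct cpufreq_policy *policy)
 *	{
 *		struct device *cpu_dev = get_cpu_device(policy->cpu);
 *
 *		if (!cpu_dev)
 *			return -ENODEV;
 *
 *		return dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
 *	}
 */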