// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic OPP helper interface for CPU device
 *
 * Copyright (C) 2009-2014 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "opp.h"

#ifdef CONFIG_CPU_FREQ

/**
 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * OPP table is already initialized and ready for usage.
 *
 * This function allocates the required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance, such as freeing
 * the table, as needed.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENOMEM if no memory is available for the operation (the table is not
 * populated), and 0 on success with the table populated.
 *
 * WARNING: Callers must refresh their copy of the table if OPPs are added
 * to or removed from the device in the interim.
 */
int dev_pm_opp_init_cpufreq_table(struct device *dev,
				  struct cpufreq_frequency_table **table)
{
	struct dev_pm_opp *opp;
	struct cpufreq_frequency_table *freq_table = NULL;
	int i, max_opps, ret = 0;
	unsigned long rate;

	max_opps = dev_pm_opp_get_opp_count(dev);
	if (max_opps <= 0)
		return max_opps ? max_opps : -ENODATA;

	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
	if (!freq_table)
		return -ENOMEM;

	for (i = 0, rate = 0; i < max_opps; i++, rate++) {
		/* find next rate */
		opp = dev_pm_opp_find_freq_ceil(dev, &rate);
		if (IS_ERR(opp)) {
			ret = PTR_ERR(opp);
			goto out;
		}
		freq_table[i].driver_data = i;
		freq_table[i].frequency = rate / 1000;

		/* Is Boost/turbo opp ? */
		if (dev_pm_opp_is_turbo(opp))
			freq_table[i].flags = CPUFREQ_BOOST_FREQ;

		dev_pm_opp_put(opp);
	}

	freq_table[i].driver_data = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

out:
	if (ret)
		kfree(freq_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
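
/*
 * Example (illustrative sketch only, not part of this file): a cpufreq
 * driver's ->init() callback would typically build its frequency table
 * from the OPP framework roughly like this. The driver name "foo_cpufreq"
 * and its policy handling are hypothetical.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		struct device *cpu_dev = get_cpu_device(policy->cpu);
 *		struct cpufreq_frequency_table *freq_table;
 *		int ret;
 *
 *		ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
 *		if (ret)
 *			return ret;
 *
 *		policy->freq_table = freq_table;
 *		return 0;
 *	}
 */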

/**
 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by dev_pm_opp_init_cpufreq_table().
 */
void dev_pm_opp_free_cpufreq_table(struct device *dev,
				   struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
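
/*
 * Example (illustrative sketch only): the matching ->exit() callback of the
 * hypothetical "foo_cpufreq" driver above would release the table built in
 * ->init() with dev_pm_opp_free_cpufreq_table().
 *
 *	static int foo_cpufreq_exit(struct cpufreq_policy *policy)
 *	{
 *		struct device *cpu_dev = get_cpu_device(policy->cpu);
 *
 *		dev_pm_opp_free_cpufreq_table(cpu_dev, &policy->freq_table);
 *		return 0;
 *	}
 */
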
#endif	/* CONFIG_CPU_FREQ */

void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask,
				      int last_cpu)
{
	struct device *cpu_dev;
	int cpu;

	WARN_ON(cpumask_empty(cpumask));

	for_each_cpu(cpu, cpumask) {
		if (cpu == last_cpu)
			break;

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_err("%s: failed to get cpu%d device\n", __func__,
			       cpu);
			continue;
		}

		_dev_pm_opp_find_and_remove_table(cpu_dev);
	}
}

/**
 * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
 * @cpumask:	cpumask for which OPP table needs to be removed
 *
 * This removes the OPP tables for the CPUs present in @cpumask.
 * It should be used to remove all the OPP entries associated with
 * the CPUs in @cpumask.
 */
void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
{
	_dev_pm_opp_cpumask_remove_table(cpumask, -1);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
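
/*
 * Example (illustrative sketch only): a platform that dynamically creates
 * OPP tables for its CPUs, e.g. from firmware data, would tear them down
 * again on driver removal. The helper name "foo_remove_opp_tables" is
 * hypothetical; the related_cpus mask comes from the cpufreq policy.
 *
 *	static void foo_remove_opp_tables(struct cpufreq_policy *policy)
 *	{
 *		dev_pm_opp_cpumask_remove_table(policy->related_cpus);
 *	}
 */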

/**
 * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by a set of CPUs
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask of the CPUs which share the OPP table with @cpu_dev
 *
 * This marks the OPP table of @cpu_dev as shared by the CPUs present in
 * @cpumask.
 *
 * Returns -ENODEV if the OPP table isn't already present.
 */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
				const struct cpumask *cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	struct device *dev;
	int cpu, ret = 0;

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	for_each_cpu(cpu, cpumask) {
		if (cpu == cpu_dev->id)
			continue;

		dev = get_cpu_device(cpu);
		if (!dev) {
			dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
				__func__, cpu);
			continue;
		}

		opp_dev = _add_opp_dev(dev, opp_table);
		if (!opp_dev) {
			dev_err(dev, "%s: failed to add opp-dev for cpu%d device\n",
				__func__, cpu);
			continue;
		}

		/* Mark opp-table as shared by multiple CPUs now */
		opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
	}

	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
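
/*
 * Example (illustrative sketch only): after a driver has added OPPs for the
 * first CPU of a cluster, it can mark the remaining CPUs of that cluster as
 * sharing the same table. The helper name "foo_mark_shared" and the
 * "shared_cpus" mask (discovered by the caller, e.g. from firmware or DT)
 * are hypothetical.
 *
 *	static int foo_mark_shared(struct device *cpu_dev,
 *				   const struct cpumask *shared_cpus)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, shared_cpus);
 *		if (ret)
 *			dev_err(cpu_dev, "failed to mark OPPs shared: %d\n", ret);
 *
 *		return ret;
 *	}
 */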

/**
 * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
 * @cpu_dev:	CPU device for which we do this operation
 * @cpumask:	cpumask to update with information of sharing CPUs
 *
 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
 *
 * Returns -ENODEV if the OPP table isn't already present and -EINVAL if the
 * OPP table's status is access-unknown.
 */
int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
	struct opp_device *opp_dev;
	struct opp_table *opp_table;
	int ret = 0;

	opp_table = _find_opp_table(cpu_dev);
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
		ret = -EINVAL;
		goto put_opp_table;
	}

	cpumask_clear(cpumask);

	if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
		mutex_lock(&opp_table->lock);
		list_for_each_entry(opp_dev, &opp_table->dev_list, node)
			cpumask_set_cpu(opp_dev->dev->id, cpumask);
		mutex_unlock(&opp_table->lock);
	} else {
		cpumask_set_cpu(cpu_dev->id, cpumask);
	}

put_opp_table:
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
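
/*
 * Example (illustrative sketch only): a cpufreq driver can use
 * dev_pm_opp_get_sharing_cpus() in its ->init() callback to populate
 * policy->cpus with all CPUs that share the OPP table. The helper name
 * "foo_init_policy_cpus" and the fallback to a single CPU on -EINVAL are
 * hypothetical driver policy, shown only for illustration.
 *
 *	static int foo_init_policy_cpus(struct cpufreq_policy *policy,
 *					struct device *cpu_dev)
 *	{
 *		int ret;
 *
 *		ret = dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus);
 *		if (ret == -EINVAL)
 *			cpumask_set_cpu(policy->cpu, policy->cpus);
 *
 *		return ret == -EINVAL ? 0 : ret;
 *	}
 */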