Sudeep Holla | 99d6bdf | 2017-06-18 09:38:11 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * System Control and Power Interface (SCMI) based CPUFreq Interface driver |
| 4 | * |
| 5 | * Copyright (C) 2018 ARM Ltd. |
| 6 | * Sudeep Holla <sudeep.holla@arm.com> |
| 7 | */ |
| 8 | |
| 9 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 10 | |
| 11 | #include <linux/cpu.h> |
| 12 | #include <linux/cpufreq.h> |
| 13 | #include <linux/cpumask.h> |
Quentin Perret | 3c42985 | 2019-02-04 11:09:52 +0000 | [diff] [blame] | 14 | #include <linux/energy_model.h> |
Sudeep Holla | 99d6bdf | 2017-06-18 09:38:11 +0100 | [diff] [blame] | 15 | #include <linux/export.h> |
| 16 | #include <linux/module.h> |
| 17 | #include <linux/pm_opp.h> |
| 18 | #include <linux/slab.h> |
| 19 | #include <linux/scmi_protocol.h> |
| 20 | #include <linux/types.h> |
| 21 | |
/* Per-policy driver data, stored in policy->driver_data by scmi_cpufreq_init(). */
struct scmi_data {
	int domain_id;		/* SCMI performance domain backing this policy */
	struct device *cpu_dev;	/* device of the CPU that owns the policy */
};

/* SCMI handle shared by all policies; assigned once in scmi_cpufreq_probe(). */
static const struct scmi_handle *handle;
| 28 | |
| 29 | static unsigned int scmi_cpufreq_get_rate(unsigned int cpu) |
| 30 | { |
| 31 | struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); |
| 32 | struct scmi_perf_ops *perf_ops = handle->perf_ops; |
| 33 | struct scmi_data *priv = policy->driver_data; |
| 34 | unsigned long rate; |
| 35 | int ret; |
| 36 | |
| 37 | ret = perf_ops->freq_get(handle, priv->domain_id, &rate, false); |
| 38 | if (ret) |
| 39 | return 0; |
| 40 | return rate / 1000; |
| 41 | } |
| 42 | |
| 43 | /* |
| 44 | * perf_ops->freq_set is not a synchronous, the actual OPP change will |
| 45 | * happen asynchronously and can get notified if the events are |
| 46 | * subscribed for by the SCMI firmware |
| 47 | */ |
| 48 | static int |
| 49 | scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) |
| 50 | { |
| 51 | int ret; |
| 52 | struct scmi_data *priv = policy->driver_data; |
| 53 | struct scmi_perf_ops *perf_ops = handle->perf_ops; |
Quentin Perret | 0e141d1 | 2019-01-09 10:42:36 +0000 | [diff] [blame] | 54 | u64 freq = policy->freq_table[index].frequency; |
Sudeep Holla | 99d6bdf | 2017-06-18 09:38:11 +0100 | [diff] [blame] | 55 | |
Quentin Perret | 0e141d1 | 2019-01-09 10:42:36 +0000 | [diff] [blame] | 56 | ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false); |
Sudeep Holla | 99d6bdf | 2017-06-18 09:38:11 +0100 | [diff] [blame] | 57 | if (!ret) |
| 58 | arch_set_freq_scale(policy->related_cpus, freq, |
| 59 | policy->cpuinfo.max_freq); |
| 60 | return ret; |
| 61 | } |
| 62 | |
Sudeep Holla | 02f208c | 2017-07-20 14:43:07 +0100 | [diff] [blame] | 63 | static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy, |
| 64 | unsigned int target_freq) |
| 65 | { |
| 66 | struct scmi_data *priv = policy->driver_data; |
| 67 | struct scmi_perf_ops *perf_ops = handle->perf_ops; |
| 68 | |
| 69 | if (!perf_ops->freq_set(handle, priv->domain_id, |
| 70 | target_freq * 1000, true)) { |
| 71 | arch_set_freq_scale(policy->related_cpus, target_freq, |
| 72 | policy->cpuinfo.max_freq); |
| 73 | return target_freq; |
| 74 | } |
| 75 | |
| 76 | return 0; |
| 77 | } |
| 78 | |
Sudeep Holla | 99d6bdf | 2017-06-18 09:38:11 +0100 | [diff] [blame] | 79 | static int |
| 80 | scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) |
| 81 | { |
| 82 | int cpu, domain, tdomain; |
| 83 | struct device *tcpu_dev; |
| 84 | |
| 85 | domain = handle->perf_ops->device_domain_id(cpu_dev); |
| 86 | if (domain < 0) |
| 87 | return domain; |
| 88 | |
| 89 | for_each_possible_cpu(cpu) { |
| 90 | if (cpu == cpu_dev->id) |
| 91 | continue; |
| 92 | |
| 93 | tcpu_dev = get_cpu_device(cpu); |
| 94 | if (!tcpu_dev) |
| 95 | continue; |
| 96 | |
| 97 | tdomain = handle->perf_ops->device_domain_id(tcpu_dev); |
| 98 | if (tdomain == domain) |
| 99 | cpumask_set_cpu(cpu, cpumask); |
| 100 | } |
| 101 | |
| 102 | return 0; |
| 103 | } |
| 104 | |
Quentin Perret | 3c42985 | 2019-02-04 11:09:52 +0000 | [diff] [blame] | 105 | static int __maybe_unused |
| 106 | scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu) |
| 107 | { |
| 108 | struct device *cpu_dev = get_cpu_device(cpu); |
| 109 | unsigned long Hz; |
| 110 | int ret, domain; |
| 111 | |
| 112 | if (!cpu_dev) { |
| 113 | pr_err("failed to get cpu%d device\n", cpu); |
| 114 | return -ENODEV; |
| 115 | } |
| 116 | |
| 117 | domain = handle->perf_ops->device_domain_id(cpu_dev); |
| 118 | if (domain < 0) |
| 119 | return domain; |
| 120 | |
| 121 | /* Get the power cost of the performance domain. */ |
| 122 | Hz = *KHz * 1000; |
| 123 | ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power); |
| 124 | if (ret) |
| 125 | return ret; |
| 126 | |
| 127 | /* The EM framework specifies the frequency in KHz. */ |
| 128 | *KHz = Hz / 1000; |
| 129 | |
| 130 | return 0; |
| 131 | } |
| 132 | |
/*
 * Initialise a cpufreq policy: populate the OPP table from firmware,
 * discover CPUs sharing the performance domain, build the frequency
 * table and register an Energy Model perf domain.
 * Returns 0 on success or a negative errno (including -EPROBE_DEFER
 * while the OPP table is not yet available).
 */
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
	int ret, nr_opp;
	unsigned int latency;
	struct device *cpu_dev;
	struct scmi_data *priv;
	struct cpufreq_frequency_table *freq_table;
	struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("failed to get cpu%d device\n", policy->cpu);
		return -ENODEV;
	}

	/* Ask the firmware to populate this CPU's OPP table. */
	ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
	if (ret) {
		dev_warn(cpu_dev, "failed to add opps to the device\n");
		return ret;
	}

	ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_warn(cpu_dev, "failed to get sharing cpumask\n");
		return ret;
	}

	/* All CPUs in the same perf domain share one OPP table. */
	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
			__func__, ret);
		return ret;
	}

	nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
	if (nr_opp <= 0) {
		dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
		ret = -EPROBE_DEFER;
		goto out_free_opp;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_opp;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
	if (ret) {
		dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
		goto out_free_priv;
	}

	priv->cpu_dev = cpu_dev;
	/* Domain id was already validated by scmi_get_sharing_cpus() above. */
	priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev);

	policy->driver_data = priv;
	policy->freq_table = freq_table;

	/* SCMI allows DVFS request for any domain from any CPU */
	policy->dvfs_possible_from_any_cpu = true;

	latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
	if (!latency)
		latency = CPUFREQ_ETERNAL;

	policy->cpuinfo.transition_latency = latency;

	policy->fast_switch_possible = true;

	/* Best effort: EM registration failure is not fatal for cpufreq. */
	em_register_perf_domain(policy->cpus, nr_opp, &em_cb);

	return 0;

out_free_priv:
	kfree(priv);
out_free_opp:
	/* Undo the dynamic OPPs added by device_opps_add() above. */
	dev_pm_opp_remove_all_dynamic(cpu_dev);

	return ret;
}
| 214 | |
| 215 | static int scmi_cpufreq_exit(struct cpufreq_policy *policy) |
| 216 | { |
| 217 | struct scmi_data *priv = policy->driver_data; |
| 218 | |
Sudeep Holla | 99d6bdf | 2017-06-18 09:38:11 +0100 | [diff] [blame] | 219 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); |
Viresh Kumar | 1690d8b | 2019-01-04 15:14:33 +0530 | [diff] [blame] | 220 | dev_pm_opp_remove_all_dynamic(priv->cpu_dev); |
Yangtao Li | 8cbd468 | 2019-02-16 11:31:48 -0500 | [diff] [blame] | 221 | kfree(priv); |
Sudeep Holla | 99d6bdf | 2017-06-18 09:38:11 +0100 | [diff] [blame] | 222 | |
| 223 | return 0; |
| 224 | } |
| 225 | |
/* cpufreq driver callbacks; registered from scmi_cpufreq_probe(). */
static struct cpufreq_driver scmi_cpufreq_driver = {
	.name = "scmi",
	.flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
		 CPUFREQ_NEED_INITIAL_FREQ_CHECK |
		 CPUFREQ_IS_COOLING_DEV,
	.verify = cpufreq_generic_frequency_table_verify,
	.attr = cpufreq_generic_attr,
	.target_index = scmi_cpufreq_set_target,
	.fast_switch = scmi_cpufreq_fast_switch,	/* schedutil fast path */
	.get = scmi_cpufreq_get_rate,
	.init = scmi_cpufreq_init,
	.exit = scmi_cpufreq_exit,
};
| 239 | |
| 240 | static int scmi_cpufreq_probe(struct scmi_device *sdev) |
| 241 | { |
| 242 | int ret; |
| 243 | |
| 244 | handle = sdev->handle; |
| 245 | |
| 246 | if (!handle || !handle->perf_ops) |
| 247 | return -ENODEV; |
| 248 | |
| 249 | ret = cpufreq_register_driver(&scmi_cpufreq_driver); |
| 250 | if (ret) { |
| 251 | dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n", |
| 252 | __func__, ret); |
| 253 | } |
| 254 | |
| 255 | return ret; |
| 256 | } |
| 257 | |
/* Unbind: unregister the cpufreq driver registered at probe time. */
static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
	cpufreq_unregister_driver(&scmi_cpufreq_driver);
}
| 262 | |
/* Match on the SCMI performance protocol under the "cpufreq" name. */
static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_PERF, "cpufreq" },
	{ },	/* sentinel */
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
| 268 | |
/* SCMI bus driver glue; module_scmi_driver() generates init/exit. */
static struct scmi_driver scmi_cpufreq_drv = {
	.name		= "scmi-cpufreq",
	.probe		= scmi_cpufreq_probe,
	.remove		= scmi_cpufreq_remove,
	.id_table	= scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");