Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Copyright (c) 2018, The Linux Foundation. All rights reserved. |
| 4 | */ |
| 5 | |
| 6 | #include <linux/bitfield.h> |
| 7 | #include <linux/cpufreq.h> |
| 8 | #include <linux/init.h> |
Sibi Sankar | 51c843c | 2020-06-22 13:46:48 +0530 | [diff] [blame] | 9 | #include <linux/interconnect.h> |
Thara Gopinath | 275157b | 2021-08-09 15:16:01 -0400 | [diff] [blame] | 10 | #include <linux/interrupt.h> |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 11 | #include <linux/kernel.h> |
| 12 | #include <linux/module.h> |
| 13 | #include <linux/of_address.h> |
| 14 | #include <linux/of_platform.h> |
Taniya Das | 55538fb | 2019-01-31 23:02:50 +0530 | [diff] [blame] | 15 | #include <linux/pm_opp.h> |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 16 | #include <linux/slab.h> |
Thara Gopinath | 275157b | 2021-08-09 15:16:01 -0400 | [diff] [blame] | 17 | #include <linux/spinlock.h> |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 18 | |
| 19 | #define LUT_MAX_ENTRIES 40U |
| 20 | #define LUT_SRC GENMASK(31, 30) |
| 21 | #define LUT_L_VAL GENMASK(7, 0) |
| 22 | #define LUT_CORE_COUNT GENMASK(18, 16) |
Taniya Das | 55538fb | 2019-01-31 23:02:50 +0530 | [diff] [blame] | 23 | #define LUT_VOLT GENMASK(11, 0) |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 24 | #define CLK_HW_DIV 2 |
Sibi Sankar | 0eae1e3 | 2019-08-07 17:15:43 +0530 | [diff] [blame] | 25 | #define LUT_TURBO_IND 1 |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 26 | |
Thara Gopinath | 275157b | 2021-08-09 15:16:01 -0400 | [diff] [blame] | 27 | #define HZ_PER_KHZ 1000 |
| 28 | |
/*
 * Per-SoC register layout of the cpufreq hardware frequency domain.
 * All offsets are relative to the domain's mapped base address; two
 * layouts exist in this file (OSM-style qcom_soc_data and epss_soc_data).
 */
struct qcom_cpufreq_soc_data {
	u32 reg_enable;		/* enable register; bit 0 checked in cpu_init */
	u32 reg_freq_lut;	/* offset of first frequency LUT row */
	u32 reg_volt_lut;	/* offset of first voltage LUT row */
	u32 reg_current_vote;	/* LMh current-vote register (throttled freq) */
	u32 reg_perf_state;	/* performance-state (LUT index) request register */
	u8 lut_row_size;	/* byte stride between consecutive LUT rows */
};
| 37 | |
/*
 * Per-frequency-domain driver state; allocated in qcom_cpufreq_hw_cpu_init()
 * and reachable from every callback via policy->driver_data.
 */
struct qcom_cpufreq_data {
	void __iomem *base;			/* mapped domain registers */
	struct resource *res;			/* backing mem region, released on exit */
	const struct qcom_cpufreq_soc_data *soc_data;	/* register layout in use */

	/*
	 * Mutex to synchronize between de-init sequence and re-starting LMh
	 * polling/interrupts
	 */
	struct mutex throttle_lock;
	int throttle_irq;			/* LMh dcvsh IRQ; <= 0 when absent */
	char irq_name[15];			/* storage for "dcvsh-irq-%u" name */
	bool cancel_throttle;			/* set under throttle_lock at teardown */
	struct delayed_work throttle_work;	/* polls h/w while throttled */
	struct cpufreq_policy *policy;		/* policy owning this domain */
};
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 54 | |
/*
 * Cached at probe time: rate of the "alternate" clock divided by CLK_HW_DIV,
 * and rate of the "xo" reference clock (both in Hz). Used to decode LUT
 * entries into kHz frequencies in qcom_cpufreq_hw_read_lut().
 */
static unsigned long cpu_hw_rate, xo_rate;
/* True when a DT OPP table was found, enabling interconnect bandwidth votes */
static bool icc_scaling_enabled;
| 57 | |
| 58 | static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy, |
| 59 | unsigned long freq_khz) |
| 60 | { |
| 61 | unsigned long freq_hz = freq_khz * 1000; |
| 62 | struct dev_pm_opp *opp; |
| 63 | struct device *dev; |
| 64 | int ret; |
| 65 | |
| 66 | dev = get_cpu_device(policy->cpu); |
| 67 | if (!dev) |
| 68 | return -ENODEV; |
| 69 | |
| 70 | opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true); |
| 71 | if (IS_ERR(opp)) |
| 72 | return PTR_ERR(opp); |
| 73 | |
Viresh Kumar | 8d25157 | 2021-01-21 15:27:55 +0530 | [diff] [blame] | 74 | ret = dev_pm_opp_set_opp(dev, opp); |
Sibi Sankar | 51c843c | 2020-06-22 13:46:48 +0530 | [diff] [blame] | 75 | dev_pm_opp_put(opp); |
| 76 | return ret; |
| 77 | } |
| 78 | |
| 79 | static int qcom_cpufreq_update_opp(struct device *cpu_dev, |
| 80 | unsigned long freq_khz, |
| 81 | unsigned long volt) |
| 82 | { |
| 83 | unsigned long freq_hz = freq_khz * 1000; |
| 84 | int ret; |
| 85 | |
| 86 | /* Skip voltage update if the opp table is not available */ |
| 87 | if (!icc_scaling_enabled) |
| 88 | return dev_pm_opp_add(cpu_dev, freq_hz, volt); |
| 89 | |
| 90 | ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt); |
| 91 | if (ret) { |
| 92 | dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz); |
| 93 | return ret; |
| 94 | } |
| 95 | |
| 96 | return dev_pm_opp_enable(cpu_dev, freq_hz); |
| 97 | } |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 98 | |
| 99 | static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy, |
| 100 | unsigned int index) |
| 101 | { |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 102 | struct qcom_cpufreq_data *data = policy->driver_data; |
| 103 | const struct qcom_cpufreq_soc_data *soc_data = data->soc_data; |
Douglas RAILLARD | ada54f3 | 2019-08-08 14:18:57 +0100 | [diff] [blame] | 104 | unsigned long freq = policy->freq_table[index].frequency; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 105 | |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 106 | writel_relaxed(index, data->base + soc_data->reg_perf_state); |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 107 | |
Sibi Sankar | 51c843c | 2020-06-22 13:46:48 +0530 | [diff] [blame] | 108 | if (icc_scaling_enabled) |
| 109 | qcom_cpufreq_set_bw(policy, freq); |
| 110 | |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 111 | return 0; |
| 112 | } |
| 113 | |
| 114 | static unsigned int qcom_cpufreq_hw_get(unsigned int cpu) |
| 115 | { |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 116 | struct qcom_cpufreq_data *data; |
| 117 | const struct qcom_cpufreq_soc_data *soc_data; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 118 | struct cpufreq_policy *policy; |
| 119 | unsigned int index; |
| 120 | |
| 121 | policy = cpufreq_cpu_get_raw(cpu); |
| 122 | if (!policy) |
| 123 | return 0; |
| 124 | |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 125 | data = policy->driver_data; |
| 126 | soc_data = data->soc_data; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 127 | |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 128 | index = readl_relaxed(data->base + soc_data->reg_perf_state); |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 129 | index = min(index, LUT_MAX_ENTRIES - 1); |
| 130 | |
| 131 | return policy->freq_table[index].frequency; |
| 132 | } |
| 133 | |
| 134 | static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy, |
| 135 | unsigned int target_freq) |
| 136 | { |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 137 | struct qcom_cpufreq_data *data = policy->driver_data; |
| 138 | const struct qcom_cpufreq_soc_data *soc_data = data->soc_data; |
Viresh Kumar | 292072c | 2020-07-30 08:59:40 +0530 | [diff] [blame] | 139 | unsigned int index; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 140 | |
| 141 | index = policy->cached_resolved_idx; |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 142 | writel_relaxed(index, data->base + soc_data->reg_perf_state); |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 143 | |
Ionela Voinescu | 1a0419b | 2020-09-01 21:55:46 +0100 | [diff] [blame] | 144 | return policy->freq_table[index].frequency; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 145 | } |
| 146 | |
Taniya Das | 55538fb | 2019-01-31 23:02:50 +0530 | [diff] [blame] | 147 | static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev, |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 148 | struct cpufreq_policy *policy) |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 149 | { |
Sibi Sankar | 0eae1e3 | 2019-08-07 17:15:43 +0530 | [diff] [blame] | 150 | u32 data, src, lval, i, core_count, prev_freq = 0, freq; |
Taniya Das | 55538fb | 2019-01-31 23:02:50 +0530 | [diff] [blame] | 151 | u32 volt; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 152 | struct cpufreq_frequency_table *table; |
Sibi Sankar | 51c843c | 2020-06-22 13:46:48 +0530 | [diff] [blame] | 153 | struct dev_pm_opp *opp; |
| 154 | unsigned long rate; |
| 155 | int ret; |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 156 | struct qcom_cpufreq_data *drv_data = policy->driver_data; |
| 157 | const struct qcom_cpufreq_soc_data *soc_data = drv_data->soc_data; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 158 | |
| 159 | table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL); |
| 160 | if (!table) |
| 161 | return -ENOMEM; |
| 162 | |
Sibi Sankar | 51c843c | 2020-06-22 13:46:48 +0530 | [diff] [blame] | 163 | ret = dev_pm_opp_of_add_table(cpu_dev); |
| 164 | if (!ret) { |
| 165 | /* Disable all opps and cross-validate against LUT later */ |
| 166 | icc_scaling_enabled = true; |
| 167 | for (rate = 0; ; rate++) { |
| 168 | opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate); |
| 169 | if (IS_ERR(opp)) |
| 170 | break; |
| 171 | |
| 172 | dev_pm_opp_put(opp); |
| 173 | dev_pm_opp_disable(cpu_dev, rate); |
| 174 | } |
| 175 | } else if (ret != -ENODEV) { |
| 176 | dev_err(cpu_dev, "Invalid opp table in device tree\n"); |
| 177 | return ret; |
| 178 | } else { |
Sibi Sankar | afdb219 | 2020-06-22 13:46:49 +0530 | [diff] [blame] | 179 | policy->fast_switch_possible = true; |
Sibi Sankar | 51c843c | 2020-06-22 13:46:48 +0530 | [diff] [blame] | 180 | icc_scaling_enabled = false; |
| 181 | } |
| 182 | |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 183 | for (i = 0; i < LUT_MAX_ENTRIES; i++) { |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 184 | data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut + |
| 185 | i * soc_data->lut_row_size); |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 186 | src = FIELD_GET(LUT_SRC, data); |
| 187 | lval = FIELD_GET(LUT_L_VAL, data); |
| 188 | core_count = FIELD_GET(LUT_CORE_COUNT, data); |
| 189 | |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 190 | data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut + |
| 191 | i * soc_data->lut_row_size); |
Taniya Das | 55538fb | 2019-01-31 23:02:50 +0530 | [diff] [blame] | 192 | volt = FIELD_GET(LUT_VOLT, data) * 1000; |
| 193 | |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 194 | if (src) |
| 195 | freq = xo_rate * lval / 1000; |
| 196 | else |
| 197 | freq = cpu_hw_rate / 1000; |
| 198 | |
Sibi Sankar | 0eae1e3 | 2019-08-07 17:15:43 +0530 | [diff] [blame] | 199 | if (freq != prev_freq && core_count != LUT_TURBO_IND) { |
Matthias Kaehlcke | bc9b9c5 | 2020-09-15 10:10:54 -0700 | [diff] [blame] | 200 | if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) { |
| 201 | table[i].frequency = freq; |
| 202 | dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i, |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 203 | freq, core_count); |
Matthias Kaehlcke | bc9b9c5 | 2020-09-15 10:10:54 -0700 | [diff] [blame] | 204 | } else { |
| 205 | dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq); |
| 206 | table[i].frequency = CPUFREQ_ENTRY_INVALID; |
| 207 | } |
| 208 | |
Sibi Sankar | 0eae1e3 | 2019-08-07 17:15:43 +0530 | [diff] [blame] | 209 | } else if (core_count == LUT_TURBO_IND) { |
Taniya Das | 55538fb | 2019-01-31 23:02:50 +0530 | [diff] [blame] | 210 | table[i].frequency = CPUFREQ_ENTRY_INVALID; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 211 | } |
| 212 | |
| 213 | /* |
| 214 | * Two of the same frequencies with the same core counts means |
| 215 | * end of table |
| 216 | */ |
Sibi Sankar | 0eae1e3 | 2019-08-07 17:15:43 +0530 | [diff] [blame] | 217 | if (i > 0 && prev_freq == freq) { |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 218 | struct cpufreq_frequency_table *prev = &table[i - 1]; |
| 219 | |
| 220 | /* |
| 221 | * Only treat the last frequency that might be a boost |
| 222 | * as the boost frequency |
| 223 | */ |
Sibi Sankar | 0eae1e3 | 2019-08-07 17:15:43 +0530 | [diff] [blame] | 224 | if (prev->frequency == CPUFREQ_ENTRY_INVALID) { |
Matthias Kaehlcke | bc9b9c5 | 2020-09-15 10:10:54 -0700 | [diff] [blame] | 225 | if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) { |
| 226 | prev->frequency = prev_freq; |
| 227 | prev->flags = CPUFREQ_BOOST_FREQ; |
| 228 | } else { |
| 229 | dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", |
| 230 | freq); |
| 231 | } |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 232 | } |
| 233 | |
| 234 | break; |
| 235 | } |
| 236 | |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 237 | prev_freq = freq; |
| 238 | } |
| 239 | |
| 240 | table[i].frequency = CPUFREQ_TABLE_END; |
| 241 | policy->freq_table = table; |
Taniya Das | 55538fb | 2019-01-31 23:02:50 +0530 | [diff] [blame] | 242 | dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus); |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 243 | |
| 244 | return 0; |
| 245 | } |
| 246 | |
| 247 | static void qcom_get_related_cpus(int index, struct cpumask *m) |
| 248 | { |
| 249 | struct device_node *cpu_np; |
| 250 | struct of_phandle_args args; |
| 251 | int cpu, ret; |
| 252 | |
| 253 | for_each_possible_cpu(cpu) { |
| 254 | cpu_np = of_cpu_device_node_get(cpu); |
| 255 | if (!cpu_np) |
| 256 | continue; |
| 257 | |
| 258 | ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain", |
| 259 | "#freq-domain-cells", 0, |
| 260 | &args); |
| 261 | of_node_put(cpu_np); |
| 262 | if (ret < 0) |
| 263 | continue; |
| 264 | |
| 265 | if (index == args.args[0]) |
| 266 | cpumask_set_cpu(cpu, m); |
| 267 | } |
| 268 | } |
| 269 | |
/*
 * Read the LMh current-vote register and convert it to a frequency in kHz.
 * NOTE(review): the low 10 bits (0x3FF) are treated as a count of 19200 kHz
 * steps; both constants are hardware-specific — confirm against the SoC's
 * OSM/LMh register description.
 */
static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
	unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);

	return (val & 0x3FF) * 19200;
}
| 276 | |
| 277 | static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) |
| 278 | { |
Thara Gopinath | 275157b | 2021-08-09 15:16:01 -0400 | [diff] [blame] | 279 | struct cpufreq_policy *policy = data->policy; |
| 280 | int cpu = cpumask_first(policy->cpus); |
| 281 | struct device *dev = get_cpu_device(cpu); |
Lukasz Luba | 0258cb1 | 2021-11-09 19:57:13 +0000 | [diff] [blame] | 282 | unsigned long freq_hz, throttled_freq; |
Thara Gopinath | 275157b | 2021-08-09 15:16:01 -0400 | [diff] [blame] | 283 | struct dev_pm_opp *opp; |
| 284 | unsigned int freq; |
| 285 | |
| 286 | /* |
| 287 | * Get the h/w throttled frequency, normalize it using the |
| 288 | * registered opp table and use it to calculate thermal pressure. |
| 289 | */ |
| 290 | freq = qcom_lmh_get_throttle_freq(data); |
| 291 | freq_hz = freq * HZ_PER_KHZ; |
| 292 | |
| 293 | opp = dev_pm_opp_find_freq_floor(dev, &freq_hz); |
| 294 | if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE) |
| 295 | dev_pm_opp_find_freq_ceil(dev, &freq_hz); |
| 296 | |
| 297 | throttled_freq = freq_hz / HZ_PER_KHZ; |
| 298 | |
Lukasz Luba | 0258cb1 | 2021-11-09 19:57:13 +0000 | [diff] [blame] | 299 | /* Update thermal pressure (the boost frequencies are accepted) */ |
| 300 | arch_update_thermal_pressure(policy->related_cpus, throttled_freq); |
Thara Gopinath | 275157b | 2021-08-09 15:16:01 -0400 | [diff] [blame] | 301 | |
| 302 | /* |
| 303 | * In the unlikely case policy is unregistered do not enable |
| 304 | * polling or h/w interrupt |
| 305 | */ |
| 306 | mutex_lock(&data->throttle_lock); |
| 307 | if (data->cancel_throttle) |
| 308 | goto out; |
| 309 | |
| 310 | /* |
| 311 | * If h/w throttled frequency is higher than what cpufreq has requested |
| 312 | * for, then stop polling and switch back to interrupt mechanism. |
| 313 | */ |
| 314 | if (throttled_freq >= qcom_cpufreq_hw_get(cpu)) |
| 315 | enable_irq(data->throttle_irq); |
| 316 | else |
| 317 | mod_delayed_work(system_highpri_wq, &data->throttle_work, |
| 318 | msecs_to_jiffies(10)); |
| 319 | |
| 320 | out: |
| 321 | mutex_unlock(&data->throttle_lock); |
| 322 | } |
| 323 | |
| 324 | static void qcom_lmh_dcvs_poll(struct work_struct *work) |
| 325 | { |
| 326 | struct qcom_cpufreq_data *data; |
| 327 | |
| 328 | data = container_of(work, struct qcom_cpufreq_data, throttle_work.work); |
| 329 | qcom_lmh_dcvs_notify(data); |
| 330 | } |
| 331 | |
/*
 * LMh throttle IRQ handler (threaded, IRQF_ONESHOT): hand over to the
 * polling mechanism until qcom_lmh_dcvs_notify() sees throttling end and
 * re-enables the interrupt.
 */
static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
	struct qcom_cpufreq_data *c_data = data;

	/* Disable interrupt and enable polling */
	disable_irq_nosync(c_data->throttle_irq);
	schedule_delayed_work(&c_data->throttle_work, 0);

	return IRQ_HANDLED;
}
| 342 | |
/* Register layout for the original "qcom,cpufreq-hw" (OSM) block */
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
	.reg_enable = 0x0,
	.reg_freq_lut = 0x110,
	.reg_volt_lut = 0x114,
	.reg_current_vote = 0x704,	/* LMh current-vote, used for throttling */
	.reg_perf_state = 0x920,
	.lut_row_size = 32,
};
| 351 | |
/*
 * Register layout for the newer "qcom,cpufreq-epss" block.
 * NOTE(review): reg_current_vote is left 0 here; qcom_lmh_get_throttle_freq()
 * would read offset 0 on EPSS — confirm EPSS platforms never describe an LMh
 * IRQ so that path is not taken.
 */
static const struct qcom_cpufreq_soc_data epss_soc_data = {
	.reg_enable = 0x0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_perf_state = 0x320,
	.lut_row_size = 4,
};
| 359 | |
/* DT match table selecting the OSM vs EPSS register layout */
static const struct of_device_id qcom_cpufreq_hw_match[] = {
	{ .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
	{ .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
| 366 | |
/*
 * Set up LMh (Limits Management hardware) throttle notification for the
 * frequency domain @index of @policy. LMh support is strictly optional:
 * a missing IRQ, or a failure to request it, still returns 0 so cpufreq
 * comes up without throttle reporting.
 */
static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	/*
	 * Look for LMh interrupt. If no interrupt line is specified /
	 * if there is an error, allow cpufreq to be enabled as usual.
	 */
	data->throttle_irq = platform_get_irq_optional(pdev, index);
	if (data->throttle_irq == -ENXIO)
		return 0;	/* no IRQ described for this domain: fine */
	if (data->throttle_irq < 0)
		return data->throttle_irq;

	data->cancel_throttle = false;
	data->policy = policy;

	/* Lock and work must exist before the IRQ can fire */
	mutex_init(&data->throttle_lock);
	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

	/* Name lives in @data so it outlives this function for free_irq() */
	snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
	ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
				   IRQF_ONESHOT, data->irq_name, data);
	if (ret) {
		dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
		return 0;	/* deliberately non-fatal: run without LMh */
	}

	/* Best effort: keep the IRQ on the domain's own CPUs */
	ret = irq_set_affinity_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return 0;
}
| 404 | |
/*
 * Tear down LMh notification: flag cancellation under the lock (so a
 * concurrent qcom_lmh_dcvs_notify() won't re-arm anything), then flush the
 * poll work and release the IRQ. No-op when no LMh IRQ was set up.
 */
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
	if (data->throttle_irq <= 0)
		return;

	/* Ordering matters: set cancel_throttle before cancelling the work */
	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = true;
	mutex_unlock(&data->throttle_lock);

	cancel_delayed_work_sync(&data->throttle_work);
	free_irq(data->throttle_irq, data);
}
| 417 | |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 418 | static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy) |
| 419 | { |
Manivannan Sadhasivam | bd74e28 | 2020-09-08 13:27:12 +0530 | [diff] [blame] | 420 | struct platform_device *pdev = cpufreq_get_driver_data(); |
| 421 | struct device *dev = &pdev->dev; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 422 | struct of_phandle_args args; |
| 423 | struct device_node *cpu_np; |
Taniya Das | 55538fb | 2019-01-31 23:02:50 +0530 | [diff] [blame] | 424 | struct device *cpu_dev; |
Shawn Guo | 67fc209 | 2021-01-19 10:39:25 +0800 | [diff] [blame] | 425 | struct resource *res; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 426 | void __iomem *base; |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 427 | struct qcom_cpufreq_data *data; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 428 | int ret, index; |
| 429 | |
Taniya Das | 55538fb | 2019-01-31 23:02:50 +0530 | [diff] [blame] | 430 | cpu_dev = get_cpu_device(policy->cpu); |
| 431 | if (!cpu_dev) { |
| 432 | pr_err("%s: failed to get cpu%d device\n", __func__, |
| 433 | policy->cpu); |
| 434 | return -ENODEV; |
| 435 | } |
| 436 | |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 437 | cpu_np = of_cpu_device_node_get(policy->cpu); |
| 438 | if (!cpu_np) |
| 439 | return -EINVAL; |
| 440 | |
| 441 | ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain", |
| 442 | "#freq-domain-cells", 0, &args); |
| 443 | of_node_put(cpu_np); |
| 444 | if (ret) |
| 445 | return ret; |
| 446 | |
| 447 | index = args.args[0]; |
| 448 | |
Shawn Guo | 67fc209 | 2021-01-19 10:39:25 +0800 | [diff] [blame] | 449 | res = platform_get_resource(pdev, IORESOURCE_MEM, index); |
| 450 | if (!res) { |
| 451 | dev_err(dev, "failed to get mem resource %d\n", index); |
| 452 | return -ENODEV; |
| 453 | } |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 454 | |
Shawn Guo | 67fc209 | 2021-01-19 10:39:25 +0800 | [diff] [blame] | 455 | if (!request_mem_region(res->start, resource_size(res), res->name)) { |
| 456 | dev_err(dev, "failed to request resource %pR\n", res); |
| 457 | return -EBUSY; |
| 458 | } |
| 459 | |
| 460 | base = ioremap(res->start, resource_size(res)); |
Wei Yongjun | 536eb97 | 2021-03-04 10:04:23 +0000 | [diff] [blame] | 461 | if (!base) { |
Shawn Guo | 67fc209 | 2021-01-19 10:39:25 +0800 | [diff] [blame] | 462 | dev_err(dev, "failed to map resource %pR\n", res); |
Wei Yongjun | 536eb97 | 2021-03-04 10:04:23 +0000 | [diff] [blame] | 463 | ret = -ENOMEM; |
Shawn Guo | 67fc209 | 2021-01-19 10:39:25 +0800 | [diff] [blame] | 464 | goto release_region; |
| 465 | } |
| 466 | |
| 467 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 468 | if (!data) { |
| 469 | ret = -ENOMEM; |
Shawn Guo | 67fc209 | 2021-01-19 10:39:25 +0800 | [diff] [blame] | 470 | goto unmap_base; |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 471 | } |
| 472 | |
| 473 | data->soc_data = of_device_get_match_data(&pdev->dev); |
| 474 | data->base = base; |
Shawn Guo | 67fc209 | 2021-01-19 10:39:25 +0800 | [diff] [blame] | 475 | data->res = res; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 476 | |
| 477 | /* HW should be in enabled state to proceed */ |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 478 | if (!(readl_relaxed(base + data->soc_data->reg_enable) & 0x1)) { |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 479 | dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index); |
| 480 | ret = -ENODEV; |
| 481 | goto error; |
| 482 | } |
| 483 | |
| 484 | qcom_get_related_cpus(index, policy->cpus); |
| 485 | if (!cpumask_weight(policy->cpus)) { |
| 486 | dev_err(dev, "Domain-%d failed to get related CPUs\n", index); |
| 487 | ret = -ENOENT; |
| 488 | goto error; |
| 489 | } |
| 490 | |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 491 | policy->driver_data = data; |
Taniya Das | f0712ac | 2021-08-23 15:45:46 +0530 | [diff] [blame] | 492 | policy->dvfs_possible_from_any_cpu = true; |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 493 | |
Manivannan Sadhasivam | dcd1fd7 | 2020-09-15 12:54:22 +0530 | [diff] [blame] | 494 | ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy); |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 495 | if (ret) { |
| 496 | dev_err(dev, "Domain-%d failed to read LUT\n", index); |
| 497 | goto error; |
| 498 | } |
| 499 | |
Taniya Das | 55538fb | 2019-01-31 23:02:50 +0530 | [diff] [blame] | 500 | ret = dev_pm_opp_get_opp_count(cpu_dev); |
| 501 | if (ret <= 0) { |
| 502 | dev_err(cpu_dev, "Failed to add OPPs\n"); |
| 503 | ret = -ENODEV; |
| 504 | goto error; |
| 505 | } |
| 506 | |
Shawn Guo | 2669917 | 2021-01-13 14:52:41 +0800 | [diff] [blame] | 507 | if (policy_has_boost_freq(policy)) { |
| 508 | ret = cpufreq_enable_boost_support(); |
| 509 | if (ret) |
| 510 | dev_warn(cpu_dev, "failed to enable boost: %d\n", ret); |
| 511 | } |
| 512 | |
Thara Gopinath | 275157b | 2021-08-09 15:16:01 -0400 | [diff] [blame] | 513 | ret = qcom_cpufreq_hw_lmh_init(policy, index); |
| 514 | if (ret) |
| 515 | goto error; |
| 516 | |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 517 | return 0; |
| 518 | error: |
Shawn Guo | 67fc209 | 2021-01-19 10:39:25 +0800 | [diff] [blame] | 519 | kfree(data); |
| 520 | unmap_base: |
Shawn Guo | 02fc409 | 2021-02-28 09:33:19 +0800 | [diff] [blame] | 521 | iounmap(base); |
Shawn Guo | 67fc209 | 2021-01-19 10:39:25 +0800 | [diff] [blame] | 522 | release_region: |
| 523 | release_mem_region(res->start, resource_size(res)); |
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 524 | return ret; |
| 525 | } |
| 526 | |
/*
 * cpufreq ->exit hook: release everything qcom_cpufreq_hw_cpu_init() set up,
 * in reverse order of acquisition.
 */
static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct resource *res = data->res;
	void __iomem *base = data->base;

	/* Drop LUT-created OPPs and any DT OPP table for these CPUs */
	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	/* Quiesce LMh IRQ/polling before the register mapping goes away */
	qcom_cpufreq_hw_lmh_exit(data);
	kfree(policy->freq_table);
	kfree(data);
	iounmap(base);
	release_mem_region(res->start, resource_size(res));

	return 0;
}
| 544 | |
/* sysfs attributes exported per policy: available and boost frequencies */
static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};
| 550 | |
/*
 * cpufreq driver ops. Fast switching is advertised per-policy from
 * qcom_cpufreq_hw_read_lut() (only without a DT OPP table); the driver also
 * registers itself as a cooling device and an energy model via OPPs.
 */
static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK |
			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
			  CPUFREQ_IS_COOLING_DEV,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= qcom_cpufreq_hw_target_index,
	.get		= qcom_cpufreq_hw_get,
	.init		= qcom_cpufreq_hw_cpu_init,
	.exit		= qcom_cpufreq_hw_cpu_exit,
	.register_em	= cpufreq_register_em_with_opp,
	.fast_switch	= qcom_cpufreq_hw_fast_switch,
	.name		= "qcom-cpufreq-hw",
	.attr		= qcom_cpufreq_hw_attr,
};
| 565 | |
/*
 * Platform probe: cache the clock rates needed to decode the LUT, discover
 * optional interconnect paths, then register the cpufreq driver.
 * Returns 0 on success or a negative errno (-EPROBE_DEFER before CPU0's
 * device exists).
 */
static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
	struct device *cpu_dev;
	struct clk *clk;
	int ret;

	/* Cache the XO reference clock rate for LUT frequency decoding */
	clk = clk_get(&pdev->dev, "xo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	xo_rate = clk_get_rate(clk);
	clk_put(clk);

	/* Cache the alternate-source rate, pre-divided by CLK_HW_DIV (2) */
	clk = clk_get(&pdev->dev, "alternate");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
	clk_put(clk);

	/* Make the platform device reachable from the cpufreq callbacks */
	cpufreq_qcom_hw_driver.driver_data = pdev;

	/* Check for optional interconnect paths on CPU0 */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return ret;

	ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
	if (ret)
		dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
	else
		dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");

	return ret;
}
| 605 | |
/* Platform remove: unregister the cpufreq driver (->exit frees per-policy state) */
static int qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
	return cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}
| 610 | |
/* Platform driver bound via the DT match table above */
static struct platform_driver qcom_cpufreq_hw_driver = {
	.probe = qcom_cpufreq_hw_driver_probe,
	.remove = qcom_cpufreq_hw_driver_remove,
	.driver = {
		.name = "qcom-cpufreq-hw",
		.of_match_table = qcom_cpufreq_hw_match,
	},
};
| 619 | |
static int __init qcom_cpufreq_hw_init(void)
{
	return platform_driver_register(&qcom_cpufreq_hw_driver);
}
/* Registered at postcore level, i.e. earlier than ordinary module/device initcalls */
postcore_initcall(qcom_cpufreq_hw_init);
Taniya Das | 2849dd8 | 2018-12-14 09:40:24 +0530 | [diff] [blame] | 625 | |
/* Module unload: drop the platform driver registered at init */
static void __exit qcom_cpufreq_hw_exit(void)
{
	platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);
| 631 | |
| 632 | MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver"); |
| 633 | MODULE_LICENSE("GPL v2"); |