Thomas Gleixner | c942fdd | 2019-05-27 08:55:06 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
| 3 | * processor_thermal.c - Passive cooling submodule of the ACPI processor driver |
| 4 | * |
| 5 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> |
| 6 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
| 7 | * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> |
| 8 | * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
| 9 | * - Added processor hotplug support |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | */ |
| 11 | |
| 12 | #include <linux/kernel.h> |
| 13 | #include <linux/module.h> |
| 14 | #include <linux/init.h> |
| 15 | #include <linux/cpufreq.h> |
Lv Zheng | 8b48463 | 2013-12-03 08:49:16 +0800 | [diff] [blame] | 16 | #include <linux/acpi.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 | #include <acpi/processor.h> |
Linus Torvalds | 7c0f6ba | 2016-12-24 11:46:01 -0800 | [diff] [blame] | 18 | #include <linux/uaccess.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 19 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 20 | #ifdef CONFIG_CPU_FREQ |
| 21 | |
| 22 | /* If a passive cooling situation is detected, primarily CPUfreq is used, as it |
| 23 | * offers (in most cases) voltage scaling in addition to frequency scaling, and |
| 24 | * thus a cubic (instead of linear) reduction of energy. Also, we allow for |
| 25 | * _any_ cpufreq driver and not only the acpi-cpufreq driver. |
| 26 | */ |
| 27 | |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 28 | #define CPUFREQ_THERMAL_MIN_STEP 0 |
| 29 | #define CPUFREQ_THERMAL_MAX_STEP 3 |
| 30 | |
Mike Travis | c938ac2 | 2008-03-05 08:31:29 -0800 | [diff] [blame] | 31 | static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 32 | |
Andi Kleen | 2815ab9 | 2012-02-06 08:17:11 -0800 | [diff] [blame] | 33 | #define reduction_pctg(cpu) \ |
| 34 | per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu)) |
| 35 | |
| 36 | /* |
| 37 | * Emulate "per package data" using per cpu data (which should really be |
| 38 | * provided elsewhere) |
| 39 | * |
| 40 | * Note we can lose a CPU on cpu hotunplug, in this case we forget the state |
| 41 | * temporarily. Fortunately that's not a big issue here (I hope) |
| 42 | */ |
| 43 | static int phys_package_first_cpu(int cpu) |
| 44 | { |
| 45 | int i; |
| 46 | int id = topology_physical_package_id(cpu); |
| 47 | |
| 48 | for_each_online_cpu(i) |
| 49 | if (topology_physical_package_id(i) == id) |
| 50 | return i; |
| 51 | return 0; |
| 52 | } |
| 53 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 54 | static int cpu_has_cpufreq(unsigned int cpu) |
| 55 | { |
Manfred Spraul | 8120832 | 2021-12-22 15:09:31 +0100 | [diff] [blame] | 56 | struct cpufreq_policy *policy; |
| 57 | |
| 58 | if (!acpi_processor_cpufreq_init) |
Thomas Renninger | 75b245b3 | 2005-12-21 01:29:00 -0500 | [diff] [blame] | 59 | return 0; |
Manfred Spraul | 8120832 | 2021-12-22 15:09:31 +0100 | [diff] [blame] | 60 | |
| 61 | policy = cpufreq_cpu_get(cpu); |
| 62 | if (policy) { |
| 63 | cpufreq_cpu_put(policy); |
| 64 | return 1; |
| 65 | } |
| 66 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 67 | } |
| 68 | |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 69 | static int cpufreq_get_max_state(unsigned int cpu) |
| 70 | { |
| 71 | if (!cpu_has_cpufreq(cpu)) |
| 72 | return 0; |
| 73 | |
| 74 | return CPUFREQ_THERMAL_MAX_STEP; |
| 75 | } |
| 76 | |
/* Current cpufreq-based cooling state of @cpu's package. */
static int cpufreq_get_cur_state(unsigned int cpu)
{
	return cpu_has_cpufreq(cpu) ? reduction_pctg(cpu) : 0;
}
| 84 | |
| 85 | static int cpufreq_set_cur_state(unsigned int cpu, int state) |
| 86 | { |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 87 | struct cpufreq_policy *policy; |
| 88 | struct acpi_processor *pr; |
| 89 | unsigned long max_freq; |
| 90 | int i, ret; |
Andi Kleen | 2815ab9 | 2012-02-06 08:17:11 -0800 | [diff] [blame] | 91 | |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 92 | if (!cpu_has_cpufreq(cpu)) |
| 93 | return 0; |
| 94 | |
Andi Kleen | 2815ab9 | 2012-02-06 08:17:11 -0800 | [diff] [blame] | 95 | reduction_pctg(cpu) = state; |
| 96 | |
| 97 | /* |
| 98 | * Update all the CPUs in the same package because they all |
| 99 | * contribute to the temperature and often share the same |
| 100 | * frequency. |
| 101 | */ |
| 102 | for_each_online_cpu(i) { |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 103 | if (topology_physical_package_id(i) != |
Andi Kleen | 2815ab9 | 2012-02-06 08:17:11 -0800 | [diff] [blame] | 104 | topology_physical_package_id(cpu)) |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 105 | continue; |
| 106 | |
| 107 | pr = per_cpu(processors, i); |
| 108 | |
Rafael J. Wysocki | 3000ce3 | 2019-10-16 12:47:06 +0200 | [diff] [blame] | 109 | if (unlikely(!freq_qos_request_active(&pr->thermal_req))) |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 110 | continue; |
| 111 | |
| 112 | policy = cpufreq_cpu_get(i); |
| 113 | if (!policy) |
| 114 | return -EINVAL; |
| 115 | |
| 116 | max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100; |
| 117 | |
| 118 | cpufreq_cpu_put(policy); |
| 119 | |
Rafael J. Wysocki | 3000ce3 | 2019-10-16 12:47:06 +0200 | [diff] [blame] | 120 | ret = freq_qos_update_request(&pr->thermal_req, max_freq); |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 121 | if (ret < 0) { |
| 122 | pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n", |
| 123 | pr->id, ret); |
| 124 | } |
Andi Kleen | 2815ab9 | 2012-02-06 08:17:11 -0800 | [diff] [blame] | 125 | } |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 126 | return 0; |
| 127 | } |
| 128 | |
Rafael J. Wysocki | 3000ce3 | 2019-10-16 12:47:06 +0200 | [diff] [blame] | 129 | void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy) |
Len Brown | 4be44fc | 2005-08-05 00:44:28 -0400 | [diff] [blame] | 130 | { |
Rafael J. Wysocki | a1bb46c | 2019-10-25 02:41:40 +0200 | [diff] [blame] | 131 | unsigned int cpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 132 | |
Rafael J. Wysocki | a1bb46c | 2019-10-25 02:41:40 +0200 | [diff] [blame] | 133 | for_each_cpu(cpu, policy->related_cpus) { |
| 134 | struct acpi_processor *pr = per_cpu(processors, cpu); |
| 135 | int ret; |
Rafael J. Wysocki | 2d8b39a | 2019-10-15 19:35:20 +0200 | [diff] [blame] | 136 | |
Rafael J. Wysocki | a1bb46c | 2019-10-25 02:41:40 +0200 | [diff] [blame] | 137 | if (!pr) |
| 138 | continue; |
| 139 | |
| 140 | ret = freq_qos_add_request(&policy->constraints, |
| 141 | &pr->thermal_req, |
| 142 | FREQ_QOS_MAX, INT_MAX); |
| 143 | if (ret < 0) |
| 144 | pr_err("Failed to add freq constraint for CPU%d (%d)\n", |
| 145 | cpu, ret); |
| 146 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 147 | } |
| 148 | |
Rafael J. Wysocki | 3000ce3 | 2019-10-16 12:47:06 +0200 | [diff] [blame] | 149 | void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) |
Len Brown | 4be44fc | 2005-08-05 00:44:28 -0400 | [diff] [blame] | 150 | { |
Rafael J. Wysocki | a1bb46c | 2019-10-25 02:41:40 +0200 | [diff] [blame] | 151 | unsigned int cpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 152 | |
Rafael J. Wysocki | a1bb46c | 2019-10-25 02:41:40 +0200 | [diff] [blame] | 153 | for_each_cpu(cpu, policy->related_cpus) { |
| 154 | struct acpi_processor *pr = per_cpu(processors, policy->cpu); |
| 155 | |
| 156 | if (pr) |
| 157 | freq_qos_remove_request(&pr->thermal_req); |
| 158 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 159 | } |
Len Brown | 4be44fc | 2005-08-05 00:44:28 -0400 | [diff] [blame] | 160 | #else /* ! CONFIG_CPU_FREQ */ |
/* CONFIG_CPU_FREQ disabled: cpufreq contributes no cooling states. */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}
| 165 | |
/* CONFIG_CPU_FREQ disabled: the cpufreq cooling state is always 0. */
static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}
| 170 | |
/* CONFIG_CPU_FREQ disabled: setting a cpufreq cooling state is a no-op. */
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 175 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 176 | #endif |
| 177 | |
Al Stone | 6dd7aca | 2013-12-05 11:14:16 -0700 | [diff] [blame] | 178 | /* thermal cooling device callbacks */ |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 179 | static int acpi_processor_max_state(struct acpi_processor *pr) |
| 180 | { |
| 181 | int max_state = 0; |
| 182 | |
| 183 | /* |
| 184 | * There exists four states according to |
Al Stone | 6dd7aca | 2013-12-05 11:14:16 -0700 | [diff] [blame] | 185 | * cpufreq_thermal_reduction_pctg. 0, 1, 2, 3 |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 186 | */ |
| 187 | max_state += cpufreq_get_max_state(pr->id); |
| 188 | if (pr->flags.throttling) |
| 189 | max_state += (pr->throttling.state_count -1); |
| 190 | |
| 191 | return max_state; |
| 192 | } |
| 193 | static int |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 194 | processor_get_max_state(struct thermal_cooling_device *cdev, |
| 195 | unsigned long *state) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 196 | { |
| 197 | struct acpi_device *device = cdev->devdata; |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 198 | struct acpi_processor *pr; |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 199 | |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 200 | if (!device) |
| 201 | return -EINVAL; |
| 202 | |
| 203 | pr = acpi_driver_data(device); |
| 204 | if (!pr) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 205 | return -EINVAL; |
| 206 | |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 207 | *state = acpi_processor_max_state(pr); |
| 208 | return 0; |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 209 | } |
| 210 | |
| 211 | static int |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 212 | processor_get_cur_state(struct thermal_cooling_device *cdev, |
| 213 | unsigned long *cur_state) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 214 | { |
| 215 | struct acpi_device *device = cdev->devdata; |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 216 | struct acpi_processor *pr; |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 217 | |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 218 | if (!device) |
| 219 | return -EINVAL; |
| 220 | |
| 221 | pr = acpi_driver_data(device); |
| 222 | if (!pr) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 223 | return -EINVAL; |
| 224 | |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 225 | *cur_state = cpufreq_get_cur_state(pr->id); |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 226 | if (pr->flags.throttling) |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 227 | *cur_state += pr->throttling.state; |
| 228 | return 0; |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 229 | } |
| 230 | |
| 231 | static int |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 232 | processor_set_cur_state(struct thermal_cooling_device *cdev, |
| 233 | unsigned long state) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 234 | { |
| 235 | struct acpi_device *device = cdev->devdata; |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 236 | struct acpi_processor *pr; |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 237 | int result = 0; |
| 238 | int max_pstate; |
| 239 | |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 240 | if (!device) |
| 241 | return -EINVAL; |
| 242 | |
| 243 | pr = acpi_driver_data(device); |
| 244 | if (!pr) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 245 | return -EINVAL; |
| 246 | |
| 247 | max_pstate = cpufreq_get_max_state(pr->id); |
| 248 | |
| 249 | if (state > acpi_processor_max_state(pr)) |
| 250 | return -EINVAL; |
| 251 | |
| 252 | if (state <= max_pstate) { |
| 253 | if (pr->flags.throttling && pr->throttling.state) |
Frans Pop | 2a90800 | 2009-08-26 14:29:29 -0700 | [diff] [blame] | 254 | result = acpi_processor_set_throttling(pr, 0, false); |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 255 | cpufreq_set_cur_state(pr->id, state); |
| 256 | } else { |
| 257 | cpufreq_set_cur_state(pr->id, max_pstate); |
| 258 | result = acpi_processor_set_throttling(pr, |
Frans Pop | 2a90800 | 2009-08-26 14:29:29 -0700 | [diff] [blame] | 259 | state - max_pstate, false); |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 260 | } |
| 261 | return result; |
| 262 | } |
| 263 | |
/* Cooling device callbacks registered for the ACPI processor driver. */
const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};