Thomas Gleixner | c942fdd | 2019-05-27 08:55:06 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2 | /* |
| 3 | * processor_thermal.c - Passive cooling submodule of the ACPI processor driver |
| 4 | * |
| 5 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> |
| 6 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
| 7 | * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> |
| 8 | * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> |
| 9 | * - Added processor hotplug support |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 10 | */ |
| 11 | |
| 12 | #include <linux/kernel.h> |
| 13 | #include <linux/module.h> |
| 14 | #include <linux/init.h> |
| 15 | #include <linux/cpufreq.h> |
Lv Zheng | 8b48463 | 2013-12-03 08:49:16 +0800 | [diff] [blame] | 16 | #include <linux/acpi.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 17 | #include <acpi/processor.h> |
Linus Torvalds | 7c0f6ba | 2016-12-24 11:46:01 -0800 | [diff] [blame] | 18 | #include <linux/uaccess.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 19 | |
Len Brown | a192a95 | 2009-07-28 16:45:54 -0400 | [diff] [blame] | 20 | #define PREFIX "ACPI: " |
| 21 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 22 | #define ACPI_PROCESSOR_CLASS "processor" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 23 | #define _COMPONENT ACPI_PROCESSOR_COMPONENT |
Len Brown | f52fd66 | 2007-02-12 22:42:12 -0500 | [diff] [blame] | 24 | ACPI_MODULE_NAME("processor_thermal"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 25 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | #ifdef CONFIG_CPU_FREQ |
| 27 | |
| 28 | /* If a passive cooling situation is detected, primarily CPUfreq is used, as it |
| 29 | * offers (in most cases) voltage scaling in addition to frequency scaling, and |
| 30 | * thus a cubic (instead of linear) reduction of energy. Also, we allow for |
| 31 | * _any_ cpufreq driver and not only the acpi-cpufreq driver. |
| 32 | */ |
| 33 | |
/*
 * Passive-cooling reduction steps: 0 = no reduction, 3 = maximum.
 * Each step removes a further 20% of cpuinfo.max_freq (see
 * cpufreq_set_cur_state()).
 */
#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

/* Per-CPU reduction step; the package's first online CPU holds the value. */
static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);

/*
 * Per-package reduction state, keyed by the first online CPU of the
 * package (phys_package_first_cpu() is defined below; the macro expands
 * at its use sites, so the ordering is fine).
 */
#define reduction_pctg(cpu) \
	per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
| 41 | |
| 42 | /* |
| 43 | * Emulate "per package data" using per cpu data (which should really be |
| 44 | * provided elsewhere) |
| 45 | * |
| 46 | * Note we can lose a CPU on cpu hotunplug, in this case we forget the state |
| 47 | * temporarily. Fortunately that's not a big issue here (I hope) |
| 48 | */ |
| 49 | static int phys_package_first_cpu(int cpu) |
| 50 | { |
| 51 | int i; |
| 52 | int id = topology_physical_package_id(cpu); |
| 53 | |
| 54 | for_each_online_cpu(i) |
| 55 | if (topology_physical_package_id(i) == id) |
| 56 | return i; |
| 57 | return 0; |
| 58 | } |
| 59 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 60 | static int cpu_has_cpufreq(unsigned int cpu) |
| 61 | { |
| 62 | struct cpufreq_policy policy; |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 63 | if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu)) |
Thomas Renninger | 75b245b3 | 2005-12-21 01:29:00 -0500 | [diff] [blame] | 64 | return 0; |
| 65 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 66 | } |
| 67 | |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 68 | static int cpufreq_get_max_state(unsigned int cpu) |
| 69 | { |
| 70 | if (!cpu_has_cpufreq(cpu)) |
| 71 | return 0; |
| 72 | |
| 73 | return CPUFREQ_THERMAL_MAX_STEP; |
| 74 | } |
| 75 | |
/* Current cpufreq reduction step of @cpu's package (0 without cpufreq). */
static int cpufreq_get_cur_state(unsigned int cpu)
{
	return cpu_has_cpufreq(cpu) ? reduction_pctg(cpu) : 0;
}
| 83 | |
| 84 | static int cpufreq_set_cur_state(unsigned int cpu, int state) |
| 85 | { |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 86 | struct cpufreq_policy *policy; |
| 87 | struct acpi_processor *pr; |
| 88 | unsigned long max_freq; |
| 89 | int i, ret; |
Andi Kleen | 2815ab9 | 2012-02-06 08:17:11 -0800 | [diff] [blame] | 90 | |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 91 | if (!cpu_has_cpufreq(cpu)) |
| 92 | return 0; |
| 93 | |
Andi Kleen | 2815ab9 | 2012-02-06 08:17:11 -0800 | [diff] [blame] | 94 | reduction_pctg(cpu) = state; |
| 95 | |
| 96 | /* |
| 97 | * Update all the CPUs in the same package because they all |
| 98 | * contribute to the temperature and often share the same |
| 99 | * frequency. |
| 100 | */ |
| 101 | for_each_online_cpu(i) { |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 102 | if (topology_physical_package_id(i) != |
Andi Kleen | 2815ab9 | 2012-02-06 08:17:11 -0800 | [diff] [blame] | 103 | topology_physical_package_id(cpu)) |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 104 | continue; |
| 105 | |
| 106 | pr = per_cpu(processors, i); |
| 107 | |
Rafael J. Wysocki | 3000ce3 | 2019-10-16 12:47:06 +0200 | [diff] [blame] | 108 | if (unlikely(!freq_qos_request_active(&pr->thermal_req))) |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 109 | continue; |
| 110 | |
| 111 | policy = cpufreq_cpu_get(i); |
| 112 | if (!policy) |
| 113 | return -EINVAL; |
| 114 | |
| 115 | max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100; |
| 116 | |
| 117 | cpufreq_cpu_put(policy); |
| 118 | |
Rafael J. Wysocki | 3000ce3 | 2019-10-16 12:47:06 +0200 | [diff] [blame] | 119 | ret = freq_qos_update_request(&pr->thermal_req, max_freq); |
Viresh Kumar | d15ce41 | 2019-08-28 14:20:13 +0530 | [diff] [blame] | 120 | if (ret < 0) { |
| 121 | pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n", |
| 122 | pr->id, ret); |
| 123 | } |
Andi Kleen | 2815ab9 | 2012-02-06 08:17:11 -0800 | [diff] [blame] | 124 | } |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 125 | return 0; |
| 126 | } |
| 127 | |
Rafael J. Wysocki | 3000ce3 | 2019-10-16 12:47:06 +0200 | [diff] [blame] | 128 | void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy) |
Len Brown | 4be44fc | 2005-08-05 00:44:28 -0400 | [diff] [blame] | 129 | { |
Rafael J. Wysocki | a1bb46c | 2019-10-25 02:41:40 +0200 | [diff] [blame] | 130 | unsigned int cpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 131 | |
Rafael J. Wysocki | a1bb46c | 2019-10-25 02:41:40 +0200 | [diff] [blame] | 132 | for_each_cpu(cpu, policy->related_cpus) { |
| 133 | struct acpi_processor *pr = per_cpu(processors, cpu); |
| 134 | int ret; |
Rafael J. Wysocki | 2d8b39a | 2019-10-15 19:35:20 +0200 | [diff] [blame] | 135 | |
Rafael J. Wysocki | a1bb46c | 2019-10-25 02:41:40 +0200 | [diff] [blame] | 136 | if (!pr) |
| 137 | continue; |
| 138 | |
| 139 | ret = freq_qos_add_request(&policy->constraints, |
| 140 | &pr->thermal_req, |
| 141 | FREQ_QOS_MAX, INT_MAX); |
| 142 | if (ret < 0) |
| 143 | pr_err("Failed to add freq constraint for CPU%d (%d)\n", |
| 144 | cpu, ret); |
| 145 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 146 | } |
| 147 | |
Rafael J. Wysocki | 3000ce3 | 2019-10-16 12:47:06 +0200 | [diff] [blame] | 148 | void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy) |
Len Brown | 4be44fc | 2005-08-05 00:44:28 -0400 | [diff] [blame] | 149 | { |
Rafael J. Wysocki | a1bb46c | 2019-10-25 02:41:40 +0200 | [diff] [blame] | 150 | unsigned int cpu; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 151 | |
Rafael J. Wysocki | a1bb46c | 2019-10-25 02:41:40 +0200 | [diff] [blame] | 152 | for_each_cpu(cpu, policy->related_cpus) { |
| 153 | struct acpi_processor *pr = per_cpu(processors, policy->cpu); |
| 154 | |
| 155 | if (pr) |
| 156 | freq_qos_remove_request(&pr->thermal_req); |
| 157 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 158 | } |
Len Brown | 4be44fc | 2005-08-05 00:44:28 -0400 | [diff] [blame] | 159 | #else /* ! CONFIG_CPU_FREQ */ |
/* Without CONFIG_CPU_FREQ no frequency-scaling cooling states exist. */
static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}
| 164 | |
/* Without CONFIG_CPU_FREQ the cpufreq cooling state is always 0. */
static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}
| 169 | |
/* Without CONFIG_CPU_FREQ setting a cpufreq cooling state is a no-op. */
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 174 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 175 | #endif |
| 176 | |
Al Stone | 6dd7aca | 2013-12-05 11:14:16 -0700 | [diff] [blame] | 177 | /* thermal cooling device callbacks */ |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 178 | static int acpi_processor_max_state(struct acpi_processor *pr) |
| 179 | { |
| 180 | int max_state = 0; |
| 181 | |
| 182 | /* |
| 183 | * There exists four states according to |
Al Stone | 6dd7aca | 2013-12-05 11:14:16 -0700 | [diff] [blame] | 184 | * cpufreq_thermal_reduction_pctg. 0, 1, 2, 3 |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 185 | */ |
| 186 | max_state += cpufreq_get_max_state(pr->id); |
| 187 | if (pr->flags.throttling) |
| 188 | max_state += (pr->throttling.state_count -1); |
| 189 | |
| 190 | return max_state; |
| 191 | } |
| 192 | static int |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 193 | processor_get_max_state(struct thermal_cooling_device *cdev, |
| 194 | unsigned long *state) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 195 | { |
| 196 | struct acpi_device *device = cdev->devdata; |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 197 | struct acpi_processor *pr; |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 198 | |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 199 | if (!device) |
| 200 | return -EINVAL; |
| 201 | |
| 202 | pr = acpi_driver_data(device); |
| 203 | if (!pr) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 204 | return -EINVAL; |
| 205 | |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 206 | *state = acpi_processor_max_state(pr); |
| 207 | return 0; |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 208 | } |
| 209 | |
| 210 | static int |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 211 | processor_get_cur_state(struct thermal_cooling_device *cdev, |
| 212 | unsigned long *cur_state) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 213 | { |
| 214 | struct acpi_device *device = cdev->devdata; |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 215 | struct acpi_processor *pr; |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 216 | |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 217 | if (!device) |
| 218 | return -EINVAL; |
| 219 | |
| 220 | pr = acpi_driver_data(device); |
| 221 | if (!pr) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 222 | return -EINVAL; |
| 223 | |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 224 | *cur_state = cpufreq_get_cur_state(pr->id); |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 225 | if (pr->flags.throttling) |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 226 | *cur_state += pr->throttling.state; |
| 227 | return 0; |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 228 | } |
| 229 | |
| 230 | static int |
Matthew Garrett | 6503e5d | 2008-11-27 17:48:13 +0000 | [diff] [blame] | 231 | processor_set_cur_state(struct thermal_cooling_device *cdev, |
| 232 | unsigned long state) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 233 | { |
| 234 | struct acpi_device *device = cdev->devdata; |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 235 | struct acpi_processor *pr; |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 236 | int result = 0; |
| 237 | int max_pstate; |
| 238 | |
Colin Ian King | 99aa363 | 2013-03-25 10:50:06 +0000 | [diff] [blame] | 239 | if (!device) |
| 240 | return -EINVAL; |
| 241 | |
| 242 | pr = acpi_driver_data(device); |
| 243 | if (!pr) |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 244 | return -EINVAL; |
| 245 | |
| 246 | max_pstate = cpufreq_get_max_state(pr->id); |
| 247 | |
| 248 | if (state > acpi_processor_max_state(pr)) |
| 249 | return -EINVAL; |
| 250 | |
| 251 | if (state <= max_pstate) { |
| 252 | if (pr->flags.throttling && pr->throttling.state) |
Frans Pop | 2a90800 | 2009-08-26 14:29:29 -0700 | [diff] [blame] | 253 | result = acpi_processor_set_throttling(pr, 0, false); |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 254 | cpufreq_set_cur_state(pr->id, state); |
| 255 | } else { |
| 256 | cpufreq_set_cur_state(pr->id, max_pstate); |
| 257 | result = acpi_processor_set_throttling(pr, |
Frans Pop | 2a90800 | 2009-08-26 14:29:29 -0700 | [diff] [blame] | 258 | state - max_pstate, false); |
Zhang Rui | d9460fd22 | 2008-01-17 15:51:23 +0800 | [diff] [blame] | 259 | } |
| 260 | return result; |
| 261 | } |
| 262 | |
/* Cooling-device operations registered with the generic thermal framework. */
const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};