// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <trace/hooks/topology.h>

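/*
 * Frequency invariance is in place when either cpufreq advertises hardware
 * support for it or the architecture exposes frequency counters (e.g. arm64
 * AMUs) for all online CPUs; either source keeps freq_scale current.
 */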
bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       arch_freq_counters_available(cpu_online_mask);
}

__weak bool arch_freq_counters_available(const struct cpumask *cpus)
{
	return false;
}

DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(freq_scale);

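/*
 * Set the frequency scale factor for a set of CPUs from cpufreq data.
 *
 * The factor is cur_freq / max_freq in fixed point relative to
 * SCHED_CAPACITY_SCALE (1024). For example, a CPU running at 1.5 GHz out of
 * a 3 GHz maximum gets scale = (1500000 << 10) / 3000000 = 512.
 */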
void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (arch_freq_counters_available(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	trace_android_vh_arch_set_freq_scale(cpus, cur_freq, max_freq, &scale);

	for_each_cpu(i, cpus)
		per_cpu(freq_scale, i) = scale;
}

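/*
 * cpu_scale holds each CPU's compute capacity relative to the most capable
 * CPU in the system, normalized to SCHED_CAPACITY_SCALE (1024).
 */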
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);
EXPORT_PER_CPU_SYMBOL_GPL(thermal_pressure);

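/*
 * Record the capacity currently lost to thermal capping for each CPU in
 * @cpus. WRITE_ONCE() pairs with the lockless readers on the scheduler side
 * (arch_scale_thermal_pressure()).
 */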
void topology_set_thermal_pressure(const struct cpumask *cpus,
				   unsigned long th_pressure)
{
	int cpu;

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);

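/* Expose the normalized capacity as /sys/devices/system/cpu/cpuN/cpu_capacity. */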
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

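/*
 * Despite the "sysctl" in its name, this registers the cpu_capacity sysfs
 * attribute on every possible CPU device.
 */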
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;
bool topology_update_done;
EXPORT_SYMBOL_GPL(topology_update_done);

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	topology_update_done = true;
	trace_android_vh_update_topology_flags_workfn(NULL);
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

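/*
 * freq_factor weights each CPU's raw capacity by a frequency before
 * normalization. Its absolute unit is irrelevant: only the ratio between
 * CPUs matters once topology_normalize_cpu_scale() rescales everything
 * against the system-wide maximum.
 */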
static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

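/*
 * Rescale the raw (DT-provided) capacities so that the biggest
 * frequency-weighted capacity in the system becomes SCHED_CAPACITY_SCALE.
 * E.g. with raw capacities 1024 and 512 and equal freq_factor values, the
 * CPUs end up with cpu_scale 1024 and 512 respectively.
 */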
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

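/*
 * Parse the "capacity-dmips-mhz" property of a CPU's device-tree node into
 * raw_capacity[]. Returns true if the property was found; on the first CPU
 * that lacks it, all raw capacities are discarded so that partial data is
 * never used.
 */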
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update freq_factor for calculating early boot CPU
		 * capacities. For non-clk CPU DVFS mechanisms there is no way
		 * to get the frequency value now, so assume those CPUs are
		 * running at the same frequency (by keeping the initial
		 * freq_factor value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

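/*
 * Called for every new cpufreq policy. Each policy's CPUs are removed from
 * cpus_to_visit and their freq_factor is switched to the policy's maximum
 * frequency; once every possible CPU has been visited, capacities are
 * normalized and a sched_domain rebuild is queued.
 */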
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems skip registering the cpufreq notifier: we
	 * have to use the default CPU capacities until the code needed to
	 * parse CPU capacity from ACPI is in place.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
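/*
 * Without cpufreq, raw_capacity cannot be refined any further after the
 * boot-time DT parse, so release it once initcalls run.
 */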
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical CPU number of the given node.
 * There are basically three kinds of return values:
 * (1) logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT
 * but there is no possible logical CPU in the kernel to match. This happens
 * when CONFIG_NR_CPUS is configured to be smaller than the number of CPU
 * nodes in DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

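/*
 * Parse a core%d node of the DT cpu-map. A core either contains thread%d
 * children (SMT) or is itself a leaf with a "cpu" phandle; having both is
 * rejected as malformed.
 */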
static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

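/*
 * Recursively parse a cluster%d node. A minimal cpu-map fragment this walker
 * accepts looks like (illustrative DT source only):
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&cpu0>; };
 *			core1 { cpu = <&cpu1>; };
 *		};
 *	};
 */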
static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}

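/*
 * Walk /cpus/cpu-map, normalize the parsed capacities and verify that every
 * possible CPU was described; a CPU left with package_id == -1 means the DT
 * and the SMP view of the system disagree, which invalidates the whole parse.
 */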
static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

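/*
 * Pick the tightest grouping among the NUMA node mask, the package siblings
 * and the LLC siblings as the scheduler's MC-level mask for @cpu: the
 * smallest candidate that is still a subset of the current mask wins.
 */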
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smaller of NUMA, core or LLC siblings */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* not NUMA in package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}
	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	return core_mask;
}

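/*
 * Fold the newly onlined @cpuid into the LLC, core and thread sibling masks
 * of every CPU already online that shares the respective topology level;
 * masks are updated symmetrically on both CPUs.
 */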
void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpu_topo->llc_id != -1 && cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->package_id = -1;
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

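/*
 * Weak no-op fallback; architectures with a firmware topology description
 * (e.g. the ACPI PPTT on arm64) override this with a real parser.
 */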
__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
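/*
 * Boot-time entry point: start from a clean slate, then try ACPI first and
 * the device tree second.
 */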
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}
#endif