// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
static DEFINE_PER_CPU(u32, freq_factor) = 1;

static bool supports_scale_freq_counters(const struct cpumask *cpus)
{
	return cpumask_subset(cpus, &scale_freq_counters_mask);
}

bool topology_scale_freq_invariant(void)
{
	return cpufreq_supports_freq_invariance() ||
	       supports_scale_freq_counters(cpu_online_mask);
}

static void update_scale_freq_invariant(bool status)
{
	if (scale_freq_invariant == status)
		return;

	/*
	 * Task scheduler behavior depends on frequency invariance support,
	 * either cpufreq or counter driven. If the support status changes as
	 * a result of counter initialisation and use, retrigger the build of
	 * scheduling domains to ensure the information is propagated properly.
	 */
	if (topology_scale_freq_invariant() == status) {
		scale_freq_invariant = status;
		rebuild_sched_domains_energy();
	}
}

void topology_set_scale_freq_source(struct scale_freq_data *data,
				    const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	/*
	 * Avoid calling rebuild_sched_domains() unnecessarily if FIE is
	 * supported by cpufreq.
	 */
	if (cpumask_empty(&scale_freq_counters_mask))
		scale_freq_invariant = topology_scale_freq_invariant();

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		/* Use ARCH provided counters whenever possible */
		if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), data);
			cpumask_set_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	update_scale_freq_invariant(true);
}
EXPORT_SYMBOL_GPL(topology_set_scale_freq_source);
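
/*
 * Illustrative only (not part of this file's API): an architecture driver
 * with hardware activity counters might register itself as the source of
 * the per-CPU frequency scale factor roughly as sketched below. All names
 * prefixed with "example_" are hypothetical.
 *
 *	static void example_set_freq_scale(void)
 *	{
 *		unsigned long scale;
 *
 *		// Derive the current/max frequency ratio from the HW
 *		// counters, scaled to SCHED_CAPACITY_SCALE, and publish it.
 *		scale = example_read_counter_ratio();
 *		this_cpu_write(arch_freq_scale, scale);
 *	}
 *
 *	static struct scale_freq_data example_sfd = {
 *		.source = SCALE_FREQ_SOURCE_ARCH,
 *		.set_freq_scale = example_set_freq_scale,
 *	};
 *
 *	topology_set_scale_freq_source(&example_sfd, cpu_present_mask);
 */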

void topology_clear_scale_freq_source(enum scale_freq_source source,
				      const struct cpumask *cpus)
{
	struct scale_freq_data *sfd;
	int cpu;

	rcu_read_lock();

	for_each_cpu(cpu, cpus) {
		sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu));

		if (sfd && sfd->source == source) {
			rcu_assign_pointer(per_cpu(sft_data, cpu), NULL);
			cpumask_clear_cpu(cpu, &scale_freq_counters_mask);
		}
	}

	rcu_read_unlock();

	/*
	 * Make sure all references to previous sft_data are dropped to avoid
	 * use-after-free races.
	 */
	synchronize_rcu();

	update_scale_freq_invariant(false);
}
EXPORT_SYMBOL_GPL(topology_clear_scale_freq_source);

void topology_scale_freq_tick(void)
{
	struct scale_freq_data *sfd = rcu_dereference_sched(*this_cpu_ptr(&sft_data));

	if (sfd)
		sfd->set_freq_scale();
}

DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);

void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
			     unsigned long max_freq)
{
	unsigned long scale;
	int i;

	if (WARN_ON_ONCE(!cur_freq || !max_freq))
		return;

	/*
	 * If the use of counters for FIE is enabled, just return as we don't
	 * want to update the scale factor with information from CPUFREQ.
	 * Instead the scale factor will be updated from arch_scale_freq_tick.
	 */
	if (supports_scale_freq_counters(cpus))
		return;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(arch_freq_scale, i) = scale;
}
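
/*
 * Worked example for the computation above: cur_freq = 1500000 (kHz) and
 * max_freq = 2000000 (kHz) give scale = (1500000 << SCHED_CAPACITY_SHIFT)
 * / 2000000 = 768, i.e. 75% of SCHED_CAPACITY_SCALE (1024). The frequency
 * units cancel out, so cur_freq and max_freq only need to use the same
 * unit as each other.
 */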

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

DEFINE_PER_CPU(unsigned long, thermal_pressure);

/**
 * topology_update_thermal_pressure() - Update thermal pressure for CPUs
 * @cpus        : The related CPUs for which capacity has been reduced
 * @capped_freq : The maximum allowed frequency that CPUs can run at
 *
 * Update the value of thermal pressure for all @cpus in the mask. The
 * cpumask should include all (online+offline) affected CPUs, to avoid
 * operating on stale data when hot-plug is used for some CPUs. The
 * @capped_freq reflects the currently allowed max CPU frequency due to
 * thermal capping. It might also be a boost frequency value, which is
 * bigger than the internal 'freq_factor' max frequency. In such a case
 * the pressure value should simply be removed, since this is an indication
 * that there is no thermal throttling. The @capped_freq must be provided
 * in kHz.
 */
void topology_update_thermal_pressure(const struct cpumask *cpus,
				      unsigned long capped_freq)
{
	unsigned long max_capacity, capacity, th_pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(cpus);
	max_capacity = arch_scale_cpu_capacity(cpu);
	max_freq = per_cpu(freq_factor, cpu);

	/* Convert to MHz scale which is used in 'freq_factor' */
	capped_freq /= 1000;

	/*
	 * Properly handle boost frequencies, which should simply clear
	 * the thermal pressure value.
	 */
	if (max_freq <= capped_freq)
		capacity = max_capacity;
	else
		capacity = mult_frac(max_capacity, capped_freq, max_freq);

	th_pressure = max_capacity - capacity;

	for_each_cpu(cpu, cpus)
		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
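
/*
 * Worked example for the computation above: with arch_scale_cpu_capacity()
 * at 1024 and a 'freq_factor' of 2000 (MHz), a thermal cap of
 * capped_freq = 1500000 kHz becomes 1500 MHz, so capacity =
 * mult_frac(1024, 1500, 2000) = 768 and th_pressure = 1024 - 768 = 256.
 * A cap at or above 2000 MHz results in zero thermal pressure.
 */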

static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	u64 capacity_scale;
	int cpu;

	if (!raw_capacity)
		return;

	capacity_scale = 1;
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity_scale = max(capacity, capacity_scale);
	}

	pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
		capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
				     capacity_scale);
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}
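
/*
 * Worked example for the normalization above (made-up values): two CPUs
 * with raw_capacity of 1024 and 512 dmips-mhz, both with freq_factor
 * 2000 (MHz), give capacity_scale = 1024 * 2000 = 2048000. The big CPU
 * then normalizes to (2048000 << SCHED_CAPACITY_SHIFT) / 2048000 = 1024
 * and the little one to (1024000 << SCHED_CAPACITY_SHIFT) / 2048000 = 512.
 */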

bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	struct clk *cpu_clk;
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				cap_parsing_failed = true;
				return false;
			}
		}
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);

		/*
		 * Update freq_factor for calculating early boot CPU capacities.
		 * For a non-clk CPU DVFS mechanism, there's no way to get the
		 * frequency value now, so assume the CPUs are running at the
		 * same frequency (by keeping the initial freq_factor value).
		 */
		cpu_clk = of_clk_get(cpu_node, 0);
		if (!PTR_ERR_OR_ZERO(cpu_clk)) {
			per_cpu(freq_factor, cpu) =
				clk_get_rate(cpu_clk) / 1000;
			clk_put(cpu_clk);
		}
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}
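
/*
 * An illustrative (made-up) device tree fragment carrying the property
 * parsed above:
 *
 *	cpu@0 {
 *		device_type = "cpu";
 *		compatible = "arm,cortex-a53";
 *		reg = <0x0>;
 *		capacity-dmips-mhz = <578>;
 *	};
 *
 * The values are relative: only the ratios between the CPUs' entries
 * matter, since topology_normalize_cpu_scale() rescales them against the
 * largest capacity in the system.
 */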

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse the cpu capacity, so
	 * skip registering the cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical CPU number of the node.
 * There are basically three kinds of return values:
 * (1) logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT
 *     but there is no possible logical CPU in the kernel to match. This
 *     happens when CONFIG_NR_CPUS is configured to be smaller than the
 *     number of CPU nodes in the DT. We need to just ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
	struct device_node *cpu_node;
	int cpu;

	cpu_node = of_parse_phandle(node, "cpu", 0);
	if (!cpu_node)
		return -1;

	cpu = of_cpu_node_to_id(cpu_node);
	if (cpu >= 0)
		topology_parse_cpu_capacity(cpu_node, cpu);
	else
		pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
			cpu_node, cpumask_pr_args(cpu_possible_mask));

	of_node_put(cpu_node);
	return cpu;
}

static int __init parse_core(struct device_node *core, int package_id,
			     int core_id)
{
	char name[20];
	bool leaf = true;
	int i = 0;
	int cpu;
	struct device_node *t;

	do {
		snprintf(name, sizeof(name), "thread%d", i);
		t = of_get_child_by_name(core, name);
		if (t) {
			leaf = false;
			cpu = get_cpu_for_node(t);
			if (cpu >= 0) {
				cpu_topology[cpu].package_id = package_id;
				cpu_topology[cpu].core_id = core_id;
				cpu_topology[cpu].thread_id = i;
			} else if (cpu != -ENODEV) {
				pr_err("%pOF: Can't get CPU for thread\n", t);
				of_node_put(t);
				return -EINVAL;
			}
			of_node_put(t);
		}
		i++;
	} while (t);

	cpu = get_cpu_for_node(core);
	if (cpu >= 0) {
		if (!leaf) {
			pr_err("%pOF: Core has both threads and CPU\n",
			       core);
			return -EINVAL;
		}

		cpu_topology[cpu].package_id = package_id;
		cpu_topology[cpu].core_id = core_id;
	} else if (leaf && cpu != -ENODEV) {
		pr_err("%pOF: Can't get CPU for leaf core\n", core);
		return -EINVAL;
	}

	return 0;
}

static int __init parse_cluster(struct device_node *cluster, int depth)
{
	char name[20];
	bool leaf = true;
	bool has_cores = false;
	struct device_node *c;
	static int package_id __initdata;
	int core_id = 0;
	int i, ret;

	/*
	 * First check for child clusters; we currently ignore any
	 * information about the nesting of clusters and present the
	 * scheduler with a flat list of them.
	 */
	i = 0;
	do {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			leaf = false;
			ret = parse_cluster(c, depth + 1);
			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	/* Now check for cores */
	i = 0;
	do {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (c) {
			has_cores = true;

			if (depth == 0) {
				pr_err("%pOF: cpu-map children should be clusters\n",
				       c);
				of_node_put(c);
				return -EINVAL;
			}

			if (leaf) {
				ret = parse_core(c, package_id, core_id++);
			} else {
				pr_err("%pOF: Non-leaf cluster with core %s\n",
				       cluster, name);
				ret = -EINVAL;
			}

			of_node_put(c);
			if (ret != 0)
				return ret;
		}
		i++;
	} while (c);

	if (leaf && !has_cores)
		pr_warn("%pOF: empty cluster\n", cluster);

	if (leaf)
		package_id++;

	return 0;
}

static int __init parse_dt_topology(void)
{
	struct device_node *cn, *map;
	int ret = 0;
	int cpu;

	cn = of_find_node_by_path("/cpus");
	if (!cn) {
		pr_err("No CPU information found in DT\n");
		return 0;
	}

	/*
	 * When topology is provided, cpu-map is essentially a root
	 * cluster with restricted subnodes.
	 */
	map = of_get_child_by_name(cn, "cpu-map");
	if (!map)
		goto out;

	ret = parse_cluster(map, 0);
	if (ret != 0)
		goto out_map;

	topology_normalize_cpu_scale();

	/*
	 * Check that all cores are in the topology; the SMP code will
	 * only mark cores described in the DT as possible.
	 */
	for_each_possible_cpu(cpu)
		if (cpu_topology[cpu].package_id == -1)
			ret = -EINVAL;

out_map:
	of_node_put(map);
out:
	of_node_put(cn);
	return ret;
}
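
/*
 * An illustrative (made-up) cpu-map fragment of the shape parsed above;
 * only the cluster%d/core%d/thread%d node naming and the "cpu" phandles
 * are significant:
 *
 *	cpu-map {
 *		cluster0 {
 *			core0 { cpu = <&cpu0>; };
 *			core1 { cpu = <&cpu1>; };
 *		};
 *		cluster1 {
 *			core0 { cpu = <&cpu2>; };
 *			core1 { cpu = <&cpu3>; };
 *		};
 *	};
 */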
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

	/* Find the smallest of the NUMA, core or LLC sibling masks */
	if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
		/* no NUMA within the package, let's use the package siblings */
		core_mask = &cpu_topology[cpu].core_sibling;
	}
	if (cpu_topology[cpu].llc_id != -1) {
		if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
			core_mask = &cpu_topology[cpu].llc_sibling;
	}

	return core_mask;
}

const struct cpumask *cpu_clustergroup_mask(int cpu)
{
	return &cpu_topology[cpu].cluster_sibling;
}

void update_siblings_masks(unsigned int cpuid)
{
	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
	int cpu;

	/* update core and thread sibling masks */
	for_each_online_cpu(cpu) {
		cpu_topo = &cpu_topology[cpu];

		if (cpuid_topo->llc_id == cpu_topo->llc_id) {
			cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
		}

		if (cpuid_topo->package_id != cpu_topo->package_id)
			continue;

		if (cpuid_topo->cluster_id == cpu_topo->cluster_id &&
		    cpuid_topo->cluster_id != -1) {
			cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
			cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
		}

		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

		if (cpuid_topo->core_id != cpu_topo->core_id)
			continue;

		cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
		cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
	}
}

static void clear_cpu_topology(int cpu)
{
	struct cpu_topology *cpu_topo = &cpu_topology[cpu];

	cpumask_clear(&cpu_topo->llc_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

	cpumask_clear(&cpu_topo->cluster_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);

	cpumask_clear(&cpu_topo->core_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
	cpumask_clear(&cpu_topo->thread_sibling);
	cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_topology *cpu_topo = &cpu_topology[cpu];

		cpu_topo->thread_id = -1;
		cpu_topo->core_id = -1;
		cpu_topo->cluster_id = -1;
		cpu_topo->package_id = -1;
		cpu_topo->llc_id = -1;

		clear_cpu_topology(cpu);
	}
}

void remove_cpu_topology(unsigned int cpu)
{
	int sibling;

	for_each_cpu(sibling, topology_core_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
	for_each_cpu(sibling, topology_sibling_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
	for_each_cpu(sibling, topology_cluster_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling));
	for_each_cpu(sibling, topology_llc_cpumask(cpu))
		cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

	clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
	return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
	reset_cpu_topology();

	/*
	 * Discard anything that was parsed if we hit an error so we
	 * don't use partial information.
	 */
	if (parse_acpi_topology())
		reset_cpu_topology();
	else if (of_have_populated_dt() && parse_dt_topology())
		reset_cpu_topology();
}
#endif
741#endif