// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;

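/*
 * Record the current frequency of the CPUs in @cpus as a scale factor
 * relative to their maximum frequency:
 *
 *      freq_scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq
 *
 * i.e. SCHED_CAPACITY_SCALE (1024) when a CPU runs at full speed, so that
 * frequency-invariant load tracking can scale utilization accordingly.
 */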
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
                         unsigned long max_freq)
{
        unsigned long scale;
        int i;

        scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

        for_each_cpu(i, cpus)
                per_cpu(freq_scale, i) = scale;
}

DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

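/* Record the compute capacity of @cpu that the scheduler should use. */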
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
        per_cpu(cpu_scale, cpu) = capacity;
}

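/* Read handler for /sys/devices/system/cpu/cpuN/cpu_capacity. */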
static ssize_t cpu_capacity_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

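/*
 * Create the cpu_capacity sysfs attribute for every possible CPU at
 * subsys_initcall time; CPUs whose device is not available yet are skipped
 * with an error message.
 */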
static int register_cpu_capacity_sysctl(void)
{
        int i;
        struct device *cpu;

        for_each_possible_cpu(i) {
                cpu = get_cpu_device(i);
                if (!cpu) {
                        pr_err("%s: too early to get CPU%d device!\n",
                               __func__, i);
                        continue;
                }
                device_create_file(cpu, &dev_attr_cpu_capacity);
        }

        return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

static int update_topology;

int topology_update_cpu_topology(void)
{
        return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
        update_topology = 1;
        rebuild_sched_domains();
        pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
        update_topology = 0;
}

static DEFINE_PER_CPU(u32, freq_factor) = 1;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
        kfree(raw_capacity);
        raw_capacity = NULL;

        return 0;
}

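/*
 * Turn the raw capacity-dmips-mhz values into per-CPU capacities: each raw
 * value is multiplied by the CPU's frequency factor (taken from its clock or
 * from cpufreq's maximum frequency) and the results are normalized so that
 * the most capable CPU ends up at SCHED_CAPACITY_SCALE.
 */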
void topology_normalize_cpu_scale(void)
{
        u64 capacity;
        u64 capacity_scale;
        int cpu;

        if (!raw_capacity)
                return;

        capacity_scale = 1;
        for_each_possible_cpu(cpu) {
                capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
                capacity_scale = max(capacity, capacity_scale);
        }

        pr_debug("cpu_capacity: capacity_scale=%llu\n", capacity_scale);
        for_each_possible_cpu(cpu) {
                capacity = raw_capacity[cpu] * per_cpu(freq_factor, cpu);
                capacity = div64_u64(capacity << SCHED_CAPACITY_SHIFT,
                                     capacity_scale);
                topology_set_cpu_scale(cpu, capacity);
                pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
                         cpu, topology_get_cpu_scale(cpu));
        }
}

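/*
 * Read the optional "capacity-dmips-mhz" property of @cpu_node into
 * raw_capacity[cpu]. An illustrative (not taken from any particular board)
 * devicetree fragment:
 *
 *      cpu@0 {
 *              device_type = "cpu";
 *              compatible = "arm,cortex-a53";
 *              capacity-dmips-mhz = <578>;
 *      };
 *
 * If any CPU lacks the property, parsing is abandoned and every CPU keeps
 * the default capacity of SCHED_CAPACITY_SCALE.
 */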
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
        struct clk *cpu_clk;
        static bool cap_parsing_failed;
        int ret;
        u32 cpu_capacity;

        if (cap_parsing_failed)
                return false;

        ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
                                   &cpu_capacity);
        if (!ret) {
                if (!raw_capacity) {
                        raw_capacity = kcalloc(num_possible_cpus(),
                                               sizeof(*raw_capacity),
                                               GFP_KERNEL);
                        if (!raw_capacity) {
                                cap_parsing_failed = true;
                                return false;
                        }
                }
                raw_capacity[cpu] = cpu_capacity;
                pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
                         cpu_node, raw_capacity[cpu]);

                /*
                 * Update freq_factor for calculating early boot CPU
                 * capacities. For CPUs whose DVFS is not clk-based there is
                 * no way to get the frequency at this point, so assume they
                 * all run at the same frequency (i.e. keep the initial
                 * freq_factor value).
                 */
                cpu_clk = of_clk_get(cpu_node, 0);
                if (!PTR_ERR_OR_ZERO(cpu_clk)) {
                        per_cpu(freq_factor, cpu) =
                                clk_get_rate(cpu_clk) / 1000;

                        clk_put(cpu_clk);
                }
        } else {
                if (raw_capacity) {
                        pr_err("cpu_capacity: missing %pOF raw capacity\n",
                               cpu_node);
                        pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
                }
                cap_parsing_failed = true;
                free_raw_capacity();
        }

        return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

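/*
 * cpufreq policy notifier: as each policy is created, record the maximum
 * frequency of its CPUs in freq_factor and drop them from cpus_to_visit.
 * Once every possible CPU has been covered, normalize the capacities,
 * schedule a sched_domain rebuild and unregister the notifier.
 */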
static int
init_cpu_capacity_callback(struct notifier_block *nb,
                           unsigned long val,
                           void *data)
{
        struct cpufreq_policy *policy = data;
        int cpu;

        if (!raw_capacity)
                return 0;

        if (val != CPUFREQ_CREATE_POLICY)
                return 0;

        pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
                 cpumask_pr_args(policy->related_cpus),
                 cpumask_pr_args(cpus_to_visit));

        cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(freq_factor, cpu) = policy->cpuinfo.max_freq / 1000;

        if (cpumask_empty(cpus_to_visit)) {
                topology_normalize_cpu_scale();
                schedule_work(&update_topology_flags_work);
                free_raw_capacity();
                pr_debug("cpu_capacity: parsing done\n");
                schedule_work(&parsing_done_work);
        }

        return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
        .notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
        int ret;

        /*
         * On ACPI-based systems we need to use the default cpu capacity
         * until we have the necessary code to parse the cpu capacity, so
         * skip registering the cpufreq notifier.
         */
        if (!acpi_disabled || !raw_capacity)
                return -EINVAL;

        if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
                return -ENOMEM;

        cpumask_copy(cpus_to_visit, cpu_possible_mask);

        ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
                                        CPUFREQ_POLICY_NOTIFIER);

        if (ret)
                free_cpumask_var(cpus_to_visit);

        return ret;
}
core_initcall(register_cpufreq_notifier);

static void parsing_done_workfn(struct work_struct *work)
{
        cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
                                    CPUFREQ_POLICY_NOTIFIER);
        free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/*
 * This function returns the logical CPU number of the given node.
 * There are three kinds of return values:
 * (1) the logical CPU number, which is >= 0.
 * (2) -ENODEV when the device tree (DT) node is valid and found in the DT
 *     but there is no matching possible logical CPU in the kernel. This
 *     happens when CONFIG_NR_CPUS is configured to be smaller than the
 *     number of CPU nodes in the DT; callers simply ignore this case.
 * (3) -1 if the node does not exist in the device tree.
 */
static int __init get_cpu_for_node(struct device_node *node)
{
        struct device_node *cpu_node;
        int cpu;

        cpu_node = of_parse_phandle(node, "cpu", 0);
        if (!cpu_node)
                return -1;

        cpu = of_cpu_node_to_id(cpu_node);
        if (cpu >= 0)
                topology_parse_cpu_capacity(cpu_node, cpu);
        else
                pr_info("CPU node for %pOF exists but the possible cpu range is: %*pbl\n",
                        cpu_node, cpumask_pr_args(cpu_possible_mask));

        of_node_put(cpu_node);
        return cpu;
}

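/*
 * Parse a "coreN" node of the cpu-map: pick up any "threadN" children for
 * SMT parts, otherwise treat the core node itself as a single CPU, and fill
 * in the package/core/thread IDs in cpu_topology[].
 */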
static int __init parse_core(struct device_node *core, int package_id,
                             int core_id)
{
        char name[20];
        bool leaf = true;
        int i = 0;
        int cpu;
        struct device_node *t;

        do {
                snprintf(name, sizeof(name), "thread%d", i);
                t = of_get_child_by_name(core, name);
                if (t) {
                        leaf = false;
                        cpu = get_cpu_for_node(t);
                        if (cpu >= 0) {
                                cpu_topology[cpu].package_id = package_id;
                                cpu_topology[cpu].core_id = core_id;
                                cpu_topology[cpu].thread_id = i;
                        } else if (cpu != -ENODEV) {
                                pr_err("%pOF: Can't get CPU for thread\n", t);
                                of_node_put(t);
                                return -EINVAL;
                        }
                        of_node_put(t);
                }
                i++;
        } while (t);

        cpu = get_cpu_for_node(core);
        if (cpu >= 0) {
                if (!leaf) {
                        pr_err("%pOF: Core has both threads and CPU\n",
                               core);
                        return -EINVAL;
                }

                cpu_topology[cpu].package_id = package_id;
                cpu_topology[cpu].core_id = core_id;
        } else if (leaf && cpu != -ENODEV) {
                pr_err("%pOF: Can't get CPU for leaf core\n", core);
                return -EINVAL;
        }

        return 0;
}

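/*
 * Recursively parse a "clusterN" node (or the cpu-map root at depth 0).
 * Nested clusters are flattened: every leaf cluster gets its own package_id,
 * assigned in the order the leaves are encountered.
 */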
static int __init parse_cluster(struct device_node *cluster, int depth)
{
        char name[20];
        bool leaf = true;
        bool has_cores = false;
        struct device_node *c;
        static int package_id __initdata;
        int core_id = 0;
        int i, ret;

        /*
         * First check for child clusters; we currently ignore any
         * information about the nesting of clusters and present the
         * scheduler with a flat list of them.
         */
        i = 0;
        do {
                snprintf(name, sizeof(name), "cluster%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        leaf = false;
                        ret = parse_cluster(c, depth + 1);
                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        /* Now check for cores */
        i = 0;
        do {
                snprintf(name, sizeof(name), "core%d", i);
                c = of_get_child_by_name(cluster, name);
                if (c) {
                        has_cores = true;

                        if (depth == 0) {
                                pr_err("%pOF: cpu-map children should be clusters\n",
                                       c);
                                of_node_put(c);
                                return -EINVAL;
                        }

                        if (leaf) {
                                ret = parse_core(c, package_id, core_id++);
                        } else {
                                pr_err("%pOF: Non-leaf cluster with core %s\n",
                                       cluster, name);
                                ret = -EINVAL;
                        }

                        of_node_put(c);
                        if (ret != 0)
                                return ret;
                }
                i++;
        } while (c);

        if (leaf && !has_cores)
                pr_warn("%pOF: empty cluster\n", cluster);

        if (leaf)
                package_id++;

        return 0;
}

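/*
 * Build cpu_topology[] from the devicetree. An illustrative /cpus/cpu-map
 * layout (node and phandle names are examples, not from any specific board):
 *
 *      cpu-map {
 *              cluster0 {
 *                      core0 { cpu = <&cpu0>; };
 *                      core1 { cpu = <&cpu1>; };
 *              };
 *              cluster1 {
 *                      core0 { cpu = <&cpu2>; };
 *                      core1 { cpu = <&cpu3>; };
 *              };
 *      };
 */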
static int __init parse_dt_topology(void)
{
        struct device_node *cn, *map;
        int ret = 0;
        int cpu;

        cn = of_find_node_by_path("/cpus");
        if (!cn) {
                pr_err("No CPU information found in DT\n");
                return 0;
        }

        /*
         * When topology is provided cpu-map is essentially a root
         * cluster with restricted subnodes.
         */
        map = of_get_child_by_name(cn, "cpu-map");
        if (!map)
                goto out;

        ret = parse_cluster(map, 0);
        if (ret != 0)
                goto out_map;

        topology_normalize_cpu_scale();

        /*
         * Check that all cores are in the topology; the SMP code will
         * only mark cores described in the DT as possible.
         */
        for_each_possible_cpu(cpu)
                if (cpu_topology[cpu].package_id == -1)
                        ret = -EINVAL;

out_map:
        of_node_put(map);
out:
        of_node_put(cn);
        return ret;
}
#endif

/*
 * cpu topology table
 */
struct cpu_topology cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

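/*
 * Return the narrowest grouping that makes sense as the scheduler's
 * core-siblings level for @cpu: start from the NUMA node mask, shrink it to
 * the package siblings when the package is contained in the node, and then
 * to the LLC siblings when those are a subset of the result.
 */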
const struct cpumask *cpu_coregroup_mask(int cpu)
{
        const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu));

        /* Find the smallest of the NUMA, package and LLC sibling masks */
        if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) {
                /* package not split by NUMA, use the package siblings */
                core_mask = &cpu_topology[cpu].core_sibling;
        }
        if (cpu_topology[cpu].llc_id != -1) {
                if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask))
                        core_mask = &cpu_topology[cpu].llc_sibling;
        }

        return core_mask;
}

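/*
 * Called as @cpuid comes online: cross-link it with every online CPU that
 * shares its LLC, package and core IDs so the llc/core/thread sibling masks
 * stay symmetric.
 */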
void update_siblings_masks(unsigned int cpuid)
{
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;

        /* update core and thread sibling masks */
        for_each_online_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];

                if (cpuid_topo->llc_id == cpu_topo->llc_id) {
                        cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling);
                        cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling);
                }

                if (cpuid_topo->package_id != cpu_topo->package_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
                cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);

                if (cpuid_topo->core_id != cpu_topo->core_id)
                        continue;

                cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
                cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
        }
}

static void clear_cpu_topology(int cpu)
{
        struct cpu_topology *cpu_topo = &cpu_topology[cpu];

        cpumask_clear(&cpu_topo->llc_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);

        cpumask_clear(&cpu_topo->core_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
        cpumask_clear(&cpu_topo->thread_sibling);
        cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
}

void __init reset_cpu_topology(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                struct cpu_topology *cpu_topo = &cpu_topology[cpu];

                cpu_topo->thread_id = -1;
                cpu_topo->core_id = -1;
                cpu_topo->package_id = -1;
                cpu_topo->llc_id = -1;

                clear_cpu_topology(cpu);
        }
}

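/*
 * Undo update_siblings_masks() for @cpu on hot-unplug: remove it from its
 * siblings' core/thread/LLC masks, then reset its own masks.
 */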
void remove_cpu_topology(unsigned int cpu)
{
        int sibling;

        for_each_cpu(sibling, topology_core_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
        for_each_cpu(sibling, topology_sibling_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
        for_each_cpu(sibling, topology_llc_cpumask(cpu))
                cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));

        clear_cpu_topology(cpu);
}

__weak int __init parse_acpi_topology(void)
{
        return 0;
}

#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
void __init init_cpu_topology(void)
{
        reset_cpu_topology();

        /*
         * Discard anything that was parsed if we hit an error so we
         * don't use partial information.
         */
        if (parse_acpi_topology())
                reset_cpu_topology();
        else if (of_have_populated_dt() && parse_dt_topology())
                reset_cpu_topology();
}
#endif