// SPDX-License-Identifier: GPL-2.0
/*
 * Arch specific cpu topology information
 *
 * Copyright (C) 2016, ARM Ltd.
 * Written by: Juri Lelli, ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched/topology.h>
#include <linux/cpuset.h>

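/*
 * Per-CPU frequency scale factor: the ratio of a CPU's current frequency
 * to its maximum, shifted so that SCHED_CAPACITY_SCALE means "running at
 * full speed". CPUs default to full scale until cpufreq reports otherwise.
 */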
DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;

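/*
 * Called by cpufreq on frequency changes to refresh freq_scale for every
 * CPU sharing the new frequency: SCHED_CAPACITY_SCALE when cur_freq ==
 * max_freq, proportionally less otherwise.
 */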
void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
			 unsigned long max_freq)
{
	unsigned long scale;
	int i;

	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(freq_scale, i) = scale;
}

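/*
 * Per-CPU compute capacity relative to the most capable CPU in the
 * system, where SCHED_CAPACITY_SCALE is the maximum. All CPUs start at
 * full capacity until topology parsing lowers some of them.
 */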
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;

void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
	per_cpu(cpu_scale, cpu) = capacity;
}

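/*
 * sysfs show handler backing the read-only cpu_capacity attribute that
 * register_cpu_capacity_sysctl() below creates for each CPU device.
 */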
static ssize_t cpu_capacity_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);

	return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}

static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);

static DEVICE_ATTR_RO(cpu_capacity);

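/*
 * Despite the "sysctl" in the name, this creates the cpu_capacity sysfs
 * attribute on every possible CPU's device.
 */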
static int register_cpu_capacity_sysctl(void)
{
	int i;
	struct device *cpu;

	for_each_possible_cpu(i) {
		cpu = get_cpu_device(i);
		if (!cpu) {
			pr_err("%s: too early to get CPU%d device!\n",
			       __func__, i);
			continue;
		}
		device_create_file(cpu, &dev_attr_cpu_capacity);
	}

	return 0;
}
subsys_initcall(register_cpu_capacity_sysctl);

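/*
 * Non-zero only while update_topology_flags_workfn() is rebuilding the
 * sched domains, signalling that the topology flags must be re-evaluated
 * during the rebuild.
 */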
static int update_topology;

int topology_update_cpu_topology(void)
{
	return update_topology;
}

/*
 * Updating the sched_domains can't be done directly from cpufreq callbacks
 * due to locking, so queue the work for later.
 */
static void update_topology_flags_workfn(struct work_struct *work)
{
	update_topology = 1;
	rebuild_sched_domains();
	pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
	update_topology = 0;
}

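/*
 * raw_capacity holds the per-CPU capacity values as parsed from the
 * device tree (and, under CONFIG_CPU_FREQ, scaled by maximum frequency);
 * capacity_scale tracks the largest value seen and is used as the
 * normalization divisor.
 */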
static u32 capacity_scale;
static u32 *raw_capacity;

static int free_raw_capacity(void)
{
	kfree(raw_capacity);
	raw_capacity = NULL;

	return 0;
}

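/*
 * Rescale the raw capacities so that the most capable CPU ends up at
 * SCHED_CAPACITY_SCALE and every other CPU is proportionally lower.
 */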
void topology_normalize_cpu_scale(void)
{
	u64 capacity;
	int cpu;

	if (!raw_capacity)
		return;

	pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
	for_each_possible_cpu(cpu) {
		pr_debug("cpu_capacity: cpu=%d raw_capacity=%u\n",
			 cpu, raw_capacity[cpu]);
		capacity = (raw_capacity[cpu] << SCHED_CAPACITY_SHIFT)
			/ capacity_scale;
		topology_set_cpu_scale(cpu, capacity);
		pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
			 cpu, topology_get_cpu_scale(cpu));
	}
}

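/*
 * Read the capacity-dmips-mhz property of one CPU's device tree node.
 * Returns true if a value was found. If any CPU lacks the property, the
 * partial data is discarded and all CPUs keep the default capacity.
 */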
bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
	static bool cap_parsing_failed;
	int ret;
	u32 cpu_capacity;

	if (cap_parsing_failed)
		return false;

	ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
				   &cpu_capacity);
	if (!ret) {
		if (!raw_capacity) {
			raw_capacity = kcalloc(num_possible_cpus(),
					       sizeof(*raw_capacity),
					       GFP_KERNEL);
			if (!raw_capacity) {
				pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
				cap_parsing_failed = true;
				return false;
			}
		}
		capacity_scale = max(cpu_capacity, capacity_scale);
		raw_capacity[cpu] = cpu_capacity;
		pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
			 cpu_node, raw_capacity[cpu]);
	} else {
		if (raw_capacity) {
			pr_err("cpu_capacity: missing %pOF raw capacity\n",
			       cpu_node);
			pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
		}
		cap_parsing_failed = true;
		free_raw_capacity();
	}

	return !ret;
}

#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);

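/*
 * cpufreq policy notifier: scale the raw device tree capacity of each
 * CPU in the new policy by its maximum frequency. Once every possible
 * CPU has been visited, normalize the capacities, queue a sched domain
 * rebuild and drop the no-longer-needed raw data.
 */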
static int
init_cpu_capacity_callback(struct notifier_block *nb,
			   unsigned long val,
			   void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (!raw_capacity)
		return 0;

	if (val != CPUFREQ_NOTIFY)
		return 0;

	pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
		 cpumask_pr_args(policy->related_cpus),
		 cpumask_pr_args(cpus_to_visit));

	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);

	for_each_cpu(cpu, policy->related_cpus) {
		raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
				    policy->cpuinfo.max_freq / 1000UL;
		capacity_scale = max(raw_capacity[cpu], capacity_scale);
	}

	if (cpumask_empty(cpus_to_visit)) {
		topology_normalize_cpu_scale();
		schedule_work(&update_topology_flags_work);
		free_raw_capacity();
		pr_debug("cpu_capacity: parsing done\n");
		schedule_work(&parsing_done_work);
	}

	return 0;
}

static struct notifier_block init_cpu_capacity_notifier = {
	.notifier_call = init_cpu_capacity_callback,
};

static int __init register_cpufreq_notifier(void)
{
	int ret;

	/*
	 * On ACPI-based systems we need to use the default cpu capacity
	 * until we have the necessary code to parse it, so skip
	 * registering the cpufreq notifier.
	 */
	if (!acpi_disabled || !raw_capacity)
		return -EINVAL;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
		pr_err("cpu_capacity: failed to allocate memory for cpus_to_visit\n");
		return -ENOMEM;
	}

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_cpu_capacity_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

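/*
 * The notifier cannot safely be unregistered from within its own
 * callback, so the final teardown is deferred to this work item.
 */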
static void parsing_done_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);
}

#else
core_initcall(free_raw_capacity);
#endif