/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

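/**
 * get_cpu_cacheinfo - return the per-CPU cacheinfo descriptor
 * @cpu: CPU whose cache leaf list and leaf count are wanted
 *
 * Single point of access to the per-CPU data above, used by both the
 * arch-specific code and the sysfs glue below.
 */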
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
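/*
 * Walk the device tree to attach an of_node to every cache leaf: level 1
 * caches are described in the cpu node itself, while each higher level is
 * reached by following the next-level-cache phandle chain via
 * of_find_next_cache_node(). A minimal sketch of the expected bindings
 * (node and label names below are illustrative only):
 *
 *	cpu@0 {
 *		...
 *		next-level-cache = <&L2_0>;
 *	};
 *
 *	L2_0: l2-cache {
 *		compatible = "cache";
 *		cache-level = <2>;
 *	};
 */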
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np); /* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume a unique level 1 cache per CPU and
	 * system-wide shared caches for all other levels. This is used
	 * only if the arch-specific code has not populated shared_cpu_map.
	 */
	return !(this_leaf->level == 1);
}
#endif

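/*
 * Build shared_cpu_map for every leaf of this CPU by comparing each of
 * its leaves against the leaf at the same index on every other online
 * CPU; leaves that compare as shared get both CPUs set in each other's
 * mask.
 */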
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		/* No cache property/hierarchy support yet in ACPI */
		ret = -ENOTSUPP;
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

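/*
 * Undo cache_shared_cpu_map_setup() for an outgoing CPU: drop this CPU
 * from every sibling's shared_cpu_map, drop the siblings from its own
 * masks, and release the of_node reference taken for each leaf.
 */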
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

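/*
 * Arch hooks: architectures override these weak stubs to report how many
 * cache levels/leaves a CPU has (init_cache_level()) and to fill in the
 * info_list allocated below (populate_cache_leaves()). The stubs make
 * cacheinfo a no-op on architectures that provide neither.
 */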
int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}

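/*
 * Allocate and fill the cacheinfo leaf array for @cpu: ask the arch for
 * the leaf count, allocate info_list, let the arch populate it, then
 * derive of_node and shared_cpu_map for anything still unpopulated.
 */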
static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already.
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

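/*
 * Each show_one() invocation below generates one sysfs show routine; for
 * example, show_one(level, level) expands to a level_show() that prints
 * this_leaf->level, backing the per-leaf "level" attribute file.
 */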
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

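/*
 * Only expose an attribute file when the arch actually populated the
 * corresponding field: zero/empty values leave the mode at 0 so the file
 * stays hidden (ways_of_associativity is gated on size instead, since
 * 0 ways is a legal value meaning fully associative).
 */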
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

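/*
 * Arch hook: a non-NULL return here lets the architecture append one
 * extra attribute group per leaf alongside the default group (the weak
 * default advertises no private attributes).
 */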
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

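/*
 * Register the cpuX/cache parent device plus one indexY child device per
 * leaf; each child carries the default attribute group (and any arch
 * private group) with the leaf as drvdata for the show routines above.
 */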
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
	if (!cpumask_test_cpu(cpu, &cache_dev_map))
		return;
	cpumask_clear_cpu(cpu, &cache_dev_map);

	cpu_cache_sysfs_exit(cpu);
}

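/*
 * CPU hotplug: rebuild the cacheinfo and sysfs nodes when a CPU comes
 * online, and tear them down again on CPU_DEAD so offline CPUs never
 * expose stale cache topology.
 */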
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = detect_cache_attributes(cpu);
		if (!rc)
			rc = cache_add_dev(cpu);
		break;
	case CPU_DEAD:
		cache_remove_dev(cpu);
		free_cache_attributes(cpu);
		break;
	}
	return notifier_from_errno(rc);
}

static int __init cacheinfo_sysfs_init(void)
{
	int cpu, rc = 0;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		rc = detect_cache_attributes(cpu);
		if (rc)
			goto out;
		rc = cache_add_dev(cpu);
		if (rc) {
			free_cache_attributes(cpu);
			pr_err("error populating cacheinfo for cpu%d\n", cpu);
			goto out;
		}
	}
	__hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return rc;
}

device_initcall(cacheinfo_sysfs_init);
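
/*
 * Resulting sysfs layout for a hypothetical CPU 0 with a split L1 and a
 * unified L2 (leaf count, order and values depend on the machine):
 *
 *	/sys/devices/system/cpu/cpu0/cache/index0/	level 1 Data
 *	/sys/devices/system/cpu/cpu0/cache/index1/	level 1 Instruction
 *	/sys/devices/system/cpu/cpu0/cache/index2/	level 2 Unified
 *
 * with each indexY directory holding the attribute files declared above:
 * level, type, size, coherency_line_size, shared_cpu_map, and so on,
 * subject to the is_visible() filtering of unpopulated fields.
 */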