// SPDX-License-Identifier: GPL-2.0-only
/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/cputhreads.h>
#include <asm/smp.h>

#include "cacheinfo.h"

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu. This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties. According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};
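
/*
 * For illustration, the OF properties consulted via these templates
 * might appear in a device tree CPU node like this (property names
 * are from the PowerPC Processor binding; the values and label are
 * hypothetical):
 *
 *	cpu@0 {
 *		d-cache-size = <0x8000>;
 *		d-cache-block-size = <128>;
 *		d-cache-sets = <64>;
 *		i-cache-size = <0x8000>;
 *		i-cache-block-size = <128>;
 *		i-cache-sets = <64>;
 *		l2-cache = <&L2_0>;
 *	};
 */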

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED     0 /* cache-size, cache-block-size, etc. */
#define CACHE_TYPE_UNIFIED_D   1 /* d-cache-size, d-cache-block-size, etc */
#define CACHE_TYPE_INSTRUCTION 2
#define CACHE_TYPE_DATA        3

static const struct cache_type_info cache_type_info[] = {
	{
		/* Embedded systems that use cache-size, cache-block-size,
		 * etc. for the Unified (typically L2) cache. */
		.name            = "Unified",
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	},
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name            = "Unified",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
	{
		.name            = "Instruction",
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	},
	{
		.name            = "Data",
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system. There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object. A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};
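
/*
 * Illustrative object graph, assuming split L1 caches and a unified
 * L2 shared by two CPUs (the shape is hypothetical):
 *
 *	cpu0: L1d -> L1i -> L2
 *	cpu1: L1d -> L1i ---^
 *
 * Each CPU's L1d/L1i pair shares a single OF node, while the one L2
 * object ends up with shared_cpu_map = {0,1} and appears exactly once
 * on cache_list, alongside the four L1 objects.
 */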

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %pOF(%s) refers to cache for %pOF(%s)\n",
			  iter->ofnode,
			  cache_type_string(iter),
			  cache->ofnode,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
		 cache_type_string(cache), cache->ofnode);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}

static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %pOF(%s)\n",
			  cpu, next->ofnode,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = of_read_number(cache_size, 1);
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const __be32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = of_read_number(line_size, 1);
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const __be32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = of_read_number(nr_sets, 1);
	return 0;
}

static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}
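
/*
 * Worked example with illustrative numbers: a 32KB cache with
 * 128-byte lines and 32 sets reports (32768 / 32) / 128 = 8 ways of
 * associativity. A fully associative cache (nr_sets == 1 by
 * convention) reports 0.
 */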

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED ||
	    cache->type == CACHE_TYPE_UNIFIED_D)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

/*
 * Unified caches can have two different sets of property names. Most
 * embedded systems use cache-size, etc. for the unified cache size,
 * but open firmware systems use d-cache-size, etc. Check on
 * initialization for which type we have, and return the appropriate
 * structure type. Assume it's embedded if it isn't open firmware. If
 * it's yet a third type, then there will be missing entries in
 * /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
 * to be extended further.
 */
static int cache_is_unified_d(const struct device_node *np)
{
	return of_get_property(np,
		cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
		CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
{
	pr_debug("creating L%d ucache for %pOF\n", level, node);

	return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %pOF\n", level,
		 node);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}
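
/*
 * Note that when a later CPU's freshly instantiated L1 chain is
 * linked to a cache object an earlier CPU already instantiated (a
 * shared L2, say), the walk in link_cache_lists() stops at the
 * "already linked" check instead of appending a duplicate entry.
 */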

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu"));
}

static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}
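
/*
 * For illustration, of_find_next_cache_node() follows the cache
 * phandle chain in the device tree, which might look like this
 * (labels and node names hypothetical):
 *
 *	cpu@0 { ... l2-cache = <&L2_0>; };
 *	L2_0: l2-cache { cache-unified; l2-cache = <&L3_0>; ... };
 *	L3_0: l3-cache { cache-unified; ... };
 *
 * yielding a local list of L1d -> L1i -> L2 -> L3 for a CPU with a
 * split L1.
 */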

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}

static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);


static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
{
	struct kobject *index_dir_kobj = &index->kobj;
	struct kobject *cache_dir_kobj = index_dir_kobj->parent;
	struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
	struct device *dev = kobj_to_dev(cpu_dev_kobj);

	return dev->id;
}

/*
 * On big-core systems, each core has two groups of CPUs each of which
 * has its own L1-cache. The thread-siblings which share l1-cache with
 * @cpu can be obtained via cpu_smallcore_mask().
 */
static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
{
	if (cache->level == 1)
		return cpu_smallcore_mask(cpu);

	return &cache->shared_cpu_map;
}

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	const struct cpumask *mask;
	int cpu;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	if (has_big_cores) {
		cpu = index_dir_to_cpu(index);
		mask = get_big_core_shared_cpu_map(cpu, cache);
	} else {
		mask = &cache->shared_cpu_map;
	}

	return cpumap_print_to_pagebuf(false, buf, mask);
}
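
/*
 * With the "list" argument false, cpumap_print_to_pagebuf() emits the
 * mask in hex bitmask form, e.g. "ff" for a cache shared by CPUs 0-7
 * (illustrative; the printed width depends on the number of possible
 * CPUs).
 */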

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs. This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value. Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for "
				 "%pOF(%s) (rc = %zd)\n",
				 attr->attr.name, cache->ofnode,
				 cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %pOF(%s)\n",
				 attr->attr.name, cache->ofnode, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}
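
/*
 * The resulting sysfs layout for a CPU with split L1 and a unified L2
 * looks roughly like this (illustrative; the attribute set varies
 * with the properties present in the device tree):
 *
 *	/sys/devices/system/cpu/cpu0/cache/
 *		index0/		L1 data
 *		index1/		L1 instruction
 *		index2/		L2
 *
 * with each index directory containing type, level and
 * shared_cpu_map, plus (optionally) size, coherency_line_size,
 * number_of_sets and ways_of_associativity.
 */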

void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

/* functions needed to remove cache entry for cpu offline or suspend/resume */

#if (defined(CONFIG_PPC_PSERIES) && defined(CONFIG_SUSPEND)) || \
    defined(CONFIG_HOTPLUG_CPU)

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	/* Remove cache dir from sysfs */
	kobject_del(cache_dir->kobj);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}

static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %pOF(%s)\n",
			  cpu, cache->ofnode,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}

void cacheinfo_teardown(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_offline(cpu);
}

void cacheinfo_rebuild(void)
{
	unsigned int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu)
		cacheinfo_cpu_online(cpu);
}

#endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */