/*
 * Processor cache information made available to userspace via sysfs;
 * intended to be compatible with x86 intel_cacheinfo implementation.
 *
 * Copyright 2008 IBM Corporation
 * Author: Nathan Lynch
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>

#include "cacheinfo.h"
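
/*
 * The only entry points are cacheinfo_cpu_online() and
 * cacheinfo_cpu_offline(), declared in cacheinfo.h; everything else
 * here is driven from those two CPU online/offline notifications.
 */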

/* per-cpu object for tracking:
 * - a "cache" kobject for the top-level directory
 * - a list of "index" objects representing the cpu's local cache hierarchy
 */
struct cache_dir {
	struct kobject *kobj; /* bare (not embedded) kobject for cache
			       * directory */
	struct cache_index_dir *index; /* list of index objects */
};

/* "index" object: each cpu's cache directory has an index
 * subdirectory corresponding to a cache object associated with the
 * cpu. This object's lifetime is managed via the embedded kobject.
 */
struct cache_index_dir {
	struct kobject kobj;
	struct cache_index_dir *next; /* next index in parent directory */
	struct cache *cache;
};

/* Template for determining which OF properties to query for a given
 * cache type */
struct cache_type_info {
	const char *name;
	const char *size_prop;

	/* Allow for both [di]-cache-line-size and
	 * [di]-cache-block-size properties. According to the PowerPC
	 * Processor binding, -line-size should be provided if it
	 * differs from the cache block size (that which is operated
	 * on by cache instructions), so we look for -line-size first.
	 * See cache_get_line_size(). */

	const char *line_size_props[2];
	const char *nr_sets_prop;
};

/* These are used to index the cache_type_info array. */
#define CACHE_TYPE_UNIFIED 0
#define CACHE_TYPE_INSTRUCTION 1
#define CACHE_TYPE_DATA 2

static const struct cache_type_info cache_type_info[] = {
	{
		/* PowerPC Processor binding says the [di]-cache-*
		 * must be equal on unified caches, so just use
		 * d-cache properties. */
		.name = "Unified",
		.size_prop = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop = "d-cache-sets",
	},
	{
		.name = "Instruction",
		.size_prop = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop = "i-cache-sets",
	},
	{
		.name = "Data",
		.size_prop = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop = "d-cache-sets",
	},
};
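
/* Illustrative only: with the bindings above, a unified L2 node in
 * the device tree might carry properties like (values made up):
 *
 *	l2-cache {
 *		device_type = "cache";
 *		cache-unified;
 *		d-cache-size = <0x80000>;	// 512KB
 *		d-cache-line-size = <0x80>;	// 128-byte lines
 *		d-cache-sets = <0x200>;		// 512 sets
 *	};
 */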

/* Cache object: each instance of this corresponds to a distinct cache
 * in the system. There are separate objects for Harvard caches: one
 * each for instruction and data, and each refers to the same OF node.
 * The refcount of the OF node is elevated for the lifetime of the
 * cache object. A cache object is released when its shared_cpu_map
 * is cleared (see cache_cpu_clear).
 *
 * A cache object is on two lists: an unsorted global list
 * (cache_list) of cache objects; and a singly-linked list
 * representing the local cache hierarchy, which is ordered by level
 * (e.g. L1d -> L1i -> L2 -> L3).
 */
struct cache {
	struct device_node *ofnode;    /* OF node for this cache, may be cpu */
	struct cpumask shared_cpu_map; /* online CPUs using this cache */
	int type;                      /* split cache disambiguation */
	int level;                     /* level not explicit in device tree */
	struct list_head list;         /* global list of cache objects */
	struct cache *next_local;      /* next cache of >= level */
};

static DEFINE_PER_CPU(struct cache_dir *, cache_dir_pcpu);

/* traversal/modification of this list occurs only at cpu hotplug time;
 * access is serialized by cpu hotplug locking
 */
static LIST_HEAD(cache_list);

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *k)
{
	return container_of(k, struct cache_index_dir, kobj);
}

static const char *cache_type_string(const struct cache *cache)
{
	return cache_type_info[cache->type].name;
}

static void cache_init(struct cache *cache, int type, int level,
		       struct device_node *ofnode)
{
	cache->type = type;
	cache->level = level;
	cache->ofnode = of_node_get(ofnode);
	INIT_LIST_HEAD(&cache->list);
	list_add(&cache->list, &cache_list);
}

static struct cache *new_cache(int type, int level, struct device_node *ofnode)
{
	struct cache *cache;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (cache)
		cache_init(cache, type, level, ofnode);

	return cache;
}

static void release_cache_debugcheck(struct cache *cache)
{
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list)
		WARN_ONCE(iter->next_local == cache,
			  "cache for %s(%s) refers to cache for %s(%s)\n",
			  iter->ofnode->full_name,
			  cache_type_string(iter),
			  cache->ofnode->full_name,
			  cache_type_string(cache));
}

static void release_cache(struct cache *cache)
{
	if (!cache)
		return;

	pr_debug("freeing L%d %s cache for %s\n", cache->level,
		 cache_type_string(cache), cache->ofnode->full_name);

	release_cache_debugcheck(cache);
	list_del(&cache->list);
	of_node_put(cache->ofnode);
	kfree(cache);
}
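
/* Mark this cpu as sharing every cache in its local chain, from the
 * L1 outward; no bit should have been set already. */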
static void cache_cpu_set(struct cache *cache, int cpu)
{
	struct cache *next = cache;

	while (next) {
		WARN_ONCE(cpumask_test_cpu(cpu, &next->shared_cpu_map),
			  "CPU %i already accounted in %s(%s)\n",
			  cpu, next->ofnode->full_name,
			  cache_type_string(next));
		cpumask_set_cpu(cpu, &next->shared_cpu_map);
		next = next->next_local;
	}
}

static int cache_size(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *cache_size;

	propname = cache_type_info[cache->type].size_prop;

	cache_size = of_get_property(cache->ofnode, propname, NULL);
	if (!cache_size)
		return -ENODEV;

	*ret = *cache_size;
	return 0;
}

static int cache_size_kb(const struct cache *cache, unsigned int *ret)
{
	unsigned int size;

	if (cache_size(cache, &size))
		return -ENODEV;

	*ret = size / 1024;
	return 0;
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
{
	const u32 *line_size;
	int i, lim;

	lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[cache->type].line_size_props[i];
		line_size = of_get_property(cache->ofnode, propname, NULL);
		if (line_size)
			break;
	}

	if (!line_size)
		return -ENODEV;

	*ret = *line_size;
	return 0;
}

static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
{
	const char *propname;
	const u32 *nr_sets;

	propname = cache_type_info[cache->type].nr_sets_prop;

	nr_sets = of_get_property(cache->ofnode, propname, NULL);
	if (!nr_sets)
		return -ENODEV;

	*ret = *nr_sets;
	return 0;
}
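
/* Associativity is derived rather than read from a property:
 *
 *	ways = (size / nr_sets) / line_size
 *
 * e.g. a 32KB cache with 128 sets and 128-byte lines works out to
 * (32768 / 128) / 128 = 2 ways (figures illustrative). nr_sets == 1
 * is taken to mean fully associative, reported as 0.
 */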
static int cache_associativity(const struct cache *cache, unsigned int *ret)
{
	unsigned int line_size;
	unsigned int nr_sets;
	unsigned int size;

	if (cache_nr_sets(cache, &nr_sets))
		goto err;

	/* If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (nr_sets == 1) {
		*ret = 0;
		return 0;
	}

	if (cache_get_line_size(cache, &line_size))
		goto err;
	if (cache_size(cache, &size))
		goto err;

	if (!(nr_sets > 0 && size > 0 && line_size > 0))
		goto err;

	*ret = (size / nr_sets) / line_size;
	return 0;
err:
	return -ENODEV;
}

/* helper for dealing with split caches */
static struct cache *cache_find_first_sibling(struct cache *cache)
{
	struct cache *iter;

	if (cache->type == CACHE_TYPE_UNIFIED)
		return cache;

	list_for_each_entry(iter, &cache_list, list)
		if (iter->ofnode == cache->ofnode && iter->next_local == cache)
			return iter;

	return cache;
}

/* return the first cache on a local list matching node */
static struct cache *cache_lookup_by_node(const struct device_node *node)
{
	struct cache *cache = NULL;
	struct cache *iter;

	list_for_each_entry(iter, &cache_list, list) {
		if (iter->ofnode != node)
			continue;
		cache = cache_find_first_sibling(iter);
		break;
	}

	return cache;
}

static bool cache_node_is_unified(const struct device_node *np)
{
	return of_get_property(np, "cache-unified", NULL);
}

static struct cache *cache_do_one_devnode_unified(struct device_node *node,
						  int level)
{
	struct cache *cache;

	pr_debug("creating L%d ucache for %s\n", level, node->full_name);

	cache = new_cache(CACHE_TYPE_UNIFIED, level, node);

	return cache;
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
						int level)
{
	struct cache *dcache, *icache;

	pr_debug("creating L%d dcache and icache for %s\n", level,
		 node->full_name);

	dcache = new_cache(CACHE_TYPE_DATA, level, node);
	icache = new_cache(CACHE_TYPE_INSTRUCTION, level, node);

	if (!dcache || !icache)
		goto err;

	dcache->next_local = icache;

	return dcache;
err:
	release_cache(dcache);
	release_cache(icache);
	return NULL;
}

static struct cache *cache_do_one_devnode(struct device_node *node, int level)
{
	struct cache *cache;

	if (cache_node_is_unified(node))
		cache = cache_do_one_devnode_unified(node, level);
	else
		cache = cache_do_one_devnode_split(node, level);

	return cache;
}

static struct cache *cache_lookup_or_instantiate(struct device_node *node,
						 int level)
{
	struct cache *cache;

	cache = cache_lookup_by_node(node);

	WARN_ONCE(cache && cache->level != level,
		  "cache level mismatch on lookup (got %d, expected %d)\n",
		  cache->level, level);

	if (!cache)
		cache = cache_do_one_devnode(node, level);

	return cache;
}
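
/* Link 'bigger' (the next cache level out) onto the end of
 * 'smaller''s local chain, unless it is already present. */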
static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
	while (smaller->next_local) {
		if (smaller->next_local == bigger)
			return; /* already linked */
		smaller = smaller->next_local;
	}

	smaller->next_local = bigger;
}

static void do_subsidiary_caches_debugcheck(struct cache *cache)
{
	WARN_ON_ONCE(cache->level != 1);
	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
}
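
/* Starting from an L1, repeatedly follow the device tree's
 * next-level-cache linkage (via of_find_next_cache_node()), looking
 * up or instantiating a cache object for each node found and linking
 * it into the local chain, e.g. L1d -> L1i -> L2 -> L3. */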
static void do_subsidiary_caches(struct cache *cache)
{
	struct device_node *subcache_node;
	int level = cache->level;

	do_subsidiary_caches_debugcheck(cache);

	while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
		struct cache *subcache;

		level++;
		subcache = cache_lookup_or_instantiate(subcache_node, level);
		of_node_put(subcache_node);
		if (!subcache)
			break;

		link_cache_lists(cache, subcache);
		cache = subcache;
	}
}

static struct cache *cache_chain_instantiate(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cpu_cache = NULL;

	pr_debug("creating cache object(s) for CPU %i\n", cpu_id);

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		goto out;

	cpu_cache = cache_lookup_or_instantiate(cpu_node, 1);
	if (!cpu_cache)
		goto out;

	do_subsidiary_caches(cpu_cache);

	cache_cpu_set(cpu_cache, cpu_id);
out:
	of_node_put(cpu_node);

	return cpu_cache;
}
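
/* Create this cpu's "cache" kobject -- the directory that appears as
 * /sys/devices/system/cpu/cpuN/cache -- and record it in
 * cache_dir_pcpu. Returns NULL on failure. */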
static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct device *dev;
	struct kobject *kobj = NULL;

	dev = get_cpu_device(cpu_id);
	WARN_ONCE(!dev, "no dev for CPU %i\n", cpu_id);
	if (!dev)
		goto err;

	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto err;

	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto err;

	cache_dir->kobj = kobj;

	WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL);

	per_cpu(cache_dir_pcpu, cpu_id) = cache_dir;

	return cache_dir;
err:
	kobject_put(kobj);
	return NULL;
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);

	pr_debug("freeing index directory for L%d %s cache\n",
		 index->cache->level, cache_type_string(index->cache));

	kfree(index);
}

static ssize_t cache_index_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);

	return kobj_attr->show(k, kobj_attr, buf);
}

static struct cache *index_kobj_to_cache(struct kobject *k)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(k);

	return index->cache;
}

static ssize_t size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int size_kb;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_size_kb(cache, &size_kb))
		return -ENODEV;

	return sprintf(buf, "%uK\n", size_kb);
}

static struct kobj_attribute cache_size_attr =
	__ATTR(size, 0444, size_show, NULL);

static ssize_t line_size_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int line_size;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_get_line_size(cache, &line_size))
		return -ENODEV;

	return sprintf(buf, "%u\n", line_size);
}

static struct kobj_attribute cache_line_size_attr =
	__ATTR(coherency_line_size, 0444, line_size_show, NULL);

static ssize_t nr_sets_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int nr_sets;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_nr_sets(cache, &nr_sets))
		return -ENODEV;

	return sprintf(buf, "%u\n", nr_sets);
}

static struct kobj_attribute cache_nr_sets_attr =
	__ATTR(number_of_sets, 0444, nr_sets_show, NULL);

static ssize_t associativity_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	unsigned int associativity;
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	if (cache_associativity(cache, &associativity))
		return -ENODEV;

	return sprintf(buf, "%u\n", associativity);
}

static struct kobj_attribute cache_assoc_attr =
	__ATTR(ways_of_associativity, 0444, associativity_show, NULL);

static ssize_t type_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache *cache;

	cache = index_kobj_to_cache(k);

	return sprintf(buf, "%s\n", cache_type_string(cache));
}

static struct kobj_attribute cache_type_attr =
	__ATTR(type, 0444, type_show, NULL);

static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;

	return sprintf(buf, "%d\n", cache->level);
}

static struct kobj_attribute cache_level_attr =
	__ATTR(level, 0444, level_show, NULL);

static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
	struct cache_index_dir *index;
	struct cache *cache;
	int len;
	int n = 0;

	index = kobj_to_cache_index_dir(k);
	cache = index->cache;
	len = PAGE_SIZE - 2;

	if (len > 1) {
		n = cpumask_scnprintf(buf, len, &cache->shared_cpu_map);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

/* Attributes which should always be created -- the kobject/sysfs core
 * does this automatically via kobj_type->default_attrs. This is the
 * minimum data required to uniquely identify a cache.
 */
static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_level_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	NULL,
};

/* Attributes which should be created if the cache device node has the
 * right properties -- see cacheinfo_create_index_opt_attrs()
 */
static struct kobj_attribute *cache_index_opt_attrs[] = {
	&cache_size_attr,
	&cache_line_size_attr,
	&cache_nr_sets_attr,
	&cache_assoc_attr,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.release = cache_index_release,
	.sysfs_ops = &cache_index_ops,
	.default_attrs = cache_index_default_attrs,
};

static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
{
	const char *cache_name;
	const char *cache_type;
	struct cache *cache;
	char *buf;
	int i;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return;

	cache = dir->cache;
	cache_name = cache->ofnode->full_name;
	cache_type = cache_type_string(cache);

	/* We don't want to create an attribute that can't provide a
	 * meaningful value. Check the return value of each optional
	 * attribute's ->show method before registering the
	 * attribute.
	 */
	for (i = 0; i < ARRAY_SIZE(cache_index_opt_attrs); i++) {
		struct kobj_attribute *attr;
		ssize_t rc;

		attr = cache_index_opt_attrs[i];

		rc = attr->show(&dir->kobj, attr, buf);
		if (rc <= 0) {
			pr_debug("not creating %s attribute for %s(%s) (rc = %zd)\n",
				 attr->attr.name, cache_name, cache_type, rc);
			continue;
		}
		if (sysfs_create_file(&dir->kobj, &attr->attr))
			pr_debug("could not create %s attribute for %s(%s)\n",
				 attr->attr.name, cache_name, cache_type);
	}

	kfree(buf);
}

static void cacheinfo_create_index_dir(struct cache *cache, int index,
				       struct cache_dir *cache_dir)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return;

	index_dir->cache = cache;

	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc) {
		/* Once kobject_init_and_add() has run, cleanup must go
		 * through kobject_put() so that cache_index_release()
		 * frees index_dir; a bare kfree() here would leak the
		 * kobject name allocated for "index%d". */
		kobject_put(&index_dir->kobj);
		return;
	}

	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;

	cacheinfo_create_index_opt_attrs(index_dir);
}

static void cacheinfo_sysfs_populate(unsigned int cpu_id,
				     struct cache *cache_list)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int index = 0;

	cache_dir = cacheinfo_create_cache_dir(cpu_id);
	if (!cache_dir)
		return;

	cache = cache_list;
	while (cache) {
		cacheinfo_create_index_dir(cache, index, cache_dir);
		index++;
		cache = cache->next_local;
	}
}
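
/* CPU online entry point: build the cpu's cache chain and populate
 * sysfs. The resulting layout (attributes vary with the device tree
 * properties present) looks like:
 *
 *	/sys/devices/system/cpu/cpu0/cache/
 *		index0/		# e.g. L1 data
 *			type level shared_cpu_map
 *			size coherency_line_size number_of_sets
 *			ways_of_associativity
 *		index1/		# e.g. L1 instruction
 *		index2/		# e.g. L2
 */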
void cacheinfo_cpu_online(unsigned int cpu_id)
{
	struct cache *cache;

	cache = cache_chain_instantiate(cpu_id);
	if (!cache)
		return;

	cacheinfo_sysfs_populate(cpu_id, cache);
}

#ifdef CONFIG_HOTPLUG_CPU /* functions needed for cpu offline */

static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
{
	struct device_node *cpu_node;
	struct cache *cache;

	cpu_node = of_get_cpu_node(cpu_id, NULL);
	WARN_ONCE(!cpu_node, "no OF node found for CPU %i\n", cpu_id);
	if (!cpu_node)
		return NULL;

	cache = cache_lookup_by_node(cpu_node);
	of_node_put(cpu_node);

	return cache;
}

static void remove_index_dirs(struct cache_dir *cache_dir)
{
	struct cache_index_dir *index;

	index = cache_dir->index;

	while (index) {
		struct cache_index_dir *next;

		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
}

static void remove_cache_dir(struct cache_dir *cache_dir)
{
	remove_index_dirs(cache_dir);

	kobject_put(cache_dir->kobj);

	kfree(cache_dir);
}
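
/* Clear this cpu's bit in the shared_cpu_map of each cache in its
 * chain, releasing any cache object whose map becomes empty. */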
static void cache_cpu_clear(struct cache *cache, int cpu)
{
	while (cache) {
		struct cache *next = cache->next_local;

		WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
			  "CPU %i not accounted in %s(%s)\n",
			  cpu, cache->ofnode->full_name,
			  cache_type_string(cache));

		cpumask_clear_cpu(cpu, &cache->shared_cpu_map);

		/* Release the cache object if all the cpus using it
		 * are offline */
		if (cpumask_empty(&cache->shared_cpu_map))
			release_cache(cache);

		cache = next;
	}
}

void cacheinfo_cpu_offline(unsigned int cpu_id)
{
	struct cache_dir *cache_dir;
	struct cache *cache;

	/* Prevent userspace from seeing inconsistent state - remove
	 * the sysfs hierarchy first */
	cache_dir = per_cpu(cache_dir_pcpu, cpu_id);

	/* careful, sysfs population may have failed */
	if (cache_dir)
		remove_cache_dir(cache_dir);

	per_cpu(cache_dir_pcpu, cpu_id) = NULL;

	/* clear the CPU's bit in its cache chain, possibly freeing
	 * cache objects */
	cache = cache_lookup_by_cpu(cpu_id);
	if (cache)
		cache_cpu_clear(cache, cpu_id);
}
#endif /* CONFIG_HOTPLUG_CPU */