// SPDX-License-Identifier: GPL-2.0
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)       (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)       (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)  (ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
        return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
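/*
 * Two cache leaves are shared when they refer to the same firmware
 * token, i.e. the same device tree node.
 */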
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        return sib_leaf->fw_token == this_leaf->fw_token;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
        const char *size_prop;
        const char *line_size_props[2];
        const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
        {
                .size_prop       = "cache-size",
                .line_size_props = { "cache-line-size",
                                     "cache-block-size", },
                .nr_sets_prop    = "cache-sets",
        }, {
                .size_prop       = "i-cache-size",
                .line_size_props = { "i-cache-line-size",
                                     "i-cache-block-size", },
                .nr_sets_prop    = "i-cache-sets",
        }, {
                .size_prop       = "d-cache-size",
                .line_size_props = { "d-cache-line-size",
                                     "d-cache-block-size", },
                .nr_sets_prop    = "d-cache-sets",
        },
};

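/*
 * Map a cache type to its row in cache_type_info[]: the unified entry
 * sits at index 0, while CACHE_TYPE_INST (1) and CACHE_TYPE_DATA (2)
 * index the array directly.
 */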
static inline int get_cacheinfo_idx(enum cache_type type)
{
        if (type == CACHE_TYPE_UNIFIED)
                return 0;
        return type;
}

static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
{
        const char *propname;
        const __be32 *cache_size;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].size_prop;

        cache_size = of_get_property(np, propname, NULL);
        if (cache_size)
                this_leaf->size = of_read_number(cache_size, 1);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf,
                                struct device_node *np)
{
        const __be32 *line_size;
        int i, lim, ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

        for (i = 0; i < lim; i++) {
                const char *propname;

                propname = cache_type_info[ct_idx].line_size_props[i];
                line_size = of_get_property(np, propname, NULL);
                if (line_size)
                        break;
        }

        if (line_size)
                this_leaf->coherency_line_size = of_read_number(line_size, 1);
}

static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
{
        const char *propname;
        const __be32 *nr_sets;
        int ct_idx;

        ct_idx = get_cacheinfo_idx(this_leaf->type);
        propname = cache_type_info[ct_idx].nr_sets_prop;

        nr_sets = of_get_property(np, propname, NULL);
        if (nr_sets)
                this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}

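/*
 * DT does not describe associativity directly; derive it from
 * size = ways * number_of_sets * coherency_line_size, e.g. a 32KiB
 * cache with 128 sets and 64-byte lines is (32768 / 128) / 64 = 4-way.
 */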
static void cache_associativity(struct cacheinfo *this_leaf)
{
        unsigned int line_size = this_leaf->coherency_line_size;
        unsigned int nr_sets = this_leaf->number_of_sets;
        unsigned int size = this_leaf->size;

        /*
         * If the cache is fully associative (a single set), there is
         * no need to check the other properties.
         */
        if (nr_sets > 1 && size > 0 && line_size > 0)
                this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static bool cache_node_is_unified(struct cacheinfo *this_leaf,
                                  struct device_node *np)
{
        return of_property_read_bool(np, "cache-unified");
}

static void cache_of_set_props(struct cacheinfo *this_leaf,
                               struct device_node *np)
{
        /*
         * init_cache_level() must set up the cache levels correctly,
         * overriding the architecturally specified levels, so if the
         * type is still NOCACHE at this stage, the node describes a
         * unified cache.
         */
        if (this_leaf->type == CACHE_TYPE_NOCACHE &&
            cache_node_is_unified(this_leaf, np))
                this_leaf->type = CACHE_TYPE_UNIFIED;
        cache_size(this_leaf, np);
        cache_get_line_size(this_leaf, np);
        cache_nr_sets(this_leaf, np);
        cache_associativity(this_leaf);
}

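/*
 * Walk the DT cache hierarchy for this CPU: level 1 leaves are
 * described by the CPU node itself, and each higher level is reached
 * by following the next-level-cache phandle via
 * of_find_next_cache_node(). An illustrative (not normative) fragment:
 *
 *      cpu@0 {
 *              d-cache-size = <0x8000>;
 *              i-cache-size = <0x8000>;
 *              next-level-cache = <&L2_0>;
 *      };
 *      L2_0: l2-cache0 {
 *              compatible = "cache";
 *              cache-unified;
 *              cache-size = <0x80000>;
 *      };
 */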
static int cache_setup_of_node(unsigned int cpu)
{
        struct device_node *np;
        struct cacheinfo *this_leaf;
        struct device *cpu_dev = get_cpu_device(cpu);
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        unsigned int index = 0;

        /* skip if fw_token is already populated */
        if (this_cpu_ci->info_list->fw_token)
                return 0;

        if (!cpu_dev) {
                pr_err("No cpu device for CPU %d\n", cpu);
                return -ENODEV;
        }
        np = cpu_dev->of_node;
        if (!np) {
                pr_err("Failed to find cpu%d device node\n", cpu);
                return -ENOENT;
        }

        while (index < cache_leaves(cpu)) {
                this_leaf = this_cpu_ci->info_list + index;
                if (this_leaf->level != 1)
                        np = of_find_next_cache_node(np);
                else
                        np = of_node_get(np);   /* cpu node itself */
                if (!np)
                        break;
                cache_of_set_props(this_leaf, np);
                this_leaf->fw_token = np;
                index++;
        }

        if (index != cache_leaves(cpu)) /* not all OF nodes populated */
                return -ENOENT;

        return 0;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        /*
         * For non-DT/ACPI systems, assume unique level 1 caches and
         * system-wide shared caches for all other levels. This is used
         * only if arch specific code has not populated shared_cpu_map.
         */
        return !(this_leaf->level == 1);
}
#endif

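/*
 * Weak default for kernels without an architecture-provided ACPI
 * cache parser; arch code overrides this where firmware tables
 * describe the cache topology.
 */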
int __weak cache_setup_acpi(unsigned int cpu)
{
        return -ENOTSUPP;
}

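/*
 * Populate shared_cpu_map for every leaf of this CPU: pull in the
 * firmware description first (DT or ACPI), then mark each pair of
 * online CPUs whose leaves at the same index are shared.
 */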
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int index;
        int ret = 0;

        if (this_cpu_ci->cpu_map_populated)
                return 0;

        if (of_have_populated_dt())
                ret = cache_setup_of_node(cpu);
        else if (!acpi_disabled)
                ret = cache_setup_acpi(cpu);

        if (ret)
                return ret;

        for (index = 0; index < cache_leaves(cpu); index++) {
                unsigned int i;

                this_leaf = this_cpu_ci->info_list + index;
                /* skip if shared_cpu_map is already populated */
                if (!cpumask_empty(&this_leaf->shared_cpu_map))
                        continue;

                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                for_each_online_cpu(i) {
                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

                        if (i == cpu || !sib_cpu_ci->info_list)
                                continue;       /* skip itself or CPUs without cacheinfo */
                        sib_leaf = sib_cpu_ci->info_list + index;
                        if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
                                cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
                        }
                }
        }

        return 0;
}

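/*
 * Undo cache_shared_cpu_map_setup() for an outgoing CPU: clear it
 * from every sibling's map and drop the DT node references taken
 * during setup.
 */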
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int sibling, index;

        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = this_cpu_ci->info_list + index;
                for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
                        struct cpu_cacheinfo *sib_cpu_ci;

                        if (sibling == cpu) /* skip itself */
                                continue;

                        sib_cpu_ci = get_cpu_cacheinfo(sibling);
                        if (!sib_cpu_ci->info_list)
                                continue;

                        sib_leaf = sib_cpu_ci->info_list + index;
                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
                }
                if (of_have_populated_dt())
                        of_node_put(this_leaf->fw_token);
        }
}

static void free_cache_attributes(unsigned int cpu)
{
        if (!per_cpu_cacheinfo(cpu))
                return;

        cache_shared_cpu_map_remove(cpu);

        kfree(per_cpu_cacheinfo(cpu));
        per_cpu_cacheinfo(cpu) = NULL;
}

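/*
 * Architecture hooks: init_cache_level() fills in the number of cache
 * levels and leaves, populate_cache_leaves() fills in the per-leaf
 * details.
 */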
int __weak init_cache_level(unsigned int cpu)
{
        return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
        return -ENOENT;
}

static int detect_cache_attributes(unsigned int cpu)
{
        int ret;

        if (init_cache_level(cpu) || !cache_leaves(cpu))
                return -ENOENT;

        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct cacheinfo), GFP_KERNEL);
        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOMEM;

        /*
         * populate_cache_leaves() may completely set up the cache leaves
         * and shared_cpu_map, or it may leave them partially set up.
         */
        ret = populate_cache_leaves(cpu);
        if (ret)
                goto free_ci;
        /*
         * For systems using DT for the cache hierarchy, fw_token
         * and shared_cpu_map will be set up here only if they are
         * not populated already.
         */
        ret = cache_shared_cpu_map_setup(cpu);
        if (ret) {
                pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
                goto free_ci;
        }

        return 0;

free_ci:
        free_cache_attributes(cpu);
        return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)  (per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)  (per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)   ((per_cpu_index_dev(cpu))[idx])

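/*
 * show_one() generates a sysfs show() handler for a plain unsigned
 * field of struct cacheinfo, e.g. show_one(level, level) below defines
 * level_show().
 */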
#define show_one(file_name, object)                             \
static ssize_t file_name##_show(struct device *dev,             \
                struct device_attribute *attr, char *buf)       \
{                                                               \
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);     \
        return sprintf(buf, "%u\n", this_leaf->object);         \
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;

        return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        switch (this_leaf->type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return -EINVAL;
        }
}

static ssize_t allocation_policy_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
                n = sprintf(buf, "ReadWriteAllocate\n");
        else if (ci_attr & CACHE_READ_ALLOCATE)
                n = sprintf(buf, "ReadAllocate\n");
        else if (ci_attr & CACHE_WRITE_ALLOCATE)
                n = sprintf(buf, "WriteAllocate\n");
        return n;
}

static ssize_t write_policy_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if (ci_attr & CACHE_WRITE_THROUGH)
                n = sprintf(buf, "WriteThrough\n");
        else if (ci_attr & CACHE_WRITE_BACK)
                n = sprintf(buf, "WriteBack\n");
        return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
        &dev_attr_id.attr,
        &dev_attr_type.attr,
        &dev_attr_level.attr,
        &dev_attr_shared_cpu_map.attr,
        &dev_attr_shared_cpu_list.attr,
        &dev_attr_coherency_line_size.attr,
        &dev_attr_ways_of_associativity.attr,
        &dev_attr_number_of_sets.attr,
        &dev_attr_size.attr,
        &dev_attr_allocation_policy.attr,
        &dev_attr_write_policy.attr,
        &dev_attr_physical_line_partition.attr,
        NULL
};

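/*
 * Hide attributes whose backing field was never populated, so sysfs
 * only exposes the values the architecture actually provided.
 */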
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
                               struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;
        umode_t mode = attr->mode;

        if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
                return mode;
        if ((attr == &dev_attr_type.attr) && this_leaf->type)
                return mode;
        if ((attr == &dev_attr_level.attr) && this_leaf->level)
                return mode;
        if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_coherency_line_size.attr) &&
            this_leaf->coherency_line_size)
                return mode;
        if ((attr == &dev_attr_ways_of_associativity.attr) &&
            this_leaf->size) /* allow 0 = full associativity */
                return mode;
        if ((attr == &dev_attr_number_of_sets.attr) &&
            this_leaf->number_of_sets)
                return mode;
        if ((attr == &dev_attr_size.attr) && this_leaf->size)
                return mode;
        if ((attr == &dev_attr_write_policy.attr) &&
            (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_allocation_policy.attr) &&
            (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_physical_line_partition.attr) &&
            this_leaf->physical_line_partition)
                return mode;

        return 0;
}

static const struct attribute_group cache_default_group = {
        .attrs = cache_default_attrs,
        .is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
        &cache_default_group,
        NULL,
};

static const struct attribute_group *cache_private_groups[] = {
        &cache_default_group,
        NULL, /* Place holder for private group */
        NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
        return NULL;
}

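/*
 * Append the architecture's private attribute group, if any, after the
 * default group; the placeholder slot is filled once and then reused
 * for every subsequent leaf.
 */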
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
        const struct attribute_group *priv_group =
                        cache_get_priv_group(this_leaf);

        if (!priv_group)
                return cache_default_groups;

        if (!cache_private_groups[1])
                cache_private_groups[1] = priv_group;

        return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
        int i;
        struct device *ci_dev;

        if (per_cpu_index_dev(cpu)) {
                for (i = 0; i < cache_leaves(cpu); i++) {
                        ci_dev = per_cache_index_dev(cpu, i);
                        if (!ci_dev)
                                continue;
                        device_unregister(ci_dev);
                }
                kfree(per_cpu_index_dev(cpu));
                per_cpu_index_dev(cpu) = NULL;
        }
        device_unregister(per_cpu_cache_dev(cpu));
        per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOENT;

        per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
        if (IS_ERR(per_cpu_cache_dev(cpu)))
                return PTR_ERR(per_cpu_cache_dev(cpu));

        /* Allocate all required memory */
        per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct device *), GFP_KERNEL);
        if (unlikely(per_cpu_index_dev(cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpu_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

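/*
 * Create one "indexY" device per cache leaf under the CPU's "cache"
 * device, which yields the familiar sysfs layout, e.g.:
 *
 *   /sys/devices/system/cpu/cpuX/cache/index0/{type,level,size,...}
 */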
static int cache_add_dev(unsigned int cpu)
{
        unsigned int i;
        int rc;
        struct device *ci_dev, *parent;
        struct cacheinfo *this_leaf;
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        const struct attribute_group **cache_groups;

        rc = cpu_cache_sysfs_init(cpu);
        if (unlikely(rc < 0))
                return rc;

        parent = per_cpu_cache_dev(cpu);
        for (i = 0; i < cache_leaves(cpu); i++) {
                this_leaf = this_cpu_ci->info_list + i;
                if (this_leaf->disable_sysfs)
                        continue;
                cache_groups = cache_get_attribute_groups(this_leaf);
                ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
                                           "index%1u", i);
                if (IS_ERR(ci_dev)) {
                        rc = PTR_ERR(ci_dev);
                        goto err;
                }
                per_cache_index_dev(cpu, i) = ci_dev;
        }
        cpumask_set_cpu(cpu, &cache_dev_map);

        return 0;
err:
        cpu_cache_sysfs_exit(cpu);
        return rc;
}

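/* CPU hotplug callbacks: build cacheinfo on online, tear it down on offline */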
static int cacheinfo_cpu_online(unsigned int cpu)
{
        int rc = detect_cache_attributes(cpu);

        if (rc)
                return rc;
        rc = cache_add_dev(cpu);
        if (rc)
                free_cache_attributes(cpu);
        return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
        if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
                cpu_cache_sysfs_exit(cpu);

        free_cache_attributes(cpu);
        return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
        return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
                                 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);