/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *
 * 2002/08/07 Erich Focht <efocht@ess.nec.de>
 *	Populate cpu entries in sysfs for non-NUMA systems as well
 *	Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *	Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

int arch_register_cpu(int num)
{
#if defined(CONFIG_ACPI) && defined(CONFIG_HOTPLUG_CPU)
	/*
	 * If CPEI cannot be re-targeted and this CPU is the CPEI
	 * target, don't create the sysfs control file, so the CPU
	 * can't be taken offline.
	 */
	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.no_control = 1;
#endif

	return register_cpu(&sysfs_cpus[num].cpu, num);
}

#ifdef CONFIG_HOTPLUG_CPU

void arch_unregister_cpu(int num)
{
	unregister_cpu(&sysfs_cpus[num].cpu);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init topology_init(void)
{
	int i, err = 0;

#ifdef CONFIG_NUMA
	/*
	 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
	 */
	for_each_online_node(i) {
		if ((err = register_one_node(i)))
			goto out;
	}
#endif

	sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
	if (!sysfs_cpus)
		panic("kzalloc in topology_init failed - NR_CPUS too big?");

	for_each_present_cpu(i) {
		if ((err = arch_register_cpu(i)))
			goto out;
	}
out:
	return err;
}

subsys_initcall(topology_init);
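
/*
 * The initcall above populates /sys/devices/system/node/node<N> (on
 * NUMA configs) and /sys/devices/system/cpu/cpu<N>; the code below
 * hangs the per-cpu cache descriptions off those cpu entries.
 */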

/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays used to pretty-print the cache information
 */
static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Unified"		/* unified */
};

static const char *cache_mattrib[] = {
	"WriteThrough",
	"WriteBack",
	"",			/* reserved */
	""			/* reserved */
};

struct cache_info {
	pal_cache_config_info_t cci;
	cpumask_t shared_cpu_map;
	int level;
	int type;
	struct kobject kobj;
};

struct cpu_cache_info {
	struct cache_info *cache_leaves;
	int num_cache_leaves;
	struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x, y)	(&all_cpu_cache_info[x].cache_leaves[y])

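/*
 * Fill shared_cpu_map with every logical cpu that shares this cache
 * leaf.  PAL describes one sharing logical processor (core id plus
 * thread id within the socket) per call, so the loop below queries
 * ia64_pal_cache_shared_info() num_shared times.
 */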
#ifdef CONFIG_SMP
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	pal_cache_shared_info_t csi;
	int num_shared, i = 0;
	unsigned int j;

	if (cpu_data(cpu)->threads_per_core <= 1 &&
		cpu_data(cpu)->cores_per_socket <= 1) {
		cpu_set(cpu, this_leaf->shared_cpu_map);
		return;
	}

	if (ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					0,
					&csi) != PAL_STATUS_SUCCESS)
		return;

	num_shared = (int) csi.num_shared;
	do {
		for_each_possible_cpu(j)
			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
				&& cpu_data(j)->core_id == csi.log1_cid
				&& cpu_data(j)->thread_id == csi.log1_tid)
				cpu_set(j, this_leaf->shared_cpu_map);

		i++;
	} while (i < num_shared &&
		ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					i,
					&csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	cpu_set(cpu, this_leaf->shared_cpu_map);
}
#endif

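/* pcci_line_size is the log2 of the line size in bytes, hence the shift. */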
static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf,
			"%s\n",
			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

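/*
 * sets = size / associativity / line size; e.g. a 256K 8-way cache with
 * 64-byte lines (pcci_line_size == 6) has 256K / 8 / 64 = 512 sets.
 */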
static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
	number_of_sets /= this_leaf->cci.pcci_assoc;
	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

	return sprintf(buf, "%u\n", number_of_sets);
}

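/* Report only the online subset of the cpus sharing this cache. */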
static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
	ssize_t len;
	cpumask_t shared_cpu_map;

	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
	len = cpumask_scnprintf(buf, NR_CPUS + 1, shared_cpu_map);
	len += sprintf(buf + len, "\n");
	return len;
}

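/*
 * For unified caches PAL sets pcci_unified, which here bumps the index
 * from 2 ("Data") to 3 ("Unified") in cache_types[].
 */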
static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
	int type = this_leaf->type + this_leaf->cci.pcci_unified;
	return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct cache_info *, char *);
	ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
	#undef define_one_ro
#endif
#define define_one_ro(_name) \
	static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

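/*
 * For instance, define_one_ro(level) expands to:
 *	static struct cache_attr level = __ATTR(level, 0444, show_level, NULL);
 */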
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&ways_of_associativity.attr,
	&attributes.attr,
	&size.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t cache_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cache_attr *fattr = to_attr(attr);
	struct cache_info *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
	return ret;
}

static struct sysfs_ops cache_sysfs_ops = {
	.show = cache_show
};

static struct kobj_type cache_ktype = {
	.sysfs_ops = &cache_sysfs_ops,
	.default_attrs = cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
	.sysfs_ops = &cache_sysfs_ops,
};

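/*
 * Of the two ktypes above, the leaf kobjects ("index<N>") carry the
 * attribute files, while the per-cpu "cache" directory kobject has no
 * default attributes of its own.
 */
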
static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
	kfree(all_cpu_cache_info[cpu].cache_leaves);
	all_cpu_cache_info[cpu].cache_leaves = NULL;
	all_cpu_cache_info[cpu].num_cache_leaves = 0;
	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
}

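/*
 * Enumerate the cache leaves via PAL.  For each level, j iterates over
 * the two cache types (2 = data/unified, 1 = instruction).  The PAL
 * calls describe the processor they run on, so the caller pins itself
 * to the target cpu first.
 */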
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
	u64 i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j;
	s64 status;
	struct cache_info *this_cache;
	int num_cache_leaves = 0;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return -1;
	}

	this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
			GFP_KERNEL);
	if (this_cache == NULL)
		return -ENOMEM;

	for (i = 0; i < levels; i++) {
		for (j = 2; j > 0; j--) {
			if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
					PAL_STATUS_SUCCESS)
				continue;

			this_cache[num_cache_leaves].cci = cci;
			this_cache[num_cache_leaves].level = i + 1;
			this_cache[num_cache_leaves].type = j;

			cache_shared_cpu_map_setup(cpu,
					&this_cache[num_cache_leaves]);
			num_cache_leaves++;
		}
	}

	all_cpu_cache_info[cpu].cache_leaves = this_cache;
	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

	return 0;
}

/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct cache_info *this_object;
	int retval = 0;
	cpumask_t oldmask;

	if (all_cpu_cache_info[cpu].kobj.parent)
		return 0;

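	/*
	 * PAL cache queries only describe the processor they execute
	 * on, so temporarily run on the target cpu while probing.
	 */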
	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (unlikely(retval))
		return retval;

	retval = cpu_cache_sysfs_init(cpu);
	set_cpus_allowed(current, oldmask);
	if (unlikely(retval < 0))
		return retval;

	all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
	kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
	all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
	retval = kobject_register(&all_cpu_cache_info[cpu].kobj);

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
		this_object = LEAF_KOBJECT_PTR(cpu, i);
		this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &cache_ktype;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			for (j = 0; j < i; j++) {
				kobject_unregister(
					&(LEAF_KOBJECT_PTR(cpu, j)->kobj));
			}
			kobject_unregister(&all_cpu_cache_info[cpu].kobj);
			cpu_cache_sysfs_exit(cpu);
			break;
		}
	}
	return retval;
}

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
		kobject_unregister(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

	if (all_cpu_cache_info[cpu].kobj.parent) {
		kobject_unregister(&all_cpu_cache_info[cpu].kobj);
		memset(&all_cpu_cache_info[cpu].kobj,
			0,
			sizeof(struct kobject));
	}

	cpu_cache_sysfs_exit(cpu);

	return 0;
}

/*
 * When a cpu is hot-plugged or removed, create or tear down its cache
 * kobjects as appropriate.
 */
static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cache_cpu_notifier =
{
	.notifier_call = cache_cpu_callback
};

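/*
 * Register cache entries for the cpus already online, then let the
 * hotplug notifier handle later arrivals and departures.
 */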
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	for_each_online_cpu(i) {
		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
				(void *)(long)i);
	}

	register_hotcpu_notifier(&cache_cpu_notifier);

	return 0;
}

device_initcall(cache_sysfs_init);