/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 * 2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *      Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *      Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

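/*
 * Register CPU "num" with sysfs.  When ACPI CPU hotplug is configured,
 * suppress the online/offline control file for the CPEI target CPU if
 * CPEI cannot be retargeted, since that CPU must stay online.
 */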
int arch_register_cpu(int num)
{
#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
        /*
         * If CPEI cannot be retargeted and this CPU is the
         * CPEI target, then don't create the control file.
         */
        if (!can_cpei_retarget() && is_cpu_cpei_target(num))
                sysfs_cpus[num].cpu.no_control = 1;
#ifdef CONFIG_NUMA
        map_cpu_to_node(num, node_cpuid[num].nid);
#endif
#endif

        return register_cpu(&sysfs_cpus[num].cpu, num);
}

#ifdef CONFIG_HOTPLUG_CPU

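/*
 * Undo arch_register_cpu(): drop the sysfs entry and the CPU's
 * node mapping on hot-remove.
 */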
void arch_unregister_cpu(int num)
{
        unregister_cpu(&sysfs_cpus[num].cpu);
        unmap_cpu_from_node(num, cpu_to_node(num));
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
#endif /*CONFIG_HOTPLUG_CPU*/

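/*
 * Boot-time registration: register each online node, then each
 * present CPU, with the sysfs topology layer.
 */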
static int __init topology_init(void)
{
        int i, err = 0;

#ifdef CONFIG_NUMA
        /*
         * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
         */
        for_each_online_node(i) {
                if ((err = register_one_node(i)))
                        goto out;
        }
#endif

        sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
        if (!sysfs_cpus)
                panic("kzalloc in topology_init failed - NR_CPUS too big?");

        for_each_present_cpu(i) {
                if ((err = arch_register_cpu(i)))
                        goto out;
        }
out:
        return err;
}

subsys_initcall(topology_init);

/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays for pretty-printing the cache types and memory
 * attributes reported by PAL.
 */
static const char *cache_types[] = {
        "",                     /* not used */
        "Instruction",
        "Data",
        "Unified"               /* unified */
};

static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",                     /* reserved */
        ""                      /* reserved */
};

struct cache_info {
        pal_cache_config_info_t cci;            /* PAL config for this leaf */
        cpumask_t shared_cpu_map;               /* CPUs sharing this leaf */
        int level;                              /* cache level (1-based) */
        int type;                               /* PAL cache type: 1=inst, 2=data */
        struct kobject kobj;
};

struct cpu_cache_info {
        struct cache_info *cache_leaves;
        int num_cache_leaves;
        struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x, y)  (&all_cpu_cache_info[x].cache_leaves[y])

#ifdef CONFIG_SMP
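/*
 * Build the mask of CPUs sharing this cache leaf.  Sockets with a
 * single core and single thread own their caches outright; otherwise
 * walk PAL's shared-cache records and match socket, core and thread
 * IDs across all possible CPUs.
 */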
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        pal_cache_shared_info_t csi;
        int num_shared, i = 0;
        unsigned int j;

        if (cpu_data(cpu)->threads_per_core <= 1 &&
                cpu_data(cpu)->cores_per_socket <= 1) {
                cpu_set(cpu, this_leaf->shared_cpu_map);
                return;
        }

        if (ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        0,
                                        &csi) != PAL_STATUS_SUCCESS)
                return;

        num_shared = (int) csi.num_shared;
        do {
                for_each_possible_cpu(j)
                        if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
                                cpu_set(j, this_leaf->shared_cpu_map);

                i++;
        } while (i < num_shared &&
                ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        i,
                                        &csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        cpu_set(cpu, this_leaf->shared_cpu_map);
        return;
}
#endif

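/* sysfs show routines, one per read-only cache attribute */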
static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf,
                        "%s\n",
                        cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

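/*
 * sets = cache_size / (associativity * line_size);
 * pcci_line_size holds log2 of the line size in bytes.
 */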
static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
        unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
        number_of_sets /= this_leaf->cci.pcci_assoc;
        number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

        return sprintf(buf, "%u\n", number_of_sets);
}

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
        ssize_t len;
        cpumask_t shared_cpu_map;

        cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
        len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
        len += sprintf(buf+len, "\n");
        return len;
}

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
        /* pcci_unified offsets the index so unified caches print as "Unified" */
        int type = this_leaf->type + this_leaf->cci.pcci_unified;
        return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct cache_info *, char *);
        ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

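/*
 * define_one_ro(name) emits a read-only cache_attr wired to the
 * matching show_<name>() routine above.
 */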
#ifdef define_one_ro
        #undef define_one_ro
#endif
#define define_one_ro(_name) \
        static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &ways_of_associativity.attr,
        &attributes.attr,
        &size.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
        NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t cache_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cache_attr *fattr = to_attr(attr);
        struct cache_info *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
        return ret;
}

static struct sysfs_ops cache_sysfs_ops = {
        .show   = cache_show
};

static struct kobj_type cache_ktype = {
        .sysfs_ops      = &cache_sysfs_ops,
        .default_attrs  = cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
        .sysfs_ops      = &cache_sysfs_ops,
};

285static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
286{
Jesper Juhlcbf283c2006-04-20 10:11:09 -0700287 kfree(all_cpu_cache_info[cpu].cache_leaves);
288 all_cpu_cache_info[cpu].cache_leaves = NULL;
Zhang, Yanminf1918002006-02-27 11:37:45 +0800289 all_cpu_cache_info[cpu].num_cache_leaves = 0;
290 memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
Zhang, Yanminf1918002006-02-27 11:37:45 +0800291 return;
292}
293
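/*
 * Enumerate this CPU's cache leaves through PAL: for each level, query
 * both cache types (2 = data, 1 = instruction), record each leaf PAL
 * reports, and compute its shared-CPU map.
 */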
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
        u64 i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j;
        s64 status;
        struct cache_info *this_cache;
        int num_cache_leaves = 0;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return -1;
        }

        this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
                        GFP_KERNEL);
        if (this_cache == NULL)
                return -ENOMEM;

        for (i = 0; i < levels; i++) {
                for (j = 2; j > 0; j--) {
                        if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
                                        PAL_STATUS_SUCCESS)
                                continue;

                        this_cache[num_cache_leaves].cci = cci;
                        this_cache[num_cache_leaves].level = i + 1;
                        this_cache[num_cache_leaves].type = j;

                        cache_shared_cpu_map_setup(cpu,
                                        &this_cache[num_cache_leaves]);
                        num_cache_leaves++;
                }
        }

        all_cpu_cache_info[cpu].cache_leaves = this_cache;
        all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

        return 0;
}

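/*
 * PAL cache queries describe the CPU they execute on, so cache_add_dev()
 * temporarily pins the calling task to the target CPU around
 * cpu_cache_sysfs_init().
 */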
/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct cache_info *this_object;
        int retval = 0;
        cpumask_t oldmask;

        if (all_cpu_cache_info[cpu].kobj.parent)
                return 0;

        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
        if (unlikely(retval))
                return retval;

        retval = cpu_cache_sysfs_init(cpu);
        set_cpus_allowed(current, oldmask);
        if (unlikely(retval < 0))
                return retval;

        all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
        kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
        all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
        retval = kobject_register(&all_cpu_cache_info[cpu].kobj);

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
                this_object = LEAF_KOBJECT_PTR(cpu, i);
                this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
                kobject_set_name(&(this_object->kobj), "index%1lu", i);
                this_object->kobj.ktype = &cache_ktype;
                retval = kobject_register(&(this_object->kobj));
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
                                kobject_unregister(
                                        &(LEAF_KOBJECT_PTR(cpu, j)->kobj));
                        }
                        kobject_unregister(&all_cpu_cache_info[cpu].kobj);
                        cpu_cache_sysfs_exit(cpu);
                        break;
                }
        }
        return retval;
}

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
                kobject_unregister(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

        if (all_cpu_cache_info[cpu].kobj.parent) {
                kobject_unregister(&all_cpu_cache_info[cpu].kobj);
                memset(&all_cpu_cache_info[cpu].kobj,
                        0,
                        sizeof(struct kobject));
        }

        cpu_cache_sysfs_exit(cpu);

        return 0;
}

/*
 * When a cpu is hot-plugged, create or remove its cache kobjects
 * as necessary.
 */
static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;

        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cache_cpu_notifier =
{
        .notifier_call = cache_cpu_callback
};

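/*
 * Register cache info for the CPUs already online at boot, then hook
 * the hotplug notifier so CPUs that come and go are handled the same way.
 */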
static int __cpuinit cache_sysfs_init(void)
{
        int i;

        for_each_online_cpu(i) {
                cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
                                (void *)(long)i);
        }

        register_hotcpu_notifier(&cache_cpu_notifier);

        return 0;
}

device_initcall(cache_sysfs_init);