/*
 * Copyright IBM Corp. 2007,2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_enabled = 1;
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static void set_topology_timer(void);
static DECLARE_WORK(topology_work, topology_work_fn);
/* topology_lock protects the core linked list */
static DEFINE_SPINLOCK(topology_lock);

static struct mask_info core_info;
cpumask_t cpu_core_map[NR_CPUS];
unsigned char cpu_core_id[NR_CPUS];

static struct mask_info book_info;
cpumask_t cpu_book_map[NR_CPUS];
unsigned char cpu_book_id[NR_CPUS];

/* smp_cpu_state_mutex must be held when accessing this array */
int cpu_polarization[NR_CPUS];

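/*
 * Return the cpumask of the topology group that contains @cpu at the
 * level described by @info (core or book list). If topology is disabled
 * or @cpu is not part of any group, fall back to a mask containing only
 * @cpu itself.
 */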
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_clear(&mask);
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY) {
		cpumask_copy(&mask, cpumask_of(cpu));
		return mask;
	}
	while (info) {
		if (cpumask_test_cpu(cpu, &info->mask)) {
			mask = info->mask;
			break;
		}
		info = info->next;
	}
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpumask_of(cpu));
	return mask;
}

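/*
 * Add all present cpus of a topology cpu entry to the current book and
 * core masks. The bits in tl_cpu->mask are numbered from the most
 * significant end, hence the reversed index calculation relative to
 * tl_cpu->origin. With one_core_per_cpu each populated cpu consumes its
 * own core mask_info, so the advanced core pointer is returned.
 */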
static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
					  struct mask_info *book,
					  struct mask_info *core,
					  int one_core_per_cpu)
{
	unsigned int cpu;

	for (cpu = find_first_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS);
	     cpu < TOPOLOGY_CPU_BITS;
	     cpu = find_next_bit(&tl_cpu->mask[0], TOPOLOGY_CPU_BITS, cpu + 1))
	{
		unsigned int rcpu;
		int lcpu;

		rcpu = TOPOLOGY_CPU_BITS - 1 - cpu + tl_cpu->origin;
		lcpu = smp_find_processor_id(rcpu);
		if (lcpu >= 0) {
			cpumask_set_cpu(lcpu, &book->mask);
			cpu_book_id[lcpu] = book->id;
			cpumask_set_cpu(lcpu, &core->mask);
			if (one_core_per_cpu) {
				cpu_core_id[lcpu] = rcpu;
				core = core->next;
			} else {
				cpu_core_id[lcpu] = core->id;
			}
			cpu_set_polarization(lcpu, tl_cpu->pp);
		}
	}
	return core;
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &core_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

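/*
 * Topology list entries have different sizes: nesting level 0 denotes
 * a cpu entry, every other level a container entry.
 */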
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_cpu *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

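/*
 * Parse the topology list in its generic layout: nesting level 2
 * entries describe books, level 1 entries cores, and level 0 entries
 * the cpus within a core.
 */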
static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			core = core->next;
			core->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, core, 0);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

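/*
 * Parse the topology list as reported by z10 machines: level 1
 * entries describe books and every level 0 cpu entry is a core of
 * its own.
 */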
static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *core = &core_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 1:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 0:
			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

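/*
 * Machine types 0x2097 and 0x2098 (System z10 EC/BC) report the
 * one-core-per-cpu topology format; all other machines take the
 * generic path.
 */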
static void tl_to_cores(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	spin_lock_irq(&topology_lock);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:
	case 0x2098:
		__tl_to_cores_z10(info);
		break;
	default:
		__tl_to_cores_generic(info);
	}
	spin_unlock_irq(&topology_lock);
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}

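/*
 * Issue the perform topology function (PTF) instruction, opcode
 * 0xb9a2, with the given function code and return the resulting
 * condition code.
 */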
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_core_map(void)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&topology_lock, flags);
	for_each_possible_cpu(cpu) {
		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
	}
	spin_unlock_irqrestore(&topology_lock, flags);
}

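/*
 * Fetch the raw topology information: try topology nesting level 3
 * first (STSI 15.1.3) and fall back to STSI 15.1.2 if the machine
 * does not support the higher level.
 */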
void store_topology(struct sysinfo_15_1_x *info)
{
	int rc;

	rc = stsi(info, 15, 1, 3);
	if (rc != -ENOSYS)
		return;
	stsi(info, 15, 1, 2);
}

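/*
 * Rebuild the core and book masks from the current machine topology
 * and send a change uevent for each online cpu. The return value
 * tells the caller whether the scheduling domains need a rebuild.
 */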
int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct device *dev;
	int cpu;

	if (!MACHINE_HAS_TOPOLOGY) {
		update_cpu_core_map();
		topology_update_polarization_simple();
		return 0;
	}
	store_topology(info);
	tl_to_cores(info);
	update_cpu_core_map();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return 1;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

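/*
 * While topology_poll is non-zero a topology change is anticipated,
 * so poll at a high rate (every 100ms); otherwise re-arm the timer
 * for the slow once-a-minute check.
 */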
Heiko Carstensdbd70fb2008-04-17 07:46:12 +0200304static void set_topology_timer(void)
305{
Heiko Carstensd68bddb2011-12-27 11:27:16 +0100306 if (atomic_add_unless(&topology_poll, -1, 0))
307 mod_timer(&topology_timer, jiffies + HZ / 10);
308 else
309 mod_timer(&topology_timer, jiffies + HZ * 60);
310}
311
312void topology_expect_change(void)
313{
314 if (!MACHINE_HAS_TOPOLOGY)
315 return;
316 /* This is racy, but it doesn't matter since it is just a heuristic.
317 * Worst case is that we poll in a higher frequency for a bit longer.
318 */
319 if (atomic_read(&topology_poll) > 60)
320 return;
321 atomic_add(60, &topology_poll);
322 set_topology_timer();
Heiko Carstensdbd70fb2008-04-17 07:46:12 +0200323}
324
Heiko Carstens2b1a61f2008-12-25 13:39:23 +0100325static int __init early_parse_topology(char *p)
Heiko Carstensdbd70fb2008-04-17 07:46:12 +0200326{
Heiko Carstensc9af3fa2010-10-25 16:10:43 +0200327 if (strncmp(p, "off", 3))
Heiko Carstens2b1a61f2008-12-25 13:39:23 +0100328 return 0;
Heiko Carstensc9af3fa2010-10-25 16:10:43 +0200329 topology_enabled = 0;
Heiko Carstens2b1a61f2008-12-25 13:39:23 +0100330 return 0;
Heiko Carstensdbd70fb2008-04-17 07:46:12 +0200331}
Heiko Carstens2b1a61f2008-12-25 13:39:23 +0100332early_param("topology", early_parse_topology);
Heiko Carstensdbd70fb2008-04-17 07:46:12 +0200333
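/*
 * Preallocate the mask_info list for one topology level: the number
 * of containers at that level is bounded by the product of the
 * magnitudes of all levels above it, as reported in info->mag, so
 * tl_to_cores() can walk the list without allocating.
 */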
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = alloc_bootmem(sizeof(struct mask_info));
		mask = mask->next;
	}
}

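/*
 * Allocate the bootmem buffer for the raw topology information,
 * print the machine's topology magnitude vector and preallocate
 * the mask_info lists for the core and book levels.
 */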
void __init s390_init_cpu_topology(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return;
	tl_info = alloc_bootmem_pages(PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &core_info, 1);
	alloc_masks(info, &book_info, 2);
}

static int cpu_management;

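/*
 * The sysfs "dispatching" attribute selects horizontal (0) or
 * vertical (1) cpu polarization via topology_set_cpu_management().
 */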
static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);

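/*
 * Per-cpu sysfs attribute that reports the polarization of a cpu as
 * set by the last topology update.
 */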
static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (cpu_read_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

static int __init topology_init(void)
{
	if (!MACHINE_HAS_TOPOLOGY) {
		topology_update_polarization_simple();
		goto out;
	}
	set_topology_timer();
out:
	update_cpu_core_map();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);