/*
 * Author: Andy Fleming <afleming@freescale.com>
 *	   Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/fsl_guts.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

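/*
 * ePAPR spin table.  A secondary core held by the boot loader polls
 * this table; the kernel releases the core by writing its hardware id
 * into pir and the entry-point address into addr_l (and addr_h on
 * 64-bit).
 */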
struct epapr_spin_table {
	u32	addr_h;
	u32	addr_l;
	u32	r3_h;
	u32	r3_l;
	u32	reserved;
	u32	pir;
};

static struct ccsr_guts __iomem *guts;
static u64 timebase;
static int tb_req;
static int tb_valid;

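/*
 * Freeze or thaw the timebase on both cores at once via the DEVDISR
 * bits in the global utilities block; the read-back makes sure the
 * write has posted before we go on.
 */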
static void mpc85xx_timebase_freeze(int freeze)
{
	uint32_t mask;

	mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
	if (freeze)
		setbits32(&guts->devdisr, mask);
	else
		clrbits32(&guts->devdisr, mask);

	in_be32(&guts->devdisr);
}

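/*
 * Timebase handoff handshake: the cpu coming online sets tb_req and
 * waits for tb_valid; the giving cpu then freezes the timebase, takes
 * a snapshot, and sets tb_valid.  Once the taker has copied the
 * snapshot into its own timebase it clears tb_valid, and the giver
 * thaws the timebase so both cores resume in sync.
 */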
static void mpc85xx_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	while (!tb_req)
		barrier();
	tb_req = 0;

	mpc85xx_timebase_freeze(1);
	timebase = get_tb();
	mb();
	tb_valid = 1;

	while (tb_valid)
		barrier();

	mpc85xx_timebase_freeze(0);

	local_irq_restore(flags);
}

static void mpc85xx_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	tb_req = 1;
	while (!tb_valid)
		barrier();

	set_tb(timebase >> 32, timebase & 0xffffffff);
	isync();
	tb_valid = 0;

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
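/*
 * Take this cpu offline: mark it dead for the waiting boot cpu,
 * disable timer interrupts via TCR, flush and disable the L1 caches,
 * then drop into NAP mode by setting HID0[NAP] and MSR[WE].
 */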
static void __cpuinit smp_85xx_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	u32 tmp;

	local_irq_disable();
	idle_task_exit();
	generic_set_cpu_dead(cpu);
	mb();

	mtspr(SPRN_TCR, 0);

	__flush_disable_L1();
	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
	mtspr(SPRN_HID0, tmp);
	isync();

	/* Enter NAP mode. */
	tmp = mfmsr();
	tmp |= MSR_WE;
	mb();
	mtmsr(tmp);
	isync();

	while (1)
		;
}
#endif

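/*
 * The boot loader may have mapped the spin table cache-inhibited, so
 * flush our cacheable view around every access: before reads to drop
 * stale lines, after writes to push the data out to memory.
 */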
static inline void flush_spin_table(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
}

static inline u32 read_spin_table_addr_l(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
	return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}

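/*
 * Release secondary core 'nr' from its spin loop: find the spin table
 * through the cpu node's "cpu-release-addr" property, map it, and
 * write the kernel entry point into it.  On 32-bit hotplug re-kicks we
 * first reset the core via the MPIC and wait for it to re-enter the
 * spin loop.
 */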
static int __cpuinit smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	struct epapr_spin_table __iomem *spin_table;
	struct device_node *np;
	int hw_cpu = get_hard_smp_processor_id(nr);
	int ioremappable;
	int ret = 0;

	WARN_ON(nr < 0 || nr >= NR_CPUS);
	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		of_node_put(np);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap_prot(*cpu_rel_addr,
			sizeof(struct epapr_spin_table), _PAGE_COHERENT);
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	/* Done with the cpu node and its "cpu-release-addr" property. */
	of_node_put(np);

	local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
	/* Corresponding to generic_set_cpu_dead() */
	generic_set_cpu_up(nr);

	if (system_state == SYSTEM_RUNNING) {
		/*
		 * To stay compatible with old boot programs that use a
		 * cache-inhibited spin table, we need to flush the cache
		 * before accessing the spin table to invalidate any stale
		 * data.  We also need to flush the cache after writing to
		 * the spin table to push the data out.
		 */
		flush_spin_table(spin_table);
		out_be32(&spin_table->addr_l, 0);
		flush_spin_table(spin_table);

		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(hw_cpu);

		/*
		 * Wait until the core is ready...
		 * We need to invalidate the stale data, in case the boot
		 * loader uses a cache-inhibited spin table.
		 */
		if (!spin_event_timeout(
				read_spin_table_addr_l(spin_table) == 1,
				10000, 100)) {
			pr_err("%s: timeout waiting for core %d to reset\n",
				__func__, hw_cpu);
			ret = -ENOENT;
			goto out;
		}

		/* clear the acknowledge status */
		__secondary_hold_acknowledge = -1;
	}
#endif
	flush_spin_table(spin_table);
	out_be32(&spin_table->pir, hw_cpu);
	out_be32(&spin_table->addr_l, __pa(__early_start));
	flush_spin_table(spin_table);

	/* Wait a bit for the CPU to ack. */
	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
					10000, 100)) {
		pr_err("%s: timeout waiting for core %d to ack\n",
			__func__, hw_cpu);
		ret = -ENOENT;
		goto out;
	}
out:
#else
	smp_generic_kick_cpu(nr);

	flush_spin_table(spin_table);
	out_be32(&spin_table->pir, hw_cpu);
	out_be64((u64 *)(&spin_table->addr_h),
		__pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
	flush_spin_table(spin_table);
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}

struct smp_ops_t smp_85xx_ops = {
	.kick_cpu = smp_85xx_kick_cpu,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = generic_cpu_disable,
	.cpu_die = generic_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.give_timebase = smp_generic_give_timebase,
	.take_timebase = smp_generic_take_timebase,
#endif
};

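/*
 * Kexec support: secondary cpus park themselves in kexec_cpu_down(),
 * while the kexecing cpu flushes the dcache for the new image, resets
 * the other cores through the MPIC, and then hands off to the generic
 * kexec path.
 */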
#ifdef CONFIG_KEXEC
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	local_irq_disable();

	if (secondary) {
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1)
			;
	}
}

static void mpc85xx_smp_kexec_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);
}

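/*
 * kmap() the page at 'paddr' (which may live in highmem) just long
 * enough to flush its contents out of the dcache.
 */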
static void map_and_flush(unsigned long paddr)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long kaddr = (unsigned long)kmap(page);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
	kunmap(page);
}

/*
 * Before we reset the other cores, we need to flush the relevant cache
 * out to memory so we don't get anything corrupted.  Some of these
 * flushes are performed out of an overabundance of caution, as
 * interrupts are not disabled yet and we can still switch cores.
 */
static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	unsigned long paddr;
	int i;

	if (image->type == KEXEC_TYPE_DEFAULT) {
		/* normal kexec images are stored in temporary pages */
		for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
		     ptr = (entry & IND_INDIRECTION) ?
				phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
			if (!(entry & IND_DESTINATION))
				map_and_flush(entry);
		}
		/* flush out last IND_DONE page */
		map_and_flush(entry);
	} else {
		/* crash type kexec images are copied to the crash region */
		for (i = 0; i < image->nr_segments; i++) {
			struct kexec_segment *seg = &image->segment[i];

			for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
			     paddr += PAGE_SIZE) {
				map_and_flush(paddr);
			}
		}
	}

	/* also flush the kimage struct to be passed in as well */
	flush_dcache_range((unsigned long)image,
		(unsigned long)image + sizeof(*image));
}

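/*
 * Bring down the secondary cpus (for a normal kexec; the crash path
 * brings them down earlier), busy-wait until they have parked, reset
 * every other online core via the MPIC, and hand control to the
 * generic kexec path.
 */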
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
	int timeout = INT_MAX;
	int i, num_cpus = num_present_cpus();

	mpc85xx_smp_flush_dcache_kexec(image);

	if (image->type == KEXEC_TYPE_DEFAULT)
		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

	while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
		(timeout > 0)) {
		timeout--;
	}

	if (!timeout)
		printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

	for_each_online_cpu(i) {
		if (i == smp_processor_id())
			continue;
		mpic_reset_core(i);
	}

	default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC */

static void __cpuinit smp_85xx_setup_cpu(int cpu_nr)
{
	if (smp_85xx_ops.probe == smp_mpic_probe)
		mpic_setup_this_cpu();

	if (cpu_has_feature(CPU_FTR_DBELL))
		doorbell_setup_this_cpu();
}

static const struct of_device_id mpc85xx_smp_guts_ids[] = {
	{ .compatible = "fsl,mpc8572-guts", },
	{ .compatible = "fsl,p1020-guts", },
	{ .compatible = "fsl,p1021-guts", },
	{ .compatible = "fsl,p1022-guts", },
	{ .compatible = "fsl,p1023-guts", },
	{ .compatible = "fsl,p2020-guts", },
	{},
};

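/*
 * Wire up the 85xx smp_ops: pick MPIC and/or doorbell IPIs depending
 * on what the device tree and cpu features provide, and hook up the
 * guts-based timebase sync plus the hotplug/kexec callbacks when
 * available.
 */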
void __init mpc85xx_smp_init(void)
{
	struct device_node *np;

	smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np) {
		smp_85xx_ops.probe = smp_mpic_probe;
		smp_85xx_ops.message_pass = smp_mpic_message_pass;
	}

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		/*
		 * If left NULL, .message_pass defaults to
		 * smp_muxed_ipi_message_pass
		 */
		smp_85xx_ops.message_pass = NULL;
		smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
	}

	np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
	if (np) {
		guts = of_iomap(np, 0);
		of_node_put(np);
		if (!guts) {
			pr_err("%s: Could not map guts node address\n",
								__func__);
			return;
		}
		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
#ifdef CONFIG_HOTPLUG_CPU
		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
#endif
	}

	smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC
	ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
	ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}