/*
 * Author: Andy Fleming <afleming@freescale.com>
 *         Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/fsl_guts.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

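/*
 * ePAPR spin-table layout.  A secondary core spins in the boot loader,
 * polling addr_h/addr_l; writing a non-zero release address there sends
 * the core to that entry point, with r3 loaded from r3_h/r3_l and the
 * pir field giving the value for its PIR register.
 */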
struct epapr_spin_table {
        u32 addr_h;
        u32 addr_l;
        u32 r3_h;
        u32 r3_l;
        u32 reserved;
        u32 pir;
};

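/*
 * State shared between mpc85xx_give_timebase() and mpc85xx_take_timebase():
 * the waking core raises tb_req; the core that owns the timebase freezes
 * it, publishes the value in 'timebase' and raises tb_valid; the waking
 * core copies the value into its own TB and clears tb_valid to ack.
 */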
static struct ccsr_guts __iomem *guts;
static u64 timebase;
static int tb_req;
static int tb_valid;

static void mpc85xx_timebase_freeze(int freeze)
{
        uint32_t mask;

        mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
        if (freeze)
                setbits32(&guts->devdisr, mask);
        else
                clrbits32(&guts->devdisr, mask);

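        /* Read back to push the previous write out before we proceed. */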
        in_be32(&guts->devdisr);
}

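/*
 * Run on the CPU that currently owns the timebase: wait for the new CPU
 * to request it, freeze the timebase, hand over the value, then wait for
 * the acknowledgement before unfreezing.
 */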
static void mpc85xx_give_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);

        while (!tb_req)
                barrier();
        tb_req = 0;

        mpc85xx_timebase_freeze(1);
#ifdef CONFIG_PPC64
        /*
         * e5500/e6500 have a workaround for erratum A-006958 in place
         * that will reread the timebase until TBL is non-zero.
         * That would be a bad thing when the timebase is frozen.
         *
         * Thus, we read it manually, and instead of checking that
         * TBL is non-zero, we ensure that TB does not change.  We don't
         * do that for the main mftb implementation, because it requires
         * a scratch register.
         */
        {
                u64 prev;

                asm volatile("mfspr %0, %1" : "=r" (timebase) :
                             "i" (SPRN_TBRL));

                do {
                        prev = timebase;
                        asm volatile("mfspr %0, %1" : "=r" (timebase) :
                                     "i" (SPRN_TBRL));
                } while (prev != timebase);
        }
#else
        timebase = get_tb();
#endif
        mb();
        tb_valid = 1;

        while (tb_valid)
                barrier();

        mpc85xx_timebase_freeze(0);

        local_irq_restore(flags);
}

static void mpc85xx_take_timebase(void)
{
        unsigned long flags;

        local_irq_save(flags);

        tb_req = 1;
        while (!tb_valid)
                barrier();

        set_tb(timebase >> 32, timebase & 0xffffffff);
        isync();
        tb_valid = 0;

        local_irq_restore(flags);
}

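/*
 * CPU hotplug: take the calling CPU offline by flushing and disabling
 * its L1 cache and parking it in NAP mode with interrupts off.  The
 * core sits there until smp_85xx_kick_cpu() resets it again.
 */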
#ifdef CONFIG_HOTPLUG_CPU
static void smp_85xx_mach_cpu_die(void)
{
        unsigned int cpu = smp_processor_id();
        u32 tmp;

        local_irq_disable();
        idle_task_exit();
        generic_set_cpu_dead(cpu);
        mb();

        mtspr(SPRN_TCR, 0);

        __flush_disable_L1();
        tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
        mtspr(SPRN_HID0, tmp);
        isync();

        /* Enter NAP mode. */
        tmp = mfmsr();
        tmp |= MSR_WE;
        mb();
        mtmsr(tmp);
        isync();

        while (1)
                ;
}
#endif

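/*
 * Boot loaders may place the spin table in cache-inhibited memory, so
 * flush our cached copy around every access to avoid acting on (or
 * leaving behind) stale data.
 */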
static inline void flush_spin_table(void *spin_table)
{
        flush_dcache_range((ulong)spin_table,
                (ulong)spin_table + sizeof(struct epapr_spin_table));
}

static inline u32 read_spin_table_addr_l(void *spin_table)
{
        flush_dcache_range((ulong)spin_table,
                (ulong)spin_table + sizeof(struct epapr_spin_table));
        return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}

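/*
 * Release a secondary core: locate its ePAPR spin table via the
 * "cpu-release-addr" device-tree property, write the core's PIR and the
 * kernel entry point into the table, then wait for the core to come up.
 */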
static int smp_85xx_kick_cpu(int nr)
{
        unsigned long flags;
        const u64 *cpu_rel_addr;
        struct epapr_spin_table __iomem *spin_table;
        struct device_node *np;
        int hw_cpu = get_hard_smp_processor_id(nr);
        int ioremappable;
        int ret = 0;

        WARN_ON(nr < 0 || nr >= NR_CPUS);
        WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

        pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

        np = of_get_cpu_node(nr, NULL);
        cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
        /* The property data lives in the device tree itself, so the
         * node reference can be dropped once we have the pointer. */
        of_node_put(np);

        if (cpu_rel_addr == NULL) {
                printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
                return -ENOENT;
        }

        /*
         * A secondary core could be in a spinloop in the bootpage
         * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
         * The bootpage and highmem can be accessed via ioremap(), but
         * we need to directly access the spinloop if it's in lowmem.
         */
        ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

        /* Map the spin table */
        if (ioremappable)
                spin_table = ioremap_prot(*cpu_rel_addr,
                        sizeof(struct epapr_spin_table), _PAGE_COHERENT);
        else
                spin_table = phys_to_virt(*cpu_rel_addr);

        local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
        /* Corresponding to generic_set_cpu_dead() */
        generic_set_cpu_up(nr);

        if (system_state == SYSTEM_RUNNING) {
                /*
                 * To stay compatible with old boot programs that use a
                 * cache-inhibited spin table, we need to flush the cache
                 * before accessing the spin table to invalidate any stale
                 * data.  We also need to flush the cache after writing to
                 * the spin table to push the data out.
                 */
                flush_spin_table(spin_table);
                out_be32(&spin_table->addr_l, 0);
                flush_spin_table(spin_table);

                /*
                 * We don't set the BPTR register here since it already points
                 * to the boot page properly.
                 */
                mpic_reset_core(nr);

                /*
                 * Wait until the core is ready...
                 * We need to invalidate the stale data, in case the boot
                 * loader uses a cache-inhibited spin table.
                 */
                if (!spin_event_timeout(
                                read_spin_table_addr_l(spin_table) == 1,
                                10000, 100)) {
                        pr_err("%s: timeout waiting for core %d to reset\n",
                                __func__, hw_cpu);
                        ret = -ENOENT;
                        goto out;
                }

                /* clear the acknowledge status */
                __secondary_hold_acknowledge = -1;
        }
#endif
        flush_spin_table(spin_table);
        out_be32(&spin_table->pir, hw_cpu);
        out_be32(&spin_table->addr_l, __pa(__early_start));
        flush_spin_table(spin_table);

        /* Wait a bit for the CPU to ack. */
        if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
                                        10000, 100)) {
                pr_err("%s: timeout waiting for core %d to ack\n",
                        __func__, hw_cpu);
                ret = -ENOENT;
                goto out;
        }
out:
#else
        smp_generic_kick_cpu(nr);

        flush_spin_table(spin_table);
        out_be32(&spin_table->pir, hw_cpu);
        out_be64((u64 *)(&spin_table->addr_h),
                __pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
        flush_spin_table(spin_table);
#endif

        local_irq_restore(flags);

        if (ioremappable)
                iounmap(spin_table);

        return ret;
}

struct smp_ops_t smp_85xx_ops = {
        .kick_cpu = smp_85xx_kick_cpu,
        .cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable = generic_cpu_disable,
        .cpu_die = generic_cpu_die,
#endif
#ifdef CONFIG_KEXEC
        .give_timebase = smp_generic_give_timebase,
        .take_timebase = smp_generic_take_timebase,
#endif
};

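/*
 * On kexec, each secondary core parks itself in a tight loop with
 * interrupts off, counting itself in kexec_down_cpus so the boot CPU
 * knows when it is safe to reset the others and start the new kernel.
 */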
#ifdef CONFIG_KEXEC
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
        local_irq_disable();

        if (secondary) {
                atomic_inc(&kexec_down_cpus);
                /* loop forever */
                while (1);
        }
}

static void mpc85xx_smp_kexec_down(void *arg)
{
        if (ppc_md.kexec_cpu_down)
                ppc_md.kexec_cpu_down(0, 1);
}

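/*
 * Flush the page at the given physical address; kmap() is used so that
 * highmem pages can be reached through a virtual mapping.
 */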
static void map_and_flush(unsigned long paddr)
{
        struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
        unsigned long kaddr = (unsigned long)kmap(page);

        flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
        kunmap(page);
}

/*
 * Before we reset the other cores, we need to flush the relevant caches
 * out to memory so we don't get anything corrupted.  Some of these
 * flushes are performed out of an overabundance of caution, as
 * interrupts are not disabled yet and we can still be migrated between
 * cores.
 */
static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
{
        kimage_entry_t *ptr, entry;
        unsigned long paddr;
        int i;

        if (image->type == KEXEC_TYPE_DEFAULT) {
                /* normal kexec images are stored in temporary pages */
                for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
                     ptr = (entry & IND_INDIRECTION) ?
                                phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
                        if (!(entry & IND_DESTINATION))
                                map_and_flush(entry);
                }
                /* flush out last IND_DONE page */
                map_and_flush(entry);
        } else {
                /* crash type kexec images are copied to the crash region */
                for (i = 0; i < image->nr_segments; i++) {
                        struct kexec_segment *seg = &image->segment[i];
                        for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
                             paddr += PAGE_SIZE) {
                                map_and_flush(paddr);
                        }
                }
        }

        /* also flush the kimage struct to be passed in as well */
        flush_dcache_range((unsigned long)image,
                (unsigned long)image + sizeof(*image));
}

static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
        int timeout = INT_MAX;
        int i, num_cpus = num_present_cpus();

        mpc85xx_smp_flush_dcache_kexec(image);

        if (image->type == KEXEC_TYPE_DEFAULT)
                smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

        while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
               (timeout > 0))
                timeout--;

        if (!timeout)
                printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

        for_each_online_cpu(i) {
                if (i == smp_processor_id())
                        continue;
                mpic_reset_core(i);
        }

        default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC */

static void smp_85xx_setup_cpu(int cpu_nr)
{
        if (smp_85xx_ops.probe == smp_mpic_probe)
                mpic_setup_this_cpu();

        if (cpu_has_feature(CPU_FTR_DBELL))
                doorbell_setup_this_cpu();
}

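/*
 * SoCs whose "global utilities" (GUTS) block we know how to use to
 * freeze the timebase while synchronizing a newly woken core.
 */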
static const struct of_device_id mpc85xx_smp_guts_ids[] = {
        { .compatible = "fsl,mpc8572-guts", },
        { .compatible = "fsl,p1020-guts", },
        { .compatible = "fsl,p1021-guts", },
        { .compatible = "fsl,p1022-guts", },
        { .compatible = "fsl,p1023-guts", },
        { .compatible = "fsl,p2020-guts", },
        {},
};

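/*
 * Pick the IPI mechanism (MPIC message registers, or doorbells where the
 * core supports them) and install the GUTS-based timebase sync plus the
 * CPU hotplug and kexec hooks when the hardware provides them.
 */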
void __init mpc85xx_smp_init(void)
{
        struct device_node *np;

        smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;

        np = of_find_node_by_type(NULL, "open-pic");
        if (np) {
                smp_85xx_ops.probe = smp_mpic_probe;
                smp_85xx_ops.message_pass = smp_mpic_message_pass;
                of_node_put(np);
        }

        if (cpu_has_feature(CPU_FTR_DBELL)) {
                /*
                 * If left NULL, .message_pass defaults to
                 * smp_muxed_ipi_message_pass
                 */
                smp_85xx_ops.message_pass = NULL;
                smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
        }

        np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
        if (np) {
                guts = of_iomap(np, 0);
                of_node_put(np);
                if (!guts) {
                        pr_err("%s: Could not map guts node address\n",
                                __func__);
                        return;
                }
                smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
                smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
#ifdef CONFIG_HOTPLUG_CPU
                ppc_md.cpu_die = smp_85xx_mach_cpu_die;
#endif
        }

        smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC
        ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
        ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}