/*
 * Author: Andy Fleming <afleming@freescale.com>
 *	   Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/fsl_guts.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

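/*
 * ePAPR spin table: each secondary core spins on its own table entry,
 * waiting for addr_h/addr_l to change from the hold value (1) to the
 * physical address it should jump to; r3 and pir are handed to the
 * released core.  The layout follows the ePAPR specification and must
 * not be reordered.
 */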
struct epapr_spin_table {
	u32	addr_h;
	u32	addr_l;
	u32	r3_h;
	u32	r3_l;
	u32	reserved;
	u32	pir;
};

static struct ccsr_guts __iomem *guts;
static u64 timebase;
static int tb_req;
static int tb_valid;

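/*
 * Freezing both timebases through the GUTS DEVDISR register stops the
 * clocks on every core at once, so the timebase can be copied across
 * and restarted in lockstep.  The read-back of devdisr pushes the
 * write out to the device before we continue.
 */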
static void mpc85xx_timebase_freeze(int freeze)
{
	uint32_t mask;

	mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
	if (freeze)
		setbits32(&guts->devdisr, mask);
	else
		clrbits32(&guts->devdisr, mask);

	in_be32(&guts->devdisr);
}

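/*
 * Timebase hand-off between the boot core (giver) and a core coming
 * online (taker), synchronized through the tb_req/tb_valid flags:
 *
 *   taker: tb_req = 1  ---->  giver: freeze TB, timebase = get_tb(),
 *                                    tb_valid = 1
 *   taker: set_tb(timebase),
 *          tb_valid = 0       ---->  giver: unfreeze TB
 *
 * Both sides run with interrupts disabled so neither can wander off
 * mid-handshake.
 */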
static void mpc85xx_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	while (!tb_req)
		barrier();
	tb_req = 0;

	mpc85xx_timebase_freeze(1);
	timebase = get_tb();
	mb();
	tb_valid = 1;

	while (tb_valid)
		barrier();

	mpc85xx_timebase_freeze(0);

	local_irq_restore(flags);
}

static void mpc85xx_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	tb_req = 1;
	while (!tb_valid)
		barrier();

	set_tb(timebase >> 32, timebase & 0xffffffff);
	isync();
	tb_valid = 0;

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
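/*
 * Park a dying core: stop its timers (TCR = 0), flush and disable the
 * L1 cache so no dirty lines are lost, then drop into NAP mode via
 * HID0 and MSR[WE].  The core spins here until kick_cpu() resets it.
 */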
static void __cpuinit smp_85xx_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	u32 tmp;

	local_irq_disable();
	idle_task_exit();
	generic_set_cpu_dead(cpu);
	mb();

	mtspr(SPRN_TCR, 0);

	__flush_disable_L1();
	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
	mtspr(SPRN_HID0, tmp);
	isync();

	/* Enter NAP mode. */
	tmp = mfmsr();
	tmp |= MSR_WE;
	mb();
	mtmsr(tmp);
	isync();

	while (1)
		;
}
#endif

static inline void flush_spin_table(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
}

static inline u32 read_spin_table_addr_l(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
	return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}

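/*
 * Release secondary core 'nr': look up its spin table through the
 * "cpu-release-addr" property of its device-tree node, map the table,
 * write the core's PIR and the kernel entry point into it, and wait
 * for the core to acknowledge.  A CPU node looks roughly like this
 * (illustrative fragment only; the address is set by the boot loader):
 *
 *	cpu@1 {
 *		device_type = "cpu";
 *		reg = <1>;
 *		cpu-release-addr = <0 0xbff00f40>;
 *	};
 */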
static int __cpuinit smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	struct epapr_spin_table __iomem *spin_table;
	struct device_node *np;
	int hw_cpu = get_hard_smp_processor_id(nr);
	int ioremappable;
	int ret = 0;

	WARN_ON(nr < 0 || nr >= NR_CPUS);
	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to access the spinloop directly if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap_prot(*cpu_rel_addr,
			sizeof(struct epapr_spin_table), _PAGE_COHERENT);
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
	/* Corresponding to generic_set_cpu_dead() */
	generic_set_cpu_up(nr);

	if (system_state == SYSTEM_RUNNING) {
		/*
		 * To stay compatible with old boot programs that use a
		 * cache-inhibited spin table, flush the cache before
		 * reading the spin table to invalidate any stale data,
		 * and flush again after writing to push the data out.
		 */
		flush_spin_table(spin_table);
		out_be32(&spin_table->addr_l, 0);
		flush_spin_table(spin_table);

		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(hw_cpu);

		/*
		 * Wait until the core is ready.  Invalidate stale data
		 * first, in case the boot loader uses a cache-inhibited
		 * spin table.
		 */
		if (!spin_event_timeout(
				read_spin_table_addr_l(spin_table) == 1,
				10000, 100)) {
			pr_err("%s: timeout waiting for core %d to reset\n",
				__func__, hw_cpu);
			ret = -ENOENT;
			goto out;
		}

		/* clear the acknowledge status */
		__secondary_hold_acknowledge = -1;
	}
#endif
	flush_spin_table(spin_table);
	out_be32(&spin_table->pir, hw_cpu);
	out_be32(&spin_table->addr_l, __pa(__early_start));
	flush_spin_table(spin_table);

	/* Wait a bit for the CPU to ack. */
	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
			10000, 100)) {
		pr_err("%s: timeout waiting for core %d to ack\n",
			__func__, hw_cpu);
		ret = -ENOENT;
		goto out;
	}
out:
#else
	smp_generic_kick_cpu(nr);

	flush_spin_table(spin_table);
	out_be32(&spin_table->pir, hw_cpu);
	out_be64((u64 *)(&spin_table->addr_h),
		__pa((u64)*((unsigned long long *)generic_secondary_smp_init)));
	flush_spin_table(spin_table);
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}

struct smp_ops_t smp_85xx_ops = {
	.kick_cpu = smp_85xx_kick_cpu,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= generic_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.give_timebase	= smp_generic_give_timebase,
	.take_timebase	= smp_generic_take_timebase,
#endif
};

#ifdef CONFIG_KEXEC
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

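/*
 * kexec shutdown hook, run on every core.  Secondary cores bump the
 * counter and then spin forever with interrupts off, so the boot core
 * can tell when all of them have been brought down.
 */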
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	local_irq_disable();

	if (secondary) {
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1);
	}
}

static void mpc85xx_smp_kexec_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);
}

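/*
 * Flush one physical page through a temporary kernel mapping.  kmap()
 * is needed because the page may live in highmem and thus have no
 * permanent virtual address to flush by.
 */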
static void map_and_flush(unsigned long paddr)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long kaddr = (unsigned long)kmap(page);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
	kunmap(page);
}

/*
 * Before we reset the other cores, we need to flush the relevant cache
 * out to memory so we don't get anything corrupted.  Some of these
 * flushes are performed out of an overabundance of caution, since
 * interrupts are not yet disabled and we can still switch cores.
 */
static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	unsigned long paddr;
	int i;

	if (image->type == KEXEC_TYPE_DEFAULT) {
		/* normal kexec images are stored in temporary pages */
		for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
		     ptr = (entry & IND_INDIRECTION) ?
				phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
			if (!(entry & IND_DESTINATION))
				map_and_flush(entry);
		}
		/* flush out last IND_DONE page */
		map_and_flush(entry);
	} else {
		/* crash type kexec images are copied to the crash region */
		for (i = 0; i < image->nr_segments; i++) {
			struct kexec_segment *seg = &image->segment[i];
			for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
			     paddr += PAGE_SIZE) {
				map_and_flush(paddr);
			}
		}
	}

	/* also flush the kimage struct to be passed in as well */
	flush_dcache_range((unsigned long)image,
		(unsigned long)image + sizeof(*image));
}

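/*
 * Bring the machine down for kexec: flush the new image out of the
 * cache, park the secondary cores via the IPI above, wait for them to
 * check in, reset them through the MPIC, and finally hand control to
 * the generic kexec path.
 */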
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
	int timeout = INT_MAX;
	int i, num_cpus = num_present_cpus();

	mpc85xx_smp_flush_dcache_kexec(image);

	if (image->type == KEXEC_TYPE_DEFAULT)
		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

	while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
			(timeout > 0))
		timeout--;

	if (!timeout)
		printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

	for_each_online_cpu(i) {
		if (i == smp_processor_id())
			continue;
		mpic_reset_core(i);
	}

	default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC */

static void __cpuinit smp_85xx_setup_cpu(int cpu_nr)
{
	if (smp_85xx_ops.probe == smp_mpic_probe)
		mpic_setup_this_cpu();

	if (cpu_has_feature(CPU_FTR_DBELL))
		doorbell_setup_this_cpu();
}

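/*
 * SoCs whose global utilities (GUTS) block can freeze the timebase.
 * On these parts we use the GUTS-based give/take_timebase above,
 * which the CPU hotplug support below relies on.
 */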
static const struct of_device_id mpc85xx_smp_guts_ids[] = {
	{ .compatible = "fsl,mpc8572-guts", },
	{ .compatible = "fsl,p1020-guts", },
	{ .compatible = "fsl,p1021-guts", },
	{ .compatible = "fsl,p1022-guts", },
	{ .compatible = "fsl,p1023-guts", },
	{ .compatible = "fsl,p2020-guts", },
	{},
};

void __init mpc85xx_smp_init(void)
{
	struct device_node *np;

	smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np) {
		smp_85xx_ops.probe = smp_mpic_probe;
		smp_85xx_ops.message_pass = smp_mpic_message_pass;
	}

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		/*
		 * If left NULL, .message_pass defaults to
		 * smp_muxed_ipi_message_pass
		 */
		smp_85xx_ops.message_pass = NULL;
		smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
	}

	np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
	if (np) {
		guts = of_iomap(np, 0);
		of_node_put(np);
		if (!guts) {
			pr_err("%s: Could not map guts node address\n",
				__func__);
			return;
		}
		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
#ifdef CONFIG_HOTPLUG_CPU
		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
#endif
	}

	smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC
	ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
	ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}