/*
 * Author: Andy Fleming <afleming@freescale.com>
 *	   Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012 Freescale Semiconductor Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>

#include <asm/machdep.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/fsl_guts.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

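/*
 * ePAPR spin table entry.  Each secondary core spins on its own entry
 * until the boot core publishes a release address in addr_h/addr_l;
 * the secondary then jumps there with r3 loaded from r3_h/r3_l.  The
 * pir field carries the core's processor ID.
 */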
struct epapr_spin_table {
	u32	addr_h;
	u32	addr_l;
	u32	r3_h;
	u32	r3_l;
	u32	reserved;
	u32	pir;
};

static struct ccsr_guts __iomem *guts;
static u64 timebase;
static int tb_req;
static int tb_valid;

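/*
 * Freeze or thaw both core timebases by toggling the TB0/TB1 disable
 * bits in the GUTS DEVDISR register.  The read-back flushes the posted
 * write so the change takes effect before we continue.
 */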
static void mpc85xx_timebase_freeze(int freeze)
{
	uint32_t mask;

	mask = CCSR_GUTS_DEVDISR_TB0 | CCSR_GUTS_DEVDISR_TB1;
	if (freeze)
		setbits32(&guts->devdisr, mask);
	else
		clrbits32(&guts->devdisr, mask);

	in_be32(&guts->devdisr);
}

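/*
 * Boot-core half of the timebase handshake: wait for the secondary to
 * request the timebase, freeze both timebases, publish the current
 * value, then wait for the secondary to consume it before thawing.
 */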
static void mpc85xx_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	while (!tb_req)
		barrier();
	tb_req = 0;

	mpc85xx_timebase_freeze(1);
	timebase = get_tb();
	mb();
	tb_valid = 1;

	while (tb_valid)
		barrier();

	mpc85xx_timebase_freeze(0);

	local_irq_restore(flags);
}

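/*
 * Secondary-core half of the handshake: request the timebase, wait for
 * the boot core to publish it while both timebases are frozen, then
 * load it and signal completion.
 */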
static void mpc85xx_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);

	tb_req = 1;
	while (!tb_valid)
		barrier();

	set_tb(timebase >> 32, timebase & 0xffffffff);
	isync();
	tb_valid = 0;

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
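/* Offline path: park the dying core in NAP mode with its timers disabled. */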
static void __cpuinit smp_85xx_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	u32 tmp;

	local_irq_disable();
	idle_task_exit();
	generic_set_cpu_dead(cpu);
	mb();

	mtspr(SPRN_TCR, 0);

	__flush_disable_L1();
	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
	mtspr(SPRN_HID0, tmp);
	isync();

	/* Enter NAP mode. */
	tmp = mfmsr();
	tmp |= MSR_WE;
	mb();
	mtmsr(tmp);
	isync();

	while (1)
		;
}
#endif

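/*
 * Release secondary core 'nr' from its spin loop: locate the core's
 * ePAPR spin table via the "cpu-release-addr" device tree property and
 * write the kernel entry point into it.
 */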
static int __cpuinit smp_85xx_kick_cpu(int nr)
{
	unsigned long flags;
	const u64 *cpu_rel_addr;
	struct epapr_spin_table __iomem *spin_table;
	struct device_node *np;
	int hw_cpu = get_hard_smp_processor_id(nr);
	int ioremappable;
	int ret = 0;

	WARN_ON(nr < 0 || nr >= NR_CPUS);
	WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);

	pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);

	np = of_get_cpu_node(nr, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);

	if (cpu_rel_addr == NULL) {
		printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to access the spinloop directly if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap(*cpu_rel_addr,
				sizeof(struct epapr_spin_table));
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
#ifdef CONFIG_PPC32
#ifdef CONFIG_HOTPLUG_CPU
	/* Corresponding to generic_set_cpu_dead() */
	generic_set_cpu_up(nr);

	if (system_state == SYSTEM_RUNNING) {
		out_be32(&spin_table->addr_l, 0);

		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(hw_cpu);

		/* wait until core is ready... */
		if (!spin_event_timeout(in_be32(&spin_table->addr_l) == 1,
					10000, 100)) {
			pr_err("%s: timeout waiting for core %d to reset\n",
				__func__, hw_cpu);
			ret = -ENOENT;
			goto out;
		}

		/* clear the acknowledge status */
		__secondary_hold_acknowledge = -1;
	}
#endif
	out_be32(&spin_table->pir, hw_cpu);
	out_be32(&spin_table->addr_l, __pa(__early_start));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));

	/* Wait a bit for the CPU to ack. */
	if (!spin_event_timeout(__secondary_hold_acknowledge == hw_cpu,
				10000, 100)) {
		pr_err("%s: timeout waiting for core %d to ack\n",
			__func__, hw_cpu);
		ret = -ENOENT;
		goto out;
	}
out:
#else
	smp_generic_kick_cpu(nr);

	out_be32(&spin_table->pir, hw_cpu);
	out_be64((u64 *)(&spin_table->addr_h),
		__pa((u64)*((unsigned long long *)generic_secondary_smp_init)));

	if (!ioremappable)
		flush_dcache_range((ulong)spin_table,
			(ulong)spin_table + sizeof(struct epapr_spin_table));
#endif

	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}

struct smp_ops_t smp_85xx_ops = {
	.kick_cpu = smp_85xx_kick_cpu,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable	= generic_cpu_disable,
	.cpu_die	= generic_cpu_die,
#endif
#ifdef CONFIG_KEXEC
	.give_timebase	= smp_generic_give_timebase,
	.take_timebase	= smp_generic_take_timebase,
#endif
};

#ifdef CONFIG_KEXEC
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

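/*
 * kexec_cpu_down callback: secondary cores count themselves in and then
 * spin with interrupts off until the kexec'ing core resets them.
 */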
void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	local_irq_disable();

	if (secondary) {
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1);
	}
}

static void mpc85xx_smp_kexec_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);
}

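/* Temporarily kmap() a page (which may be in highmem) and flush it to memory. */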
static void map_and_flush(unsigned long paddr)
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned long kaddr = (unsigned long)kmap(page);

	flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
	kunmap(page);
}

/*
 * Before we reset the other cores, we need to flush the relevant cache
 * out to memory so we don't get anything corrupted.  Some of these
 * flushes are performed out of an overabundance of caution, as
 * interrupts are not disabled yet and we can still switch cores.
 */
static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image)
{
	kimage_entry_t *ptr, entry;
	unsigned long paddr;
	int i;

	if (image->type == KEXEC_TYPE_DEFAULT) {
		/* normal kexec images are stored in temporary pages */
		for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
		     ptr = (entry & IND_INDIRECTION) ?
				phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
			if (!(entry & IND_DESTINATION))
				map_and_flush(entry);
		}
		/* flush out last IND_DONE page */
		map_and_flush(entry);
	} else {
		/* crash type kexec images are copied to the crash region */
		for (i = 0; i < image->nr_segments; i++) {
			struct kexec_segment *seg = &image->segment[i];
			for (paddr = seg->mem; paddr < seg->mem + seg->memsz;
			     paddr += PAGE_SIZE) {
				map_and_flush(paddr);
			}
		}
	}

	/* also flush the kimage struct to be passed in as well */
	flush_dcache_range((unsigned long)image,
			   (unsigned long)image + sizeof(*image));
}

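/*
 * Flush the kexec image to memory, signal the secondary cores to park
 * themselves, reset them via the MPIC, then hand off to the generic
 * kexec path.
 */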
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
	int timeout = INT_MAX;
	int i, num_cpus = num_present_cpus();

	mpc85xx_smp_flush_dcache_kexec(image);

	if (image->type == KEXEC_TYPE_DEFAULT)
		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

	while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
		(timeout > 0))
		timeout--;

	if (!timeout)
		printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

	for_each_online_cpu(i) {
		if (i == smp_processor_id())
			continue;
		mpic_reset_core(i);
	}

	default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC */

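/* Per-CPU bring-up: initialize this core's MPIC and doorbell state. */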
static void __cpuinit smp_85xx_setup_cpu(int cpu_nr)
{
	if (smp_85xx_ops.probe == smp_mpic_probe)
		mpic_setup_this_cpu();

	if (cpu_has_feature(CPU_FTR_DBELL))
		doorbell_setup_this_cpu();
}

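/*
 * SoCs whose global utilities (guts) block provides the DEVDISR
 * timebase-freeze control used for timebase synchronization.
 */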
static const struct of_device_id mpc85xx_smp_guts_ids[] = {
	{ .compatible = "fsl,mpc8572-guts", },
	{ .compatible = "fsl,p1020-guts", },
	{ .compatible = "fsl,p1021-guts", },
	{ .compatible = "fsl,p1022-guts", },
	{ .compatible = "fsl,p1023-guts", },
	{ .compatible = "fsl,p2020-guts", },
	{},
};

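/*
 * Wire up the 85xx SMP ops: pick MPIC and/or doorbell IPIs based on
 * what the hardware provides, and enable guts-based timebase sync and
 * CPU hotplug when a supported guts node is present.
 */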
void __init mpc85xx_smp_init(void)
{
	struct device_node *np;

	smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np) {
		smp_85xx_ops.probe = smp_mpic_probe;
		smp_85xx_ops.message_pass = smp_mpic_message_pass;
	}

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		/*
		 * If left NULL, .message_pass defaults to
		 * smp_muxed_ipi_message_pass
		 */
		smp_85xx_ops.message_pass = NULL;
		smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
	}

	np = of_find_matching_node(NULL, mpc85xx_smp_guts_ids);
	if (np) {
		guts = of_iomap(np, 0);
		of_node_put(np);
		if (!guts) {
			pr_err("%s: Could not map guts node address\n",
				__func__);
			return;
		}
		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
#ifdef CONFIG_HOTPLUG_CPU
		ppc_md.cpu_die = smp_85xx_mach_cpu_die;
#endif
	}

	smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC
	ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
	ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}