// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Author: Andy Fleming <afleming@freescale.com>
 * 	   Kumar Gala <galak@kernel.crashing.org>
 *
 * Copyright 2006-2008, 2011-2012, 2015 Freescale Semiconductor Inc.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched/hotplug.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/kexec.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/fsl/guts.h>
#include <linux/pgtable.h>

#include <asm/machdep.h>
#include <asm/page.h>
#include <asm/mpic.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/fsl_pm.h>

#include <sysdev/fsl_soc.h>
#include <sysdev/mpic.h>
#include "smp.h"

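/*
 * Secondary cores are released via an ePAPR-style spin table: the boot
 * program parks each core on a spinloop watching its table entry, and
 * the core jumps to the address in addr_h/addr_l once the kernel
 * changes it from its initial value of 1 (pir is written first to
 * identify the core).
 */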
struct epapr_spin_table {
	u32	addr_h;
	u32	addr_l;
	u32	r3_h;
	u32	r3_l;
	u32	reserved;
	u32	pir;
};

static u64 timebase;
static int tb_req;
static int tb_valid;

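/*
 * Timebase synchronization handshake between the boot CPU and a CPU
 * coming online: the new CPU sets tb_req and waits; the boot CPU
 * freezes the timebase, publishes it in 'timebase' and sets tb_valid;
 * the new CPU copies it into its own TB and clears tb_valid, after
 * which the boot CPU unfreezes the timebase.
 */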
static void mpc85xx_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();

	while (!tb_req)
		barrier();
	tb_req = 0;

	qoriq_pm_ops->freeze_time_base(true);
#ifdef CONFIG_PPC64
	/*
	 * e5500/e6500 have a workaround for erratum A-006958 in place
	 * that will reread the timebase until TBL is non-zero.
	 * That would be a bad thing when the timebase is frozen.
	 *
	 * Thus, we read it manually, and instead of checking that
	 * TBL is non-zero, we ensure that TB does not change.  We don't
	 * do that for the main mftb implementation, because it requires
	 * a scratch register.
	 */
	{
		u64 prev;

		asm volatile("mfspr %0, %1" : "=r" (timebase) :
			     "i" (SPRN_TBRL));

		do {
			prev = timebase;
			asm volatile("mfspr %0, %1" : "=r" (timebase) :
				     "i" (SPRN_TBRL));
		} while (prev != timebase);
	}
#else
	timebase = get_tb();
#endif
	mb();
	tb_valid = 1;

	while (tb_valid)
		barrier();

	qoriq_pm_ops->freeze_time_base(false);

	local_irq_restore(flags);
}

static void mpc85xx_take_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();

	tb_req = 1;
	while (!tb_valid)
		barrier();

	set_tb(timebase >> 32, timebase & 0xffffffff);
	isync();
	tb_valid = 0;

	local_irq_restore(flags);
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_85xx_cpu_offline_self(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	hard_irq_disable();
	/* mask all irqs to prevent cpu wakeup */
	qoriq_pm_ops->irq_mask(cpu);

	idle_task_exit();

	/* Disable the decrementer/watchdog and clear any pending timer status */
	mtspr(SPRN_TCR, 0);
	mtspr(SPRN_TSR, mfspr(SPRN_TSR));

	generic_set_cpu_dead(cpu);

	cur_cpu_spec->cpu_down_flush();

	qoriq_pm_ops->cpu_die(cpu);

	while (1)
		;
}

static void qoriq_cpu_kill(unsigned int cpu)
{
	int i;

	for (i = 0; i < 500; i++) {
		if (is_cpu_dead(cpu)) {
#ifdef CONFIG_PPC64
			paca_ptrs[cpu]->cpu_start = 0;
#endif
			return;
		}
		msleep(20);
	}
	pr_err("CPU%d didn't die...\n", cpu);
}
#endif

/*
 * To stay compatible with old boot programs which use a
 * cache-inhibited spin table, we need to flush the cache
 * before accessing the spin table to invalidate any stale data.
 * We also need to flush the cache after writing to the spin
 * table to push the data out.
 */
static inline void flush_spin_table(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
}

static inline u32 read_spin_table_addr_l(void *spin_table)
{
	flush_dcache_range((ulong)spin_table,
		(ulong)spin_table + sizeof(struct epapr_spin_table));
	return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
}

#ifdef CONFIG_PPC64
static void wake_hw_thread(void *info)
{
	void fsl_secondary_thread_init(void);
	unsigned long inia;
	int cpu = *(const int *)info;

	inia = *(unsigned long *)fsl_secondary_thread_init;
	book3e_start_thread(cpu_thread_in_core(cpu), inia);
}
#endif

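/*
 * The spin-table release address comes from the "cpu-release-addr"
 * property of the cpu node, as described by ePAPR.  An illustrative
 * cpu node (the reg and release-addr values here are assumptions;
 * real values are board-specific) looks like:
 *
 *	cpu@1 {
 *		device_type = "cpu";
 *		reg = <1>;
 *		enable-method = "spin-table";
 *		cpu-release-addr = <0x0 0xbff00a00>;
 *	};
 */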
static int smp_85xx_start_cpu(int cpu)
{
	int ret = 0;
	struct device_node *np;
	const u64 *cpu_rel_addr;
	unsigned long flags;
	int ioremappable;
	int hw_cpu = get_hard_smp_processor_id(cpu);
	struct epapr_spin_table __iomem *spin_table;

	np = of_get_cpu_node(cpu, NULL);
	cpu_rel_addr = of_get_property(np, "cpu-release-addr", NULL);
	if (!cpu_rel_addr) {
		pr_err("No cpu-release-addr for cpu %d\n", cpu);
		return -ENOENT;
	}

	/*
	 * A secondary core could be in a spinloop in the bootpage
	 * (0xfffff000), somewhere in highmem, or somewhere in lowmem.
	 * The bootpage and highmem can be accessed via ioremap(), but
	 * we need to directly access the spinloop if it's in lowmem.
	 */
	ioremappable = *cpu_rel_addr > virt_to_phys(high_memory);

	/* Map the spin table */
	if (ioremappable)
		spin_table = ioremap_coherent(*cpu_rel_addr,
				sizeof(struct epapr_spin_table));
	else
		spin_table = phys_to_virt(*cpu_rel_addr);

	local_irq_save(flags);
	hard_irq_disable();

	if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
		qoriq_pm_ops->cpu_up_prepare(cpu);

	/* if cpu is not spinning, reset it */
	if (read_spin_table_addr_l(spin_table) != 1) {
		/*
		 * We don't set the BPTR register here since it already points
		 * to the boot page properly.
		 */
		mpic_reset_core(cpu);

		/*
		 * wait until core is ready...
		 * We need to invalidate the stale data, in case the boot
		 * loader uses a cache-inhibited spin table.
		 */
		if (!spin_event_timeout(
				read_spin_table_addr_l(spin_table) == 1,
				10000, 100)) {
			pr_err("timeout waiting for cpu %d to reset\n",
				hw_cpu);
			ret = -EAGAIN;
			goto err;
		}
	}

	flush_spin_table(spin_table);
	out_be32(&spin_table->pir, hw_cpu);
#ifdef CONFIG_PPC64
	out_be64((u64 *)(&spin_table->addr_h),
		__pa(ppc_function_entry(generic_secondary_smp_init)));
#else
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	/*
	 * We also need to write addr_h to the spin table for systems
	 * whose physical memory start address was configured above 4G;
	 * otherwise the secondary core cannot get the correct entry
	 * point to start from.
	 */
	out_be32(&spin_table->addr_h, __pa(__early_start) >> 32);
#endif
	out_be32(&spin_table->addr_l, __pa(__early_start));
#endif
	flush_spin_table(spin_table);
err:
	local_irq_restore(flags);

	if (ioremappable)
		iounmap(spin_table);

	return ret;
}

static int smp_85xx_kick_cpu(int nr)
{
	int ret = 0;
#ifdef CONFIG_PPC64
	int primary = nr;
#endif

	WARN_ON(nr < 0 || nr >= num_possible_cpus());

	pr_debug("kick CPU #%d\n", nr);

#ifdef CONFIG_PPC64
	if (threads_per_core == 2) {
		if (WARN_ON_ONCE(!cpu_has_feature(CPU_FTR_SMT)))
			return -ENOENT;

		booting_thread_hwid = cpu_thread_in_core(nr);
		primary = cpu_first_thread_sibling(nr);

		if (qoriq_pm_ops && qoriq_pm_ops->cpu_up_prepare)
			qoriq_pm_ops->cpu_up_prepare(nr);

		/*
		 * If either thread in the core is online, use it to start
		 * the other.
		 */
		if (cpu_online(primary)) {
			smp_call_function_single(primary,
					wake_hw_thread, &nr, 1);
			goto done;
		} else if (cpu_online(primary + 1)) {
			smp_call_function_single(primary + 1,
					wake_hw_thread, &nr, 1);
			goto done;
		}

		/*
		 * If we get here, both threads in the core are offline.
		 * So start the primary thread, which will then start the
		 * thread specified in booting_thread_hwid, the one
		 * corresponding to nr.
		 */

	} else if (threads_per_core == 1) {
		/*
		 * If each core has only one thread, set booting_thread_hwid
		 * to an invalid value.
		 */
		booting_thread_hwid = INVALID_THREAD_HWID;

	} else if (threads_per_core > 2) {
		pr_err("More than 2 threads per core are not supported\n");
		return -EINVAL;
	}

	ret = smp_85xx_start_cpu(primary);
	if (ret)
		return ret;

done:
	paca_ptrs[nr]->cpu_start = 1;
	generic_set_cpu_up(nr);

	return ret;
#else
	ret = smp_85xx_start_cpu(nr);
	if (ret)
		return ret;

	generic_set_cpu_up(nr);

	return ret;
#endif
}

struct smp_ops_t smp_85xx_ops = {
	.cause_nmi_ipi = NULL,
	.kick_cpu = smp_85xx_kick_cpu,
	.cpu_bootable = smp_generic_cpu_bootable,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = generic_cpu_disable,
	.cpu_die = generic_cpu_die,
#endif
#if defined(CONFIG_KEXEC_CORE) && !defined(CONFIG_PPC64)
	.give_timebase = smp_generic_give_timebase,
	.take_timebase = smp_generic_take_timebase,
#endif
};

#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);

static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	local_irq_disable();

	if (secondary) {
		cur_cpu_spec->cpu_down_flush();
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1);
	}
}

static void mpc85xx_smp_kexec_down(void *arg)
{
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);
}
#else
static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
	int cpu = smp_processor_id();
	int sibling = cpu_last_thread_sibling(cpu);
	bool notified = false;
	int disable_cpu;
	int disable_threadbit = 0;
	long start = mftb();
	long now;

	local_irq_disable();
	hard_irq_disable();
	mpic_teardown_this_cpu(secondary);

	if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
		/*
		 * We enter the crash kernel on whatever cpu crashed,
		 * even if it's a secondary thread.  If that's the case,
		 * disable the corresponding primary thread.
		 */
		disable_threadbit = 1;
		disable_cpu = cpu_first_thread_sibling(cpu);
	} else if (sibling != crashing_cpu &&
		   cpu_thread_in_core(cpu) == 0 &&
		   cpu_thread_in_core(sibling) != 0) {
		disable_threadbit = 2;
		disable_cpu = sibling;
	}

	if (disable_threadbit) {
		while (paca_ptrs[disable_cpu]->kexec_state < KEXEC_STATE_REAL_MODE) {
			barrier();
			now = mftb();
			if (!notified && now - start > 1000000) {
				pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n",
					__func__, smp_processor_id(),
					disable_cpu,
					paca_ptrs[disable_cpu]->kexec_state);
				notified = true;
			}
		}

		if (notified) {
			pr_info("%s: cpu %d done waiting\n",
				__func__, disable_cpu);
		}

		mtspr(SPRN_TENC, disable_threadbit);
		while (mfspr(SPRN_TENSR) & disable_threadbit)
			cpu_relax();
	}
}
#endif

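/*
 * On 32-bit, quiesce the other CPUs before kexec: each secondary
 * flushes its caches and parks in mpc85xx_smp_kexec_cpu_down(), the
 * boot CPU waits for them via kexec_down_cpus, and any remaining
 * online cores are then reset through the MPIC.
 */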
static void mpc85xx_smp_machine_kexec(struct kimage *image)
{
#ifdef CONFIG_PPC32
	int timeout = INT_MAX;
	int i, num_cpus = num_present_cpus();

	if (image->type == KEXEC_TYPE_DEFAULT)
		smp_call_function(mpc85xx_smp_kexec_down, NULL, 0);

	while ((atomic_read(&kexec_down_cpus) != (num_cpus - 1)) &&
	       (timeout > 0))
		timeout--;

	if (!timeout)
		printk(KERN_ERR "Unable to bring down secondary cpu(s)\n");

	for_each_online_cpu(i) {
		if (i == smp_processor_id())
			continue;
		mpic_reset_core(i);
	}
#endif

	default_machine_kexec(image);
}
#endif /* CONFIG_KEXEC_CORE */

static void smp_85xx_setup_cpu(int cpu_nr)
{
	mpic_setup_this_cpu();
}

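/*
 * Probe-time wiring: prefer the MPIC for IPIs and per-cpu setup when an
 * "open-pic" node is present, switch to doorbell IPIs when the CPU
 * supports them, and hook up the qoriq PM ops (timebase freeze, cpu
 * hotplug) when a PM backend is available.
 */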
void __init mpc85xx_smp_init(void)
{
	struct device_node *np;

	np = of_find_node_by_type(NULL, "open-pic");
	if (np) {
		smp_85xx_ops.probe = smp_mpic_probe;
		smp_85xx_ops.setup_cpu = smp_85xx_setup_cpu;
		smp_85xx_ops.message_pass = smp_mpic_message_pass;
	} else {
		smp_85xx_ops.setup_cpu = NULL;
	}

	if (cpu_has_feature(CPU_FTR_DBELL)) {
		/*
		 * If left NULL, .message_pass defaults to
		 * smp_muxed_ipi_message_pass
		 */
		smp_85xx_ops.message_pass = NULL;
		smp_85xx_ops.cause_ipi = doorbell_global_ipi;
		smp_85xx_ops.probe = NULL;
	}

#ifdef CONFIG_FSL_CORENET_RCPM
	/* Assign a value to qoriq_pm_ops on PPC_E500MC */
	fsl_rcpm_init();
#else
	/* Assign a value to qoriq_pm_ops on !PPC_E500MC */
	mpc85xx_setup_pmc();
#endif
	if (qoriq_pm_ops) {
		smp_85xx_ops.give_timebase = mpc85xx_give_timebase;
		smp_85xx_ops.take_timebase = mpc85xx_take_timebase;
#ifdef CONFIG_HOTPLUG_CPU
		smp_85xx_ops.cpu_offline_self = smp_85xx_cpu_offline_self;
		smp_85xx_ops.cpu_die = qoriq_cpu_kill;
#endif
	}
	smp_ops = &smp_85xx_ops;

#ifdef CONFIG_KEXEC_CORE
	ppc_md.kexec_cpu_down = mpc85xx_smp_kexec_cpu_down;
	ppc_md.machine_kexec = mpc85xx_smp_machine_kexec;
#endif
}