/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

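/*
 * smp_cross_call() is the platform-provided hook used to raise IPIs; it is
 * registered via set_smp_cross_call() below.  secondary_release and
 * secondary_thread_info are consumed by the secondary CPU's low-level boot
 * code (assumed to live in the architecture's head.S) to learn when it has
 * been released and which thread_info/stack to enter the kernel with.
 */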
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

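/*
 * Release one secondary CPU: publish its id in secondary_release and kick it
 * with an IPI_WAKEUP.  The secondary side is expected to be spinning in its
 * low-level boot code on something like the following (a sketch, not the
 * actual assembly):
 *
 *	while (secondary_release != my_cpu_id)
 *		;	// spin until the boot CPU releases us
 *
 * before it branches into secondary_start_kernel().
 */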
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one.
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * Now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}

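/*
 * Enumerate the possible CPUs from the device tree: every cpu node that
 * carries a valid "reg" property below NR_CPUS is marked possible.
 */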
void __init smp_init_cpus(void)
{
	struct device_node *cpu;
	u32 cpu_id;

	for_each_of_cpu_node(cpu) {
		if (of_property_read_u32(cpu, "reg", &cpu_id)) {
			pr_warn("%s missing reg property\n", cpu->full_name);
			continue;
		}

		if (cpu_id < NR_CPUS)
			set_cpu_possible(cpu_id, true);
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu < max_cpus)
			set_cpu_present(cpu, true);
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

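/*
 * Bring one secondary CPU up: hand it its idle thread and the kernel page
 * tables, release it via boot_secondary(), then wait up to one second for it
 * to signal cpu_running from secondary_start_kernel() before synchronising
 * the timer counters with it.
 */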
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

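/*
 * Entry point for a secondary CPU, reached from the low-level boot code once
 * the boot CPU has released it.  Runs on the init_mm address space and ends
 * by dropping into the idle loop.
 */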
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	preempt_disable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

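/*
 * IPI demultiplexer, expected to be called by the platform's interrupt
 * controller driver for each cross-call received on this CPU.  IPI_WAKEUP
 * carries no work of its own; its only purpose is to lift a waiting CPU out
 * of its doze/spin loop.
 */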
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

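/*
 * Park this CPU for smp_send_stop(): mark it offline, mask interrupts and,
 * if the power management unit is implemented (SPR_UPR_PMP), enter doze
 * mode; otherwise fall back to an infinite loop.
 */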
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

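/*
 * Register the cross-call (IPI) trigger function.  This is expected to be
 * called once during early boot by the platform's IPI-capable interrupt
 * controller driver (on OpenRISC, typically the OMPIC driver).
 */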
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - Performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

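/*
 * Flush an mm on every CPU in cmask.  When the local CPU is the only one in
 * the mask the flush is done directly, avoiding an IPI round-trip; otherwise
 * the work is pushed to all CPUs in the mask via on_each_cpu_mask().
 */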
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

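/*
 * Range flushes use the same local-vs-IPI decision as smp_flush_tlb_mm(),
 * and a range that fits within a single page degrades to a page flush.  The
 * flush_tlb_data block can live on the caller's stack because
 * on_each_cpu_mask() is called with wait=1 and only returns once every
 * target CPU has run the handler.
 */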
static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}

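/*
 * Public flush_tlb_* entry points.  The mm and vma variants restrict the
 * flush to the CPUs that have actually used the mm (mm_cpumask()) instead of
 * broadcasting, while flush_tlb_all() must reach every online CPU.
 */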
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end);
}

/* Instruction cache invalidate - performed on each CPU */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

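/*
 * Invalidate the instruction cache lines covering one page on every CPU.
 * Exported with EXPORT_SYMBOL so it is also reachable from modular code.
 */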
void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);