// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/cpu.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/irq_work.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
        IPI_IRQ_WORK,
        IPI_TIMER,
        IPI_MAX
};

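/*
 * Map from logical CPU id to physical hartid.  Hartids come from the
 * devicetree and need not be contiguous or start at zero, while logical
 * CPU ids are dense and start at 0.  Illustrative example (not from this
 * file): if the boot hart has hartid 3, smp_setup_processor_id() below
 * records cpuid_to_hartid_map(0) == 3.
 */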
unsigned long __cpuid_to_hartid_map[NR_CPUS] __ro_after_init = {
        [0 ... NR_CPUS-1] = INVALID_HARTID
};

void __init smp_setup_processor_id(void)
{
        cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/*
 * A collection of single bit ipi messages.  'stats' and 'bits' are kept
 * on separate cache lines so that a remote sender setting a pending bit
 * does not contend with the owning CPU updating its statistics.
 */
static struct {
        unsigned long stats[IPI_MAX] ____cacheline_aligned;
        unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

int riscv_hartid_to_cpuid(int hartid)
{
        int i;

        for (i = 0; i < NR_CPUS; i++)
                if (cpuid_to_hartid_map(i) == hartid)
                        return i;

        pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
        return -ENOENT;
}

void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
        int cpu;

        cpumask_clear(out);
        for_each_cpu(cpu, in)
                cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask);

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
        return phys_id == cpuid_to_hartid_map(cpu);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}

static void ipi_stop(void)
{
        set_cpu_online(smp_processor_id(), false);
        while (1)
                wait_for_interrupt();
}

static const struct riscv_ipi_ops *ipi_ops __ro_after_init;

void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
{
        ipi_ops = ops;
}
EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);

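/*
 * Usage sketch for riscv_set_ipi_ops() (illustrative, not part of this
 * file): an IPI backend registers its callbacks once at boot.  This is
 * roughly what the SBI backend in arch/riscv/kernel/sbi.c does:
 *
 *      static void sbi_send_cpumask_ipi(const struct cpumask *target)
 *      {
 *              struct cpumask hartid_mask;
 *
 *              riscv_cpuid_to_hartid_mask(target, &hartid_mask);
 *              sbi_send_ipi(cpumask_bits(&hartid_mask));
 *      }
 *
 *      static const struct riscv_ipi_ops sbi_ipi_ops = {
 *              .ipi_inject = sbi_send_cpumask_ipi,
 *      };
 *
 *      riscv_set_ipi_ops(&sbi_ipi_ops);
 */
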
void riscv_clear_ipi(void)
{
        if (ipi_ops && ipi_ops->ipi_clear)
                ipi_ops->ipi_clear();

        csr_clear(CSR_IP, IE_SIE);
}
EXPORT_SYMBOL_GPL(riscv_clear_ipi);

static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
        int cpu;

        smp_mb__before_atomic();
        for_each_cpu(cpu, mask)
                set_bit(op, &ipi_data[cpu].bits);
        smp_mb__after_atomic();

        if (ipi_ops && ipi_ops->ipi_inject)
                ipi_ops->ipi_inject(mask);
        else
                pr_warn("SMP: IPI inject method not available\n");
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
        smp_mb__before_atomic();
        set_bit(op, &ipi_data[cpu].bits);
        smp_mb__after_atomic();

        if (ipi_ops && ipi_ops->ipi_inject)
                ipi_ops->ipi_inject(cpumask_of(cpu));
        else
                pr_warn("SMP: IPI inject method not available\n");
}

#ifdef CONFIG_IRQ_WORK
void arch_irq_work_raise(void)
{
        send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
}
#endif

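/*
 * IPI delivery protocol: a sender publishes the message type as a bit in
 * the target's ipi_data[].bits (with full barriers around the set_bit()
 * in send_ipi_mask()/send_ipi_single()) and then asks the backend to
 * inject a software interrupt.  The handler below atomically swaps the
 * pending word to zero, services every bit it collected, and loops until
 * no new bits have arrived.
 */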
void handle_IPI(struct pt_regs *regs)
{
        unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
        unsigned long *stats = ipi_data[smp_processor_id()].stats;

        riscv_clear_ipi();

        while (true) {
                unsigned long ops;

                /* Order bit clearing and data access. */
                mb();

                ops = xchg(pending_ipis, 0);
                if (ops == 0)
                        return;

                if (ops & (1 << IPI_RESCHEDULE)) {
                        stats[IPI_RESCHEDULE]++;
                        scheduler_ipi();
                }

                if (ops & (1 << IPI_CALL_FUNC)) {
                        stats[IPI_CALL_FUNC]++;
                        generic_smp_call_function_interrupt();
                }

                if (ops & (1 << IPI_CPU_STOP)) {
                        stats[IPI_CPU_STOP]++;
                        ipi_stop();
                }

                if (ops & (1 << IPI_IRQ_WORK)) {
                        stats[IPI_IRQ_WORK]++;
                        irq_work_run();
                }

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
                if (ops & (1 << IPI_TIMER)) {
                        stats[IPI_TIMER]++;
                        tick_receive_broadcast();
                }
#endif
                BUG_ON((ops >> IPI_MAX) != 0);

                /* Order data access and bit testing. */
                mb();
        }
}

static const char * const ipi_names[] = {
        [IPI_RESCHEDULE]        = "Rescheduling interrupts",
        [IPI_CALL_FUNC]         = "Function call interrupts",
        [IPI_CPU_STOP]          = "CPU stop interrupts",
        [IPI_IRQ_WORK]          = "IRQ work interrupts",
        [IPI_TIMER]             = "Timer broadcast interrupts",
};

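/*
 * Dumps the per-CPU IPI counts kept in ipi_data[].stats; this is what
 * backs the IPI lines of /proc/interrupts on RISC-V (called from the
 * arch_show_interrupts() hook, assuming the usual wiring in
 * arch/riscv/kernel/irq.c).
 */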
void show_ipi_stats(struct seq_file *p, int prec)
{
        unsigned int cpu, i;

        for (i = 0; i < IPI_MAX; i++) {
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
                        seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
                seq_printf(p, " %s\n", ipi_names[i]);
        }
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
        send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_ipi_single(cpu, IPI_CALL_FUNC);
}

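/*
 * With GENERIC_CLOCKEVENTS_BROADCAST, the timer core calls
 * tick_broadcast() to kick CPUs whose local clock-event device is
 * stopped (for example in a deep idle state); the IPI_TIMER branch in
 * handle_IPI() then runs tick_receive_broadcast() on each target.
 */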
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
        send_ipi_mask(mask, IPI_TIMER);
}
#endif

void smp_send_stop(void)
{
        unsigned long timeout;

        if (num_online_cpus() > 1) {
                cpumask_t mask;

                cpumask_copy(&mask, cpu_online_mask);
                cpumask_clear_cpu(smp_processor_id(), &mask);

                if (system_state <= SYSTEM_RUNNING)
                        pr_crit("SMP: stopping secondary CPUs\n");
                send_ipi_mask(&mask, IPI_CPU_STOP);
        }

        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
        while (num_online_cpus() > 1 && timeout--)
                udelay(1);

        if (num_online_cpus() > 1)
                pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
                        cpumask_pr_args(cpu_online_mask));
}

void smp_send_reschedule(int cpu)
{
        send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);