/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};

void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/* A collection of single bit ipi messages. */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

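/*
 * Map a hart (hardware thread) ID back to its logical CPU ID.  Hart IDs
 * need not be contiguous, so this is a linear scan of the cpuid-to-hartid
 * table; a hartid with no mapping is treated as a fatal bug.
 */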
int riscv_hartid_to_cpuid(int hartid)
{
	int i = -1;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
	BUG();
	return i;
}

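/*
 * Convert a mask of logical CPU IDs into the corresponding mask of hart
 * IDs, which is the form the SBI firmware calls expect.
 */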
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
	int cpu;

	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

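/*
 * Park the calling hart: mark it offline, then idle in a
 * wait_for_interrupt() loop.  This is the receiving end of IPI_CPU_STOP.
 */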
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

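/*
 * Supervisor software interrupt handler.  Senders publish one bit per
 * message type in this hart's ipi_data before raising the interrupt, so
 * we atomically drain the pending bits with xchg() and dispatch each
 * requested operation, looping in case new bits arrive meanwhile.
 */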
void riscv_software_interrupt(void)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	/* Clear pending IPI */
	csr_clear(sip, SIE_SSIE);

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}

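/*
 * Set the requested message bit in each destination CPU's ipi_data, then
 * ask the SBI firmware to raise a software interrupt on the corresponding
 * harts.  The barriers pair with the ones in riscv_software_interrupt().
 */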
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
	int cpuid, hartid;
	struct cpumask hartid_mask;

	cpumask_clear(&hartid_mask);
	mb();
	for_each_cpu(cpuid, to_whom) {
		set_bit(operation, &ipi_data[cpuid].bits);
		hartid = cpuid_to_hartid_map(cpuid);
		cpumask_set_cpu(hartid, &hartid_mask);
	}
	mb();
	sbi_send_ipi(cpumask_bits(&hartid_mask));
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
};

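/*
 * Print one row of per-CPU IPI counts per message type, in the style of
 * /proc/interrupts; @prec is the width of the interrupt-name column.
 */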
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

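/* Arch hooks for the generic smp_call_function machinery. */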
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
}

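/*
 * Stop all other CPUs (e.g. on reboot or panic): send IPI_CPU_STOP to
 * every other online CPU, then poll for up to a second for them to take
 * themselves offline in ipi_stop().
 */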
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_message(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}

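/* Kick @cpu so that the scheduler runs on it as soon as possible. */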
void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

/*
 * Performs an icache flush for the given MM context. RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI
 * that informs the remote harts they need to flush their local instruction
 * caches. To avoid pathologically slow behavior in a common case (a bunch
 * of single-hart processes on a many-hart machine, i.e. 'make -j') we
 * avoid the IPIs for harts that are not currently executing an MM context
 * and instead schedule a deferred local instruction cache flush to be
 * performed before execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, hmask, *mask;

	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them
	 * as flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	local |= cpumask_empty(&others);
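	/*
	 * A remote fence.i via the SBI is needed unless the flush can be
	 * satisfied locally: the MM is active on this hart and either the
	 * caller asked for a local flush or no other hart is running it.
	 */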
	if (mm != current->active_mm || !local) {
		cpumask_clear(&hmask);
		riscv_cpuid_to_hartid_mask(&others, &hmask);
		sbi_remote_fence_i(hmask.bits);
	} else {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart. Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	}

	preempt_enable();
}