Thomas Gleixner | caab277 | 2019-06-03 07:44:50 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
Palmer Dabbelt | 76d2a04 | 2017-07-10 18:00:26 -0700 | [diff] [blame] | 2 | /* |
| 3 | * SMP initialisation and IPI support |
| 4 | * Based on arch/arm64/kernel/smp.c |
| 5 | * |
| 6 | * Copyright (C) 2012 ARM Ltd. |
| 7 | * Copyright (C) 2015 Regents of the University of California |
| 8 | * Copyright (C) 2017 SiFive |
Palmer Dabbelt | 76d2a04 | 2017-07-10 18:00:26 -0700 | [diff] [blame] | 9 | */ |
| 10 | |
| 11 | #include <linux/interrupt.h> |
| 12 | #include <linux/smp.h> |
| 13 | #include <linux/sched.h> |
Anup Patel | 8b20d2d | 2018-10-02 12:15:07 -0700 | [diff] [blame] | 14 | #include <linux/seq_file.h> |
Andreas Schwab | 37a107f | 2018-12-11 11:20:40 +0100 | [diff] [blame] | 15 | #include <linux/delay.h> |
Palmer Dabbelt | 76d2a04 | 2017-07-10 18:00:26 -0700 | [diff] [blame] | 16 | |
| 17 | #include <asm/sbi.h> |
| 18 | #include <asm/tlbflush.h> |
| 19 | #include <asm/cacheflush.h> |
| 20 | |
/*
 * IPI message types.  Each value is a bit position in the per-cpu
 * ipi_data[].bits word: senders set the bit, the receiving CPU's
 * software-interrupt handler clears and dispatches it.
 */
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};
| 27 | |
/*
 * Mapping from logical cpu id to hardware thread id (hartid).
 * Every entry starts out INVALID_HARTID; smp_setup_processor_id()
 * fills in the boot CPU's slot.
 */
unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};
| 31 | |
/* Record the boot hart's hartid as the mapping for logical CPU 0. */
void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}
| 36 | |
/*
 * A collection of single bit ipi messages.
 * One entry per possible CPU: 'bits' is the bitmask of pending
 * ipi_message_type values, 'stats' counts handled IPIs per type.
 * Both members are cacheline aligned — presumably to keep the
 * sender-written 'bits' and the receiver-written 'stats' from
 * false-sharing a line.
 */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
| 42 | |
Atish Patra | 6825c7a | 2018-10-02 12:15:04 -0700 | [diff] [blame] | 43 | int riscv_hartid_to_cpuid(int hartid) |
| 44 | { |
Atish Patra | f1f47c6 | 2019-04-24 14:48:01 -0700 | [diff] [blame] | 45 | int i; |
Olof Johansson | 4bde632 | 2017-11-29 17:55:17 -0800 | [diff] [blame] | 46 | |
Atish Patra | 6825c7a | 2018-10-02 12:15:04 -0700 | [diff] [blame] | 47 | for (i = 0; i < NR_CPUS; i++) |
| 48 | if (cpuid_to_hartid_map(i) == hartid) |
| 49 | return i; |
| 50 | |
| 51 | pr_err("Couldn't find cpu id for hartid [%d]\n", hartid); |
Atish Patra | 6825c7a | 2018-10-02 12:15:04 -0700 | [diff] [blame] | 52 | return i; |
| 53 | } |
| 54 | |
| 55 | void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out) |
| 56 | { |
| 57 | int cpu; |
| 58 | |
| 59 | for_each_cpu(cpu, in) |
| 60 | cpumask_set_cpu(cpuid_to_hartid_map(cpu), out); |
| 61 | } |
Atish Patra | 7011456 | 2019-04-24 14:47:58 -0700 | [diff] [blame] | 62 | |
| 63 | bool arch_match_cpu_phys_id(int cpu, u64 phys_id) |
| 64 | { |
| 65 | return phys_id == cpuid_to_hartid_map(cpu); |
| 66 | } |
| 67 | |
/* Changing the profiling timer multiplier is unsupported on RISC-V. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
| 73 | |
Andreas Schwab | 37a107f | 2018-12-11 11:20:40 +0100 | [diff] [blame] | 74 | static void ipi_stop(void) |
| 75 | { |
| 76 | set_cpu_online(smp_processor_id(), false); |
| 77 | while (1) |
| 78 | wait_for_interrupt(); |
| 79 | } |
| 80 | |
/*
 * Post IPI message @op to every CPU in @mask, then trigger their
 * software interrupts via the SBI.
 */
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpuid, hartid;
	struct cpumask hartid_mask;

	cpumask_clear(&hartid_mask);
	/* Order prior stores against setting the pending bits below. */
	mb();
	for_each_cpu(cpuid, mask) {
		set_bit(op, &ipi_data[cpuid].bits);
		/* SBI addresses targets by hartid, not logical cpu id. */
		hartid = cpuid_to_hartid_map(cpuid);
		cpumask_set_cpu(hartid, &hartid_mask);
	}
	/* Make the pending bits visible before the IPIs are raised. */
	mb();
	sbi_send_ipi(cpumask_bits(&hartid_mask));
}
| 96 | |
/* Convenience wrapper: send IPI message @op to a single @cpu. */
static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	send_ipi_mask(cpumask_of(cpu), op);
}
| 101 | |
/* Acknowledge the IPI: clear the supervisor software-interrupt pending bit. */
static inline void clear_ipi(void)
{
	csr_clear(CSR_SIP, SIE_SSIE);
}
| 106 | |
/*
 * Software-interrupt (IPI) handler for this CPU: acknowledge the
 * interrupt, then drain and dispatch every pending IPI message,
 * bumping the per-cpu stats counter for each type handled.
 */
void riscv_software_interrupt(void)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	/* Acknowledge before scanning so a new IPI re-raises the interrupt. */
	clear_ipi();

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		/* Atomically claim all currently-pending messages. */
		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		/* No bits above IPI_MAX may ever be set. */
		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}
| 145 | |
/* Human-readable labels for each IPI type, printed by show_ipi_stats(). */
static const char * const ipi_names[] = {
	[IPI_RESCHEDULE] = "Rescheduling interrupts",
	[IPI_CALL_FUNC] = "Function call interrupts",
	[IPI_CPU_STOP] = "CPU stop interrupts",
};
| 151 | |
| 152 | void show_ipi_stats(struct seq_file *p, int prec) |
| 153 | { |
| 154 | unsigned int cpu, i; |
| 155 | |
| 156 | for (i = 0; i < IPI_MAX; i++) { |
| 157 | seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, |
| 158 | prec >= 4 ? " " : ""); |
| 159 | for_each_online_cpu(cpu) |
| 160 | seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]); |
| 161 | seq_printf(p, " %s\n", ipi_names[i]); |
| 162 | } |
| 163 | } |
| 164 | |
/* Arch hook for smp_call_function(): IPI every CPU in @mask. */
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}
| 169 | |
/* Arch hook for smp_call_function_single(): IPI one @cpu. */
void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}
| 174 | |
Palmer Dabbelt | 76d2a04 | 2017-07-10 18:00:26 -0700 | [diff] [blame] | 175 | void smp_send_stop(void) |
| 176 | { |
Andreas Schwab | 37a107f | 2018-12-11 11:20:40 +0100 | [diff] [blame] | 177 | unsigned long timeout; |
| 178 | |
| 179 | if (num_online_cpus() > 1) { |
| 180 | cpumask_t mask; |
| 181 | |
| 182 | cpumask_copy(&mask, cpu_online_mask); |
| 183 | cpumask_clear_cpu(smp_processor_id(), &mask); |
| 184 | |
| 185 | if (system_state <= SYSTEM_RUNNING) |
| 186 | pr_crit("SMP: stopping secondary CPUs\n"); |
Christoph Hellwig | 7e0e508 | 2019-08-21 23:58:32 +0900 | [diff] [blame^] | 187 | send_ipi_mask(&mask, IPI_CPU_STOP); |
Andreas Schwab | 37a107f | 2018-12-11 11:20:40 +0100 | [diff] [blame] | 188 | } |
| 189 | |
| 190 | /* Wait up to one second for other CPUs to stop */ |
| 191 | timeout = USEC_PER_SEC; |
| 192 | while (num_online_cpus() > 1 && timeout--) |
| 193 | udelay(1); |
| 194 | |
| 195 | if (num_online_cpus() > 1) |
| 196 | pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", |
| 197 | cpumask_pr_args(cpu_online_mask)); |
Palmer Dabbelt | 76d2a04 | 2017-07-10 18:00:26 -0700 | [diff] [blame] | 198 | } |
| 199 | |
/* Arch hook for the scheduler: kick @cpu with a rescheduling IPI. */
void smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}