// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

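/*
 * Mapping from logical Linux cpu ids to physical RISC-V hart ids. Hart
 * ids need not be contiguous or start at zero, so the kernel keeps this
 * table; unused slots hold INVALID_HARTID.
 */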
unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};

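/* The boot hart is always mapped to logical cpu 0. */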
void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/* A collection of single bit ipi messages. */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

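/*
 * Translate a hart id back to a logical cpu id by scanning the map.
 * Note that this logs an error and returns NR_CPUS (an out-of-range
 * cpu id) when no matching hart is found.
 */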
int riscv_hartid_to_cpuid(int hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
	return i;
}

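/* Convert a mask of logical cpu ids into the corresponding mask of hart ids. */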
void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
	int cpu;

	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}

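/* Let generic code match a cpu's physical id (its hart id) to a logical cpu. */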
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

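/* Mark this cpu offline and park it in a wait-for-interrupt loop forever. */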
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

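/*
 * Post the message bit for every target cpu, translate the logical cpu
 * mask into a hart mask, and ask the SBI firmware to raise a software
 * interrupt on those harts. The mb() calls order the message stores
 * against the SBI call so the receiving harts see them.
 */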
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	int cpuid, hartid;
	struct cpumask hartid_mask;

	cpumask_clear(&hartid_mask);
	mb();
	for_each_cpu(cpuid, mask) {
		set_bit(op, &ipi_data[cpuid].bits);
		hartid = cpuid_to_hartid_map(cpuid);
		cpumask_set_cpu(hartid, &hartid_mask);
	}
	mb();
	sbi_send_ipi(cpumask_bits(&hartid_mask));
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	send_ipi_mask(cpumask_of(cpu), op);
}

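/* Acknowledge the IPI by clearing the supervisor software interrupt pending bit. */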
static inline void clear_ipi(void)
{
	csr_clear(CSR_SIP, SIE_SSIE);
}

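/*
 * Software interrupt (IPI) handler: acknowledge the interrupt, then
 * atomically drain this cpu's pending message bits and dispatch each
 * message, looping until no new messages have been posted.
 */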
void riscv_software_interrupt(void)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	clear_ipi();

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
};

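/* Dump per-cpu IPI statistics, one row per message type. */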
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}

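/*
 * Stop all other cpus: send IPI_CPU_STOP to every other online cpu,
 * then poll for up to one second for them to go offline before
 * warning about any stragglers.
 */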
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}

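/* Kick @cpu so the scheduler runs on it as soon as possible. */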
200void smp_send_reschedule(int cpu)
201{
Christoph Hellwig7e0e5082019-08-21 23:58:32 +0900202 send_ipi_single(cpu, IPI_RESCHEDULE);
Palmer Dabbelt76d2a042017-07-10 18:00:26 -0700203}