// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP initialisation and IPI support
 * Based on arch/arm64/kernel/smp.c
 *
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/delay.h>

#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
	IPI_MAX
};

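/*
 * Map from a Linux logical CPU id to the RISC-V hart id it runs on.
 * Entries stay INVALID_HARTID until the corresponding CPU is mapped
 * during boot.
 */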
unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
	[0 ... NR_CPUS-1] = INVALID_HARTID
};

void __init smp_setup_processor_id(void)
{
	cpuid_to_hartid_map(0) = boot_cpu_hartid;
}

/* A collection of single bit ipi messages. */
static struct {
	unsigned long stats[IPI_MAX] ____cacheline_aligned;
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;

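/*
 * Reverse lookup: a linear scan of the map.  Note that on a miss this
 * returns NR_CPUS (the loop index after the scan) alongside the error
 * message, so callers must range-check the result.
 */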
int riscv_hartid_to_cpuid(int hartid)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpuid_to_hartid_map(i) == hartid)
			return i;

	pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
	return i;
}

void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
	int cpu;

	cpumask_clear(out);
	for_each_cpu(cpu, in)
		cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}

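/*
 * Used by the generic CPU-node matching code (e.g. DT parsing), which
 * identifies CPUs by their physical id - the hart id on RISC-V.
 */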
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpuid_to_hartid_map(cpu);
}

/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

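/*
 * Final resting place for a CPU told to stop: mark it offline, then
 * loop in wait_for_interrupt() forever.
 */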
static void ipi_stop(void)
{
	set_cpu_online(smp_processor_id(), false);
	while (1)
		wait_for_interrupt();
}

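/*
 * Two-step IPI protocol: publish the message type in each target CPU's
 * ipi_data bits (fenced so the stores are visible before the interrupt
 * arrives), then ask the SBI firmware to raise a software interrupt on
 * the corresponding harts.
 */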
static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
{
	struct cpumask hartid_mask;
	int cpu;

	smp_mb__before_atomic();
	for_each_cpu(cpu, mask)
		set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
	sbi_send_ipi(cpumask_bits(&hartid_mask));
}

static void send_ipi_single(int cpu, enum ipi_message_type op)
{
	int hartid = cpuid_to_hartid_map(cpu);

	smp_mb__before_atomic();
	set_bit(op, &ipi_data[cpu].bits);
	smp_mb__after_atomic();

	sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
}

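/*
 * Acknowledge the IPI by clearing the supervisor software-interrupt
 * pending bit in the sip CSR.
 */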
static inline void clear_ipi(void)
{
	csr_clear(CSR_SIP, SIE_SSIE);
}

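/*
 * Software-interrupt (IPI) handler, entered from the trap path.
 * Pending messages are consumed atomically with xchg(), so bits set
 * while a batch is being handled are picked up on the next loop
 * iteration.
 */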
void riscv_software_interrupt(void)
{
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

	clear_ipi();

	while (true) {
		unsigned long ops;

		/* Order bit clearing and data access. */
		mb();

		ops = xchg(pending_ipis, 0);
		if (ops == 0)
			return;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
			scheduler_ipi();
		}

		if (ops & (1 << IPI_CALL_FUNC)) {
			stats[IPI_CALL_FUNC]++;
			generic_smp_call_function_interrupt();
		}

		if (ops & (1 << IPI_CPU_STOP)) {
			stats[IPI_CPU_STOP]++;
			ipi_stop();
		}

		BUG_ON((ops >> IPI_MAX) != 0);

		/* Order data access and bit testing. */
		mb();
	}
}

static const char * const ipi_names[] = {
	[IPI_RESCHEDULE]	= "Rescheduling interrupts",
	[IPI_CALL_FUNC]		= "Function call interrupts",
	[IPI_CPU_STOP]		= "CPU stop interrupts",
};

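/*
 * Dump the per-CPU IPI counters, one row per message type; this backs
 * the IPI lines shown in /proc/interrupts.
 */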
void show_ipi_stats(struct seq_file *p, int prec)
{
	unsigned int cpu, i;

	for (i = 0; i < IPI_MAX; i++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
			   prec >= 4 ? " " : "");
		for_each_online_cpu(cpu)
			seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
		seq_printf(p, " %s\n", ipi_names[i]);
	}
}

void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
	send_ipi_mask(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_single(cpu, IPI_CALL_FUNC);
}

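/*
 * Stop all other CPUs: send IPI_CPU_STOP to every other online CPU,
 * then poll for up to one second for them to drop out of
 * cpu_online_mask.
 */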
void smp_send_stop(void)
{
	unsigned long timeout;

	if (num_online_cpus() > 1) {
		cpumask_t mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);

		if (system_state <= SYSTEM_RUNNING)
			pr_crit("SMP: stopping secondary CPUs\n");
		send_ipi_mask(&mask, IPI_CPU_STOP);
	}

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);

	if (num_online_cpus() > 1)
		pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
			cpumask_pr_args(cpu_online_mask));
}

void smp_send_reschedule(int cpu)
{
	send_ipi_single(cpu, IPI_RESCHEDULE);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);