blob: cbc9162689d0f56c3f0fa7b8776df10cf10c4482 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002#ifndef __LINUX_SMP_H
3#define __LINUX_SMP_H
4
5/*
6 * Generic SMP support
7 * Alan Cox. <alan@redhat.com>
8 */
9
Heiko Carstens79974a02007-05-16 22:11:09 -070010#include <linux/errno.h>
David S. Miller54514a72008-09-23 22:15:57 -070011#include <linux/types.h>
Jens Axboe3d442232008-06-26 11:21:34 +020012#include <linux/list.h>
Jens Axboe3d442232008-06-26 11:21:34 +020013#include <linux/cpumask.h>
Heiko Carstens04948c72011-03-23 08:24:58 +010014#include <linux/init.h>
Christoph Hellwig6897fc22014-01-30 15:45:47 -080015#include <linux/llist.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016
/* Handler type for cross-CPU function calls. */
typedef void (*smp_call_func_t)(void *info);
/* Per-CPU predicate used by on_each_cpu_cond*() to select target CPUs. */
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/*
 * Descriptor for one cross-CPU function-call request: the handler, its
 * argument, and the queueing/flag state used while the call is pending.
 */
struct __call_single_data {
	struct llist_node llist;	/* lockless-list linkage; presumably the pending-call queue — see kernel/smp.c */
	smp_call_func_t func;		/* handler to run on the target CPU */
	void *info;			/* opaque argument passed to func */
	unsigned int flags;		/* request state bits — see kernel/smp.c for their meaning */
};

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));
29
/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

/*
 * Run @func(@info) on the CPU @cpuid. NOTE(review): @wait presumably
 * means "block until @func has completed" — confirm in kernel/smp.c.
 */
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

/*
 * Call a function on all processors
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait);

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait);

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true (smp_cond_func_t returns bool). This may
 * include the local processor.
 */
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait);

/* As on_each_cpu_cond(), but restricted to the CPUs in @mask. */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);

/*
 * Queue @csd on @cpu without waiting for completion. NOTE(review): the
 * caller presumably must keep @csd alive until the call finishes —
 * confirm against kernel/smp.c.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd);
Andrew Morton7cf64f82013-11-14 14:32:09 -080060
#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);

/* Run @func on any one CPU out of @mask; see kernel/smp.c for selection. */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);
Andi Kleenca74a6f2008-01-30 13:33:17 +0100130
/* Id of the CPU the kernel booted on; presumably set during early boot
 * by arch/core SMP code — confirm where it is assigned. */
extern int __boot_cpu_id;

/* Return the id of the boot CPU (as recorded in __boot_cpu_id). */
static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}
137
#else /* !SMP */

/* UP stub: there are no other CPUs to stop. */
static inline void smp_send_stop(void) { }

/*
 * These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
/* UP stub: no remote CPUs exist, so a cross-call is a no-op. */
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

/* UP stub: no other CPU to reschedule. */
static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }
Rusty Russell2ea6dec2009-11-17 14:27:27 -0800157
158static inline int
David Howells3a5f65df2010-10-27 17:28:36 +0100159smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
Rusty Russell2ea6dec2009-11-17 14:27:27 -0800160 void *info, int wait)
Jens Axboe3d442232008-06-26 11:21:34 +0200161{
Rusty Russell2ea6dec2009-11-17 14:27:27 -0800162 return smp_call_function_single(0, func, info, wait);
Jens Axboe3d442232008-06-26 11:21:34 +0200163}
Rusty Russell2ea6dec2009-11-17 14:27:27 -0800164
/* UP stubs: nothing to synchronize or wake when there is one CPU. */
static inline void kick_all_cpus_sync(void) {  }
static inline void wake_up_all_idle_cpus(void) {  }

/* On UP, smp_init() reduces to an optional late-init hook. */
#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

/* The sole CPU on a UP system is, by definition, the boot CPU (id 0). */
static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */
181
/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */
Peter Zijlstra9ed7d752019-02-27 09:48:51 +0100188
/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions
 * and WARN when smp_processor_id() is used while the CPU id is not
 * stable.
 */
204
/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
 * regular asm read for the stable.
 */
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

/*
 * get_cpu(): disable preemption and return the (now stable) CPU id.
 * Must be paired with put_cpu(), which re-enables preemption.
 */
#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

/* Arch hooks bracketing re-enabling of non-boot CPUs; presumably used
 * around suspend/resume — confirm against kernel/cpu.c. */
extern void arch_enable_nonboot_cpus_begin(void);
extern void arch_enable_nonboot_cpus_end(void);

void smp_setup_processor_id(void);

/*
 * Run @func(@par) on @cpu. NOTE(review): @phys presumably forces the
 * call onto the physical CPU itself — confirm in kernel/smp.c.
 */
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);

/* SMP core functions: presumably cpuhp (CPU hotplug) state callbacks for
 * the call-function data — confirm against kernel/smp.c. */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */