/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp_types.h>

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
	union {
		struct __call_single_node node;
		struct {
			struct llist_node llist;
			unsigned int flags;
#ifdef CONFIG_64BIT
			u16 src, dst;
#endif
		};
	};
	smp_call_func_t func;
	void *info;
};

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

/*
 * Enqueue a llist_node on the call_single_queue; be very careful, read
 * flush_smp_call_function_queue() in detail.
 */
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);
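
/*
 * Usage sketch (illustrative only; store_answer() is an invented name,
 * not a kernel API): run a function on one specific CPU and block until
 * it has completed there. The callback runs in interrupt-like context
 * on the target CPU, so it must not sleep.
 *
 *	static void store_answer(void *info)
 *	{
 *		*(int *)info = 42;
 *	}
 *
 *	int answer = 0;
 *	int err = smp_call_function_single(cpu, store_answer, &answer, 1);
 */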

/*
 * Call a function on all processors
 */
void on_each_cpu(smp_call_func_t func, void *info, int wait);
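
/*
 * Usage sketch (illustrative; bump_counter() is an invented helper):
 * run the same function on every online CPU, including the calling
 * one, and with @wait non-zero block until all of them have finished.
 *
 *	static void bump_counter(void *info)
 *	{
 *		atomic_inc(info);
 *	}
 *
 *	atomic_t hits = ATOMIC_INIT(0);
 *
 *	on_each_cpu(bump_counter, &hits, 1);
 */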

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		void *info, bool wait);
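
/*
 * Usage sketch (illustrative, reusing the invented bump_counter()
 * helper from above): restrict the cross-call to an explicit set of
 * CPUs; cpumask_of(cpu) gives the single-CPU case, and a driver- or
 * node-specific mask the general one.
 *
 *	on_each_cpu_mask(cpumask_of(target_cpu), bump_counter, &hits, true);
 */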

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true. This may include the local
 * processor.
 */
void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait);

void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);
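
/*
 * Usage sketch (illustrative; the per-CPU flag and both helpers are
 * invented names): cond_func is evaluated for every CPU in the mask
 * (every online CPU for on_each_cpu_cond()), and func is only sent to
 * the CPUs for which it returned true.
 *
 *	static DEFINE_PER_CPU(int, pending_flush);
 *
 *	static bool cpu_needs_flush(int cpu, void *info)
 *	{
 *		return per_cpu(pending_flush, cpu) != 0;
 *	}
 *
 *	static void do_flush(void *info)
 *	{
 *		this_cpu_write(pending_flush, 0);
 *	}
 *
 *	on_each_cpu_cond(cpu_needs_flush, do_flush, NULL, true);
 */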

int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
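
/*
 * Usage sketch (illustrative; handle_event() and event_csd are invented
 * names): the async variant queues the call and returns immediately.
 * The caller owns the call_single_data_t storage and must not reuse it
 * until the callback has run on the target CPU.
 *
 *	static void handle_event(void *info)
 *	{
 *		pr_info("event handled on CPU %d\n", raw_smp_processor_id());
 *	}
 *
 *	static call_single_data_t event_csd = { .func = handle_event };
 *
 *	smp_call_function_single_async(target_cpu, &event_csd);
 */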

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);
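
/*
 * Usage sketch (illustrative; target_mask is invented and do_flush() is
 * the invented helper from the cond example above): unlike on_each_cpu(),
 * these run the function on the *other* CPUs only, so the caller handles
 * its own CPU directly. smp_call_function_many() expects to run with
 * preemption disabled; smp_call_function() disables it internally.
 *
 *	int cpu = get_cpu();
 *
 *	smp_call_function_many(target_mask, do_flush, NULL, true);
 *	do_flush(NULL);
 *	put_cpu();
 */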

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);
void wake_up_all_online_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }
static inline void wake_up_all_online_idle_cpus(void) { }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions and
 * WARN when smp_processor_id() is used while the CPU id is not stable.
 */
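
/*
 * Illustrative contrast (the per-CPU counter is an invented example):
 * raw_smp_processor_id() is fine when an occasionally stale answer is
 * harmless, e.g. picking a bucket for statistics; smp_processor_id()
 * is for code that already runs with preemption or IRQs off and relies
 * on staying on that CPU.
 *
 *	pr_debug("roughly on CPU %d\n", raw_smp_processor_id());
 *
 *	preempt_disable();
 *	this_cpu_inc(my_counter);
 *	pr_debug("pinned to CPU %d\n", smp_processor_id());
 *	preempt_enable();
 */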

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable case
 * but a regular asm read for the stable one.
 */
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()
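
/*
 * Usage sketch (illustrative; do_something_local() is an invented
 * helper): get_cpu()/put_cpu() bracket a region in which the returned
 * CPU id stays valid because preemption is disabled; prefer this over
 * calling smp_processor_id() from preemptible context.
 *
 *	int cpu = get_cpu();
 *
 *	do_something_local(cpu);
 *	put_cpu();
 */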

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);
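
/*
 * Usage sketch (illustrative; read_board_sensor() is an invented
 * callback): unlike the IPI-based calls above, smp_call_on_cpu() runs
 * the function from a workqueue worker bound to @cpu, so the callback
 * may sleep, and its return value is passed back to the caller. With
 * @phys set, the vCPU is additionally pinned to a physical CPU, which
 * some hypervisor or firmware interfaces require.
 *
 *	static int read_board_sensor(void *par)
 *	{
 *		return 0;
 *	}
 *
 *	int ret = smp_call_on_cpu(0, read_board_sensor, NULL, false);
 */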

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */