/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include "smpboot.h"

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
enum {
        CSD_FLAG_LOCK           = 0x01,
};

struct call_function_data {
        struct call_single_data __percpu *csd;
        cpumask_var_t           cpumask;
        cpumask_var_t           cpumask_ipi;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);

struct call_single_queue {
        struct list_head        list;
        raw_spinlock_t          lock;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                cpu_to_node(cpu)))
                        return notifier_from_errno(-ENOMEM);
                if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
                                cpu_to_node(cpu))) {
                        free_cpumask_var(cfd->cpumask);
                        return notifier_from_errno(-ENOMEM);
                }
                cfd->csd = alloc_percpu(struct call_single_data);
                if (!cfd->csd) {
                        free_cpumask_var(cfd->cpumask_ipi);
                        free_cpumask_var(cfd->cpumask);
                        return notifier_from_errno(-ENOMEM);
                }
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:

        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
                free_cpumask_var(cfd->cpumask_ipi);
                free_percpu(cfd->csd);
                break;
#endif
        };

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
        .notifier_call          = hotplug_cfd,
};

void __init call_function_init(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int i;

        for_each_possible_cpu(i) {
                struct call_single_queue *q = &per_cpu(call_single_queue, i);

                raw_spin_lock_init(&q->lock);
                INIT_LIST_HEAD(&q->list);
        }

        hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
        register_cpu_notifier(&hotplug_cfd_notifier);
}

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *csd)
{
        while (csd->flags & CSD_FLAG_LOCK)
                cpu_relax();
}

static void csd_lock(struct call_single_data *csd)
{
        csd_lock_wait(csd);
        csd->flags |= CSD_FLAG_LOCK;

        /*
         * prevent CPU from reordering the above assignment
         * to ->flags with any subsequent assignments to other
         * fields of the specified call_single_data structure:
         */
        smp_mb();
}

static void csd_unlock(struct call_single_data *csd)
{
        WARN_ON(!(csd->flags & CSD_FLAG_LOCK));

        /*
         * ensure we're all done before releasing data:
         */
        smp_mb();

        csd->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. The csd must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
{
        struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
        unsigned long flags;
        int ipi;

        raw_spin_lock_irqsave(&dst->lock, flags);
        ipi = list_empty(&dst->list);
        list_add_tail(&csd->list, &dst->list);
        raw_spin_unlock_irqrestore(&dst->lock, flags);

        /*
         * The list addition should be visible before we send the IPI:
         * the handler locks the list to pull the entry off it, relying on
         * the normal cache coherency rules implied by spinlocks.
         *
         * If IPIs can go out of order to the cache coherency protocol
         * in an architecture, sufficient synchronisation should be added
         * to arch code to make it appear to obey cache coherency WRT
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
        if (ipi)
                arch_send_call_function_single_ipi(cpu);

        if (wait)
                csd_lock_wait(csd);
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
        struct call_single_queue *q = &__get_cpu_var(call_single_queue);
        LIST_HEAD(list);

        /*
         * Shouldn't receive this interrupt on a cpu that is not yet online.
         */
        WARN_ON_ONCE(!cpu_online(smp_processor_id()));

        raw_spin_lock(&q->lock);
        list_replace_init(&q->list, &list);
        raw_spin_unlock(&q->lock);

        while (!list_empty(&list)) {
                struct call_single_data *csd;
                unsigned int csd_flags;

                csd = list_entry(list.next, struct call_single_data, list);
                list_del(&csd->list);

                /*
                 * 'csd' can be invalid after this call if flags == 0
                 * (when called through generic_exec_single()),
                 * so save them away before making the call:
                 */
                csd_flags = csd->flags;

                csd->func(csd->info);

                /*
                 * Unlocked CSDs are valid through generic_exec_single():
                 */
                if (csd_flags & CSD_FLAG_LOCK)
                        csd_unlock(csd);
        }
}

static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 */
int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
{
        struct call_single_data d = {
                .flags = 0,
        };
        unsigned long flags;
        int this_cpu;
        int err = 0;

        /*
         * prevent preemption and reschedule on another processor,
         * as well as CPU removal
         */
        this_cpu = get_cpu();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress);

        if (cpu == this_cpu) {
                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        } else {
                if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
                        struct call_single_data *csd = &d;

                        if (!wait)
                                csd = &__get_cpu_var(csd_data);

                        csd_lock(csd);

                        csd->func = func;
                        csd->info = info;
                        generic_exec_single(cpu, csd, wait);
                } else {
                        err = -ENXIO;   /* CPU not online */
                }
        }

        put_cpu();

        return err;
}
EXPORT_SYMBOL(smp_call_function_single);
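
/*
 * Illustrative sketch only (not part of this file): a minimal caller of
 * smp_call_function_single() from process context.  The callback and the
 * example_* names are hypothetical.
 */
#if 0
static void example_bump_counter(void *info)
{
        /* Runs on the target CPU, in hard interrupt context on remote CPUs. */
        unsigned int *counter = info;

        (*counter)++;
}

static int example_bump_on_cpu(int cpu)
{
        unsigned int count = 0;

        /* wait=1: safe to pass on-stack data, we block until @func is done. */
        return smp_call_function_single(cpu, example_bump_counter, &count, 1);
}
#endif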

/*
 * smp_call_function_any - Run a function on any of the given cpus
 * @mask: The mask of cpus it can run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed.
 *
 * Returns 0 on success, else a negative status code (if no cpus were online).
 * Note that @wait will be implicitly turned on in case of allocation failures,
 * since we fall back to on-stack allocation.
 *
 * Selection preference:
 *      1) current cpu if in @mask
 *      2) any cpu of current node if in @mask
 *      3) any other online cpu in @mask
 */
int smp_call_function_any(const struct cpumask *mask,
                          smp_call_func_t func, void *info, int wait)
{
        unsigned int cpu;
        const struct cpumask *nodemask;
        int ret;

        /* Try for same CPU (cheapest) */
        cpu = get_cpu();
        if (cpumask_test_cpu(cpu, mask))
                goto call;

        /* Try for same node. */
        nodemask = cpumask_of_node(cpu_to_node(cpu));
        for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
             cpu = cpumask_next_and(cpu, nodemask, mask)) {
                if (cpu_online(cpu))
                        goto call;
        }

        /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
        cpu = cpumask_any_and(mask, cpu_online_mask);
call:
        ret = smp_call_function_single(cpu, func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL_GPL(smp_call_function_any);
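
/*
 * Illustrative sketch only (not part of this file): smp_call_function_any()
 * is useful when the work merely has to run somewhere inside a mask, e.g.
 * on any CPU sharing a package-wide resource.  The mask and callback below
 * are hypothetical.
 */
#if 0
static void example_read_shared_counter(void *info)
{
        u64 *val = info;

        *val = 0;       /* e.g. read a package-scoped counter here */
}

static int example_read_from_mask(const struct cpumask *mask)
{
        u64 val;

        /* Prefers the current CPU, then a CPU on the same node, then any. */
        return smp_call_function_any(mask, example_read_shared_counter,
                                     &val, 1);
}
#endif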

/**
 * __smp_call_function_single(): Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @csd: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on specified CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @csd inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *csd,
                                int wait)
{
        unsigned int this_cpu;
        unsigned long flags;

        this_cpu = get_cpu();
        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
                     && !oops_in_progress);

        if (cpu == this_cpu) {
                local_irq_save(flags);
                csd->func(csd->info);
                local_irq_restore(flags);
        } else {
                csd_lock(csd);
                generic_exec_single(cpu, csd, wait);
        }
        put_cpu();
}

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                            smp_call_func_t func, void *info, bool wait)
{
        struct call_function_data *cfd;
        int cpu, next_cpu, this_cpu = smp_processor_id();

        /*
         * Can deadlock when called with interrupts disabled.
         * We allow CPUs that are not yet online though, as no one else can
         * send smp call function interrupt to this cpu and as such deadlocks
         * can't happen.
         */
        WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
                     && !oops_in_progress && !early_boot_irqs_disabled);

        /* Try to fastpath.  So, what's a CPU they want? Ignoring this one. */
        cpu = cpumask_first_and(mask, cpu_online_mask);
        if (cpu == this_cpu)
                cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

        /* No online cpus?  We're done. */
        if (cpu >= nr_cpu_ids)
                return;

        /* Do we have another CPU which isn't us? */
        next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
        if (next_cpu == this_cpu)
                next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

        /* Fastpath: do that cpu by itself. */
        if (next_cpu >= nr_cpu_ids) {
                smp_call_function_single(cpu, func, info, wait);
                return;
        }

        cfd = &__get_cpu_var(cfd_data);

        cpumask_and(cfd->cpumask, mask, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, cfd->cpumask);

        /* Some callers race with other cpus changing the passed mask */
        if (unlikely(!cpumask_weight(cfd->cpumask)))
                return;

        /*
         * After we put an entry into the list, cfd->cpumask may be cleared
         * again when another CPU sends another IPI for an SMP function call,
         * so cfd->cpumask will be zero.
         */
        cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);

        for_each_cpu(cpu, cfd->cpumask) {
                struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
                struct call_single_queue *dst =
                                        &per_cpu(call_single_queue, cpu);
                unsigned long flags;

                csd_lock(csd);
                csd->func = func;
                csd->info = info;

                raw_spin_lock_irqsave(&dst->lock, flags);
                list_add_tail(&csd->list, &dst->list);
                raw_spin_unlock_irqrestore(&dst->lock, flags);
        }

        /* Send a message to all CPUs in the map */
        arch_send_call_function_ipi_mask(cfd->cpumask_ipi);

        if (wait) {
                for_each_cpu(cpu, cfd->cpumask) {
                        struct call_single_data *csd;

                        csd = per_cpu_ptr(cfd->csd, cpu);
                        csd_lock_wait(csd);
                }
        }
}
EXPORT_SYMBOL(smp_call_function_many);
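
/*
 * Illustrative sketch only (not part of this file): smp_call_function_many()
 * requires preemption to be disabled by the caller.  The callback and mask
 * handling below are hypothetical.
 */
#if 0
static void example_invalidate(void *info)
{
        /* e.g. drop this CPU's cached view of the object passed in @info */
}

static void example_invalidate_on(const struct cpumask *mask, void *object)
{
        preempt_disable();
        smp_call_function_many(mask, example_invalidate, object, true);
        preempt_enable();
}
#endif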

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(smp_call_func_t func, void *info, int wait)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
        preempt_enable();

        return 0;
}
EXPORT_SYMBOL(smp_call_function);
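
/*
 * Illustrative sketch only (not part of this file): smp_call_function()
 * reaches only the *other* online CPUs; callers that also need the local
 * CPU typically use on_each_cpu() instead.  The callback is hypothetical.
 */
#if 0
static void example_sync_state(void *info)
{
        /* e.g. flush a per-CPU buffer */
}

static void example_sync_all_others(void)
{
        /* Must not be called with interrupts disabled or from IRQ context. */
        smp_call_function(example_sync_state, NULL, 1);
}
#endif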
#endif /* USE_GENERIC_SMP_HELPERS */

/* Setup configured maximum number of CPUs to activate */
unsigned int setup_max_cpus = NR_CPUS;
EXPORT_SYMBOL(setup_max_cpus);


/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

void __weak arch_disable_smp_support(void) { }

static int __init nosmp(char *str)
{
        setup_max_cpus = 0;
        arch_disable_smp_support();

        return 0;
}

early_param("nosmp", nosmp);

/* this is hard limit */
static int __init nrcpus(char *str)
{
        int nr_cpus;

        get_option(&str, &nr_cpus);
        if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
                nr_cpu_ids = nr_cpus;

        return 0;
}

early_param("nr_cpus", nrcpus);

static int __init maxcpus(char *str)
{
        get_option(&str, &setup_max_cpus);
        if (setup_max_cpus == 0)
                arch_disable_smp_support();

        return 0;
}

early_param("maxcpus", maxcpus);

/* Setup number of possible processor ids */
int nr_cpu_ids __read_mostly = NR_CPUS;
EXPORT_SYMBOL(nr_cpu_ids);

/* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
void __init setup_nr_cpu_ids(void)
{
        nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
}

/* Called by boot processor to activate the rest. */
void __init smp_init(void)
{
        unsigned int cpu;

        idle_threads_init();

        /* FIXME: This should be done in userspace --RR */
        for_each_present_cpu(cpu) {
                if (num_online_cpus() >= setup_max_cpus)
                        break;
                if (!cpu_online(cpu))
                        cpu_up(cpu);
        }

        /* Any cleanup work */
        printk(KERN_INFO "Brought up %ld CPUs\n", (long)num_online_cpus());
        smp_cpus_done(setup_max_cpus);
}

/*
 * Call a function on all processors.  May be used during early boot while
 * early_boot_irqs_disabled is set.  Use local_irq_save/restore() instead
 * of local_irq_disable/enable().
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        unsigned long flags;
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_save(flags);
        func(info);
        local_irq_restore(flags);
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
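
/*
 * Illustrative sketch only (not part of this file): on_each_cpu() runs the
 * callback on every online CPU, including the local one.  The callback is
 * hypothetical.
 */
#if 0
static void example_reset_counters(void *unused)
{
        /* e.g. clear this CPU's statistics */
}

static void example_reset_all_counters(void)
{
        on_each_cpu(example_reset_counters, NULL, 1);
}
#endif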

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
                      void *info, bool wait)
{
        int cpu = get_cpu();

        smp_call_function_many(mask, func, info, wait);
        if (cpumask_test_cpu(cpu, mask)) {
                local_irq_disable();
                func(info);
                local_irq_enable();
        }
        put_cpu();
}
EXPORT_SYMBOL(on_each_cpu_mask);
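
/*
 * Illustrative sketch only (not part of this file): unlike
 * smp_call_function_many(), on_each_cpu_mask() also runs @func locally when
 * the current CPU is in @mask.  The node-based mask below is hypothetical.
 */
#if 0
static void example_drain(void *info)
{
        /* e.g. drain a per-CPU queue belonging to the object in @info */
}

static void example_drain_node(int node, void *object)
{
        on_each_cpu_mask(cpumask_of_node(node), example_drain, object, true);
}
#endif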

/*
 * on_each_cpu_cond(): Call a function on each processor for which
 * the supplied function cond_func returns true, optionally waiting
 * for all the required CPUs to finish. This may include the local
 * processor.
 * @cond_func:  A callback function that is passed a cpu id and
 *              the info parameter. The function is called
 *              with preemption disabled. The function should
 *              return a boolean value indicating whether to IPI
 *              the specified CPU.
 * @func:       The function to run on all applicable CPUs.
 *              This must be fast and non-blocking.
 * @info:       An arbitrary pointer to pass to both functions.
 * @wait:       If true, wait (atomically) until function has
 *              completed on other CPUs.
 * @gfp_flags:  GFP flags to use when allocating the cpumask
 *              used internally by the function.
 *
 * The function might sleep if the GFP flags indicate a non-atomic
 * allocation is allowed.
 *
 * Preemption is disabled to protect against CPUs going offline but not online.
 * CPUs going online during the call will not be seen or sent an IPI.
 *
 * You must not call this function with disabled interrupts or
 * from a hardware interrupt handler or from a bottom half handler.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
                      smp_call_func_t func, void *info, bool wait,
                      gfp_t gfp_flags)
{
        cpumask_var_t cpus;
        int cpu, ret;

        might_sleep_if(gfp_flags & __GFP_WAIT);

        if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info))
                                cpumask_set_cpu(cpu, cpus);
                on_each_cpu_mask(cpus, func, info, wait);
                preempt_enable();
                free_cpumask_var(cpus);
        } else {
                /*
                 * No free cpumask, bother. No matter, we'll
                 * just have to IPI them one by one.
                 */
                preempt_disable();
                for_each_online_cpu(cpu)
                        if (cond_func(cpu, info)) {
                                ret = smp_call_function_single(cpu, func,
                                                               info, wait);
                                WARN_ON_ONCE(ret);
                        }
                preempt_enable();
        }
}
EXPORT_SYMBOL(on_each_cpu_cond);
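
/*
 * Illustrative sketch only (not part of this file): on_each_cpu_cond() sends
 * IPIs only to CPUs for which the predicate returns true, e.g. CPUs that
 * have per-CPU work queued.  The per-CPU flag and example_* names are
 * hypothetical.
 */
#if 0
static DEFINE_PER_CPU(bool, example_work_pending);

static bool example_has_work(int cpu, void *info)
{
        return per_cpu(example_work_pending, cpu);
}

static void example_do_work(void *info)
{
        __this_cpu_write(example_work_pending, false);
}

static void example_flush_pending_work(void)
{
        on_each_cpu_cond(example_has_work, example_do_work, NULL, true,
                         GFP_KERNEL);
}
#endif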

static void do_nothing(void *unused)
{
}

/**
 * kick_all_cpus_sync - Force all cpus out of idle
 *
 * Used to synchronize the update of pm_idle function pointer. It's
 * called after the pointer is updated and returns after the dummy
 * callback function has been executed on all cpus. The execution of
 * the function can only happen on the remote cpus after they have
 * left the idle function which had been called via pm_idle function
 * pointer. So it's guaranteed that nothing uses the previous pointer
 * anymore.
 */
void kick_all_cpus_sync(void)
{
        /* Make sure the change is visible before we kick the cpus */
        smp_mb();
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
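
/*
 * Illustrative sketch only (not part of this file): the pointer-update
 * pattern described above - publish a new hook, then use
 * kick_all_cpus_sync() so no CPU can still be running through the old one.
 * example_hook is hypothetical.
 */
#if 0
static void (*example_hook)(void);

static void example_update_hook(void (*new_hook)(void))
{
        example_hook = new_hook;
        /* kick_all_cpus_sync() provides the required memory barrier. */
        kick_all_cpus_sync();
}
#endif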