/*
 * Uniprocessor-only support functions. The counterpart to kernel/smp.c
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>

/*
 * On UP there is nowhere else to run: @func is simply called locally with
 * interrupts disabled.  CPU 0 is the only valid target.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	unsigned long flags;

	WARN_ON(cpu != 0);

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);

/*
 * The "async" variant cannot defer work to another CPU on UP either, so the
 * csd's function runs immediately, with interrupts disabled.
 */
int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);

int on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(on_each_cpu);

/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);
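
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might hand a possibly-empty mask to on_each_cpu_mask(), which is why the
 * implementation above still tests the mask on UP.  The per-CPU flag and
 * the example_* helpers are hypothetical.
 */
static DEFINE_PER_CPU(bool, example_needs_flush);

static void example_flush_one(void *info)
{
	/* Hypothetical per-CPU flush work. */
	this_cpu_write(example_needs_flush, false);
}

static void __maybe_unused example_flush_marked_cpus(void)
{
	cpumask_var_t mask;
	int cpu;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return;

	for_each_online_cpu(cpu)
		if (per_cpu(example_needs_flush, cpu))
			cpumask_set_cpu(cpu, mask);

	/* The mask may end up empty; on_each_cpu_mask() copes with that. */
	on_each_cpu_mask(mask, example_flush_one, NULL, true);
	free_cpumask_var(mask);
}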

/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond);
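
/*
 * Illustrative sketch (not part of the original file): using
 * on_each_cpu_cond() to run work only where a predicate holds.  On UP the
 * predicate is evaluated exactly once, for CPU 0; gfp_flags is used by the
 * SMP implementation to allocate a temporary cpumask.  The per-CPU counter
 * and the example_* helpers are hypothetical.
 */
static DEFINE_PER_CPU(unsigned int, example_pending);

static bool example_has_pending(int cpu, void *info)
{
	return per_cpu(example_pending, cpu) != 0;
}

static void example_drain(void *info)
{
	/* Hypothetical draining of per-CPU pending work. */
	this_cpu_write(example_pending, 0);
}

static void __maybe_unused example_drain_all(void)
{
	on_each_cpu_cond(example_has_pending, example_drain, NULL, true,
			 GFP_KERNEL);
}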

/*
 * Run func(par) on CPU 0 and return its result.  If @phys is set, ask the
 * hypervisor to pin the vCPU to physical CPU 0 for the duration of the call.
 */
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
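
/*
 * Illustrative sketch (not part of the original file): smp_call_on_cpu()
 * returns the callee's own return value, or -ENXIO for an invalid CPU, and
 * with @phys set it additionally asks the hypervisor to pin the vCPU while
 * the function runs.  The example_* helpers are hypothetical.
 */
static int example_read_hw_state(void *unused)
{
	/* Hypothetical work that must run on (physical) CPU 0. */
	return 0;
}

static int __maybe_unused example_query_cpu0(void)
{
	return smp_call_on_cpu(0, example_read_hw_state, NULL, true);
}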