blob: ff536f9cc8a25ef561d72a4feb451fdea9bf9667 [file] [log] [blame]
Andrew Morton53ce3d92009-01-09 12:27:08 -08001/*
2 * Uniprocessor-only support functions. The counterpart to kernel/smp.c
3 */
4
Ingo Molnar6e962812009-01-12 16:04:37 +01005#include <linux/interrupt.h>
Andrew Morton53ce3d92009-01-09 12:27:08 -08006#include <linux/kernel.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -04007#include <linux/export.h>
Andrew Morton53ce3d92009-01-09 12:27:08 -08008#include <linux/smp.h>
Juergen Gross47ae4b02016-08-29 08:48:43 +02009#include <linux/hypervisor.h>
Andrew Morton53ce3d92009-01-09 12:27:08 -080010
/*
 * UP stub for the cross-CPU call API: there is only CPU 0, so the
 * function is simply run locally with interrupts disabled, mirroring
 * the IPI context it would get on SMP.  @wait is irrelevant here --
 * the call has always completed by the time we return.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long irqflags;

	/* CPU 0 is the only valid target on UP; anything else is a caller bug. */
	WARN_ON(cpu != 0);

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
David Daneyfa688202013-09-11 14:23:24 -070025
Ying Huang966a9672017-08-08 12:30:00 +080026int smp_call_function_single_async(int cpu, call_single_data_t *csd)
Christoph Hellwig40c01e8b2013-11-14 14:32:08 -080027{
28 unsigned long flags;
29
30 local_irq_save(flags);
31 csd->func(csd->info);
32 local_irq_restore(flags);
Jan Kara08eed442014-02-24 16:39:57 +010033 return 0;
Christoph Hellwig40c01e8b2013-11-14 14:32:08 -080034}
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +010035EXPORT_SYMBOL(smp_call_function_single_async);
Christoph Hellwig40c01e8b2013-11-14 14:32:08 -080036
David Daneybff2dc42013-09-11 14:23:26 -070037int on_each_cpu(smp_call_func_t func, void *info, int wait)
38{
39 unsigned long flags;
40
41 local_irq_save(flags);
42 func(info);
43 local_irq_restore(flags);
44 return 0;
45}
46EXPORT_SYMBOL(on_each_cpu);
47
David Daneyfa688202013-09-11 14:23:24 -070048/*
49 * Note we still need to test the mask even for UP
50 * because we actually can get an empty mask from
51 * code that on SMP might call us without the local
52 * CPU in the mask.
53 */
54void on_each_cpu_mask(const struct cpumask *mask,
55 smp_call_func_t func, void *info, bool wait)
56{
57 unsigned long flags;
58
59 if (cpumask_test_cpu(0, mask)) {
60 local_irq_save(flags);
61 func(info);
62 local_irq_restore(flags);
63 }
64}
65EXPORT_SYMBOL(on_each_cpu_mask);
66
67/*
68 * Preemption is disabled here to make sure the cond_func is called under the
69 * same condtions in UP and SMP.
70 */
Rik van Riel7d49b282018-09-25 23:58:41 -040071void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
72 smp_call_func_t func, void *info, bool wait,
73 gfp_t gfp_flags, const struct cpumask *mask)
David Daneyfa688202013-09-11 14:23:24 -070074{
75 unsigned long flags;
76
77 preempt_disable();
78 if (cond_func(0, info)) {
79 local_irq_save(flags);
80 func(info);
81 local_irq_restore(flags);
82 }
83 preempt_enable();
84}
Rik van Riel7d49b282018-09-25 23:58:41 -040085EXPORT_SYMBOL(on_each_cpu_cond_mask);
86
87void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
88 smp_call_func_t func, void *info, bool wait,
89 gfp_t gfp_flags)
90{
91 on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
92}
David Daneyfa688202013-09-11 14:23:24 -070093EXPORT_SYMBOL(on_each_cpu_cond);
Juergen Grossdf8ce9d2016-08-29 08:48:44 +020094
95int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
96{
97 int ret;
98
99 if (cpu != 0)
100 return -ENXIO;
101
102 if (phys)
103 hypervisor_pin_vcpu(0);
104 ret = func(par);
105 if (phys)
106 hypervisor_pin_vcpu(-1);
107
108 return ret;
109}
110EXPORT_SYMBOL_GPL(smp_call_on_cpu);