blob: 483c9962c99947d9e5f86e8cf1aba5019b965959 [file] [log] [blame]
Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Andrew Morton53ce3d92009-01-09 12:27:08 -08002/*
3 * Uniprocessor-only support functions. The counterpart to kernel/smp.c
4 */
5
Ingo Molnar6e962812009-01-12 16:04:37 +01006#include <linux/interrupt.h>
Andrew Morton53ce3d92009-01-09 12:27:08 -08007#include <linux/kernel.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -04008#include <linux/export.h>
Andrew Morton53ce3d92009-01-09 12:27:08 -08009#include <linux/smp.h>
Juergen Gross47ae4b02016-08-29 08:48:43 +020010#include <linux/hypervisor.h>
Andrew Morton53ce3d92009-01-09 12:27:08 -080011
/*
 * On UP there is only CPU 0, so a "cross-CPU" call degenerates into a
 * direct invocation of @func.  Interrupts are disabled around the call
 * to mirror the IPI-handler context the SMP implementation provides.
 * @wait is meaningless here: the call is always synchronous.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long irqflags;

	/* CPU 0 is the only valid target on a UP kernel. */
	WARN_ON(cpu != 0);

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
David Daneyfa688202013-09-11 14:23:24 -070026
Ying Huang966a9672017-08-08 12:30:00 +080027int smp_call_function_single_async(int cpu, call_single_data_t *csd)
Christoph Hellwig40c01e8b2013-11-14 14:32:08 -080028{
29 unsigned long flags;
30
31 local_irq_save(flags);
32 csd->func(csd->info);
33 local_irq_restore(flags);
Jan Kara08eed442014-02-24 16:39:57 +010034 return 0;
Christoph Hellwig40c01e8b2013-11-14 14:32:08 -080035}
Frederic Weisbeckerc46fff22014-02-24 16:40:02 +010036EXPORT_SYMBOL(smp_call_function_single_async);
Christoph Hellwig40c01e8b2013-11-14 14:32:08 -080037
David Daneybff2dc42013-09-11 14:23:26 -070038int on_each_cpu(smp_call_func_t func, void *info, int wait)
39{
40 unsigned long flags;
41
42 local_irq_save(flags);
43 func(info);
44 local_irq_restore(flags);
45 return 0;
46}
47EXPORT_SYMBOL(on_each_cpu);
48
David Daneyfa688202013-09-11 14:23:24 -070049/*
50 * Note we still need to test the mask even for UP
51 * because we actually can get an empty mask from
52 * code that on SMP might call us without the local
53 * CPU in the mask.
54 */
55void on_each_cpu_mask(const struct cpumask *mask,
56 smp_call_func_t func, void *info, bool wait)
57{
58 unsigned long flags;
59
60 if (cpumask_test_cpu(0, mask)) {
61 local_irq_save(flags);
62 func(info);
63 local_irq_restore(flags);
64 }
65}
66EXPORT_SYMBOL(on_each_cpu_mask);
67
68/*
69 * Preemption is disabled here to make sure the cond_func is called under the
 70 * same conditions in UP and SMP.
71 */
Rik van Riel7d49b282018-09-25 23:58:41 -040072void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
73 smp_call_func_t func, void *info, bool wait,
74 gfp_t gfp_flags, const struct cpumask *mask)
David Daneyfa688202013-09-11 14:23:24 -070075{
76 unsigned long flags;
77
78 preempt_disable();
79 if (cond_func(0, info)) {
80 local_irq_save(flags);
81 func(info);
82 local_irq_restore(flags);
83 }
84 preempt_enable();
85}
Rik van Riel7d49b282018-09-25 23:58:41 -040086EXPORT_SYMBOL(on_each_cpu_cond_mask);
87
/*
 * Convenience wrapper without a cpumask: forwards to
 * on_each_cpu_cond_mask() with a NULL mask, i.e. no mask restriction.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
}
EXPORT_SYMBOL(on_each_cpu_cond);
Juergen Grossdf8ce9d2016-08-29 08:48:44 +020095
96int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
97{
98 int ret;
99
100 if (cpu != 0)
101 return -ENXIO;
102
103 if (phys)
104 hypervisor_pin_vcpu(0);
105 ret = func(par);
106 if (phys)
107 hypervisor_pin_vcpu(-1);
108
109 return ret;
110}
111EXPORT_SYMBOL_GPL(smp_call_on_cpu);