// SPDX-License-Identifier: GPL-2.0-only
/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long flags;

	WARN_ON(cpu != 0);

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
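
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use smp_call_function_single(). The example names are hypothetical. On
 * UP the handler simply runs on CPU 0 with interrupts disabled, so the
 * "wait" semantics are trivially satisfied.
 */
#if 0
static void example_show_cpu(void *info)
{
	/* Runs with interrupts disabled; must not sleep. */
	pr_info("ran on cpu %d\n", smp_processor_id());
}

static void example_call_single(void)
{
	/* Run the handler on CPU 0 and wait for completion. */
	smp_call_function_single(0, example_show_cpu, NULL, 1);
}
#endif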

int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);
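
/*
 * Illustrative sketch (an assumption, not in the original file): preparing
 * a call_single_data_t for smp_call_function_single_async(). On UP the csd
 * is executed synchronously before the call returns, so the caller need not
 * manage its lifetime beyond the call. The names are hypothetical.
 */
#if 0
static void example_async_handler(void *info)
{
	pr_info("async handler ran, info=%p\n", info);
}

static call_single_data_t example_csd;

static void example_call_async(void)
{
	example_csd.func = example_async_handler;
	example_csd.info = NULL;
	/* On SMP this could return before the handler runs; on UP it cannot. */
	smp_call_function_single_async(0, &example_csd);
}
#endif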

void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	unsigned long flags;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(on_each_cpu);

/*
 * Note we still need to test the mask even for UP
 * because we actually can get an empty mask from
 * code that on SMP might call us without the local
 * CPU in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask,
		      smp_call_func_t func, void *info, bool wait)
{
	unsigned long flags;

	if (cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(on_each_cpu_mask);
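
/*
 * Illustrative sketch (an assumption, not in the original file): as the
 * comment above notes, a caller may legitimately pass a mask that does not
 * contain the local CPU, so even UP must test the mask. The names below are
 * hypothetical.
 */
#if 0
static void example_mask_handler(void *info)
{
	(*(int *)info)++;
}

static void example_on_mask(void)
{
	int hits = 0;

	/* An empty mask is valid: the handler is simply never invoked. */
	on_each_cpu_mask(cpu_none_mask, example_mask_handler, &hits, true);

	/* With CPU 0 in the mask, the handler runs exactly once on UP. */
	on_each_cpu_mask(cpumask_of(0), example_mask_handler, &hits, true);
}
#endif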

/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, gfp_t gfp_flags,
			   const struct cpumask *mask)
{
	unsigned long flags;

	preempt_disable();
	if (cond_func(0, info)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
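
/*
 * Illustrative sketch (an assumption, not in the original file): cond_func
 * is evaluated with preemption disabled, exactly as it would be on SMP, and
 * func only runs on CPUs for which cond_func returned true. The handler
 * names are hypothetical.
 */
#if 0
static bool example_cond(int cpu, void *info)
{
	/* Called with preemption disabled; decide per CPU. */
	return *(bool *)info;
}

static void example_cond_handler(void *info)
{
	pr_info("conditional handler ran\n");
}

static void example_on_cond(bool want)
{
	on_each_cpu_cond_mask(example_cond, example_cond_handler,
			      &want, true, GFP_KERNEL, cpu_online_mask);
}
#endif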

void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
		      void *info, bool wait, gfp_t gfp_flags)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
}
EXPORT_SYMBOL(on_each_cpu_cond);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
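
/*
 * Illustrative sketch (an assumption, not in the original file):
 * smp_call_on_cpu() is meant to run func from process context (on SMP it
 * goes through a workqueue), so unlike the IPI-style helpers above func may
 * sleep. With phys=true the hypervisor is asked to pin the vCPU to the
 * physical CPU for the duration of the call; on bare metal that is a no-op.
 * The names below are hypothetical.
 */
#if 0
static int example_read_value(void *par)
{
	/* Process context: sleeping is allowed here. */
	*(unsigned long *)par = 42;	/* stand-in for a real per-CPU read */
	return 0;
}

static int example_call_on_cpu(void)
{
	unsigned long val;

	/* Any cpu other than 0 fails with -ENXIO on UP. */
	return smp_call_on_cpu(0, example_read_value, &val, false);
}
#endif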