/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/list.h>
/*
 * stop_cpu[s]() is a simplistic per-cpu maximum priority cpu
 * monopolization mechanism.  The caller can specify a non-sleeping
 * function to be executed on a single cpu or on multiple cpus,
 * preempting all other processes and monopolizing those cpus until
 * it finishes.
 *
 * Resources for this mechanism are preallocated when a cpu is brought
 * up and requests are guaranteed to be served as long as the target
 * cpus are online.
 */
typedef int (*cpu_stop_fn_t)(void *arg);
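
/*
 * Illustrative sketch (not part of the kernel API; the name below is
 * made up for the example): a conforming cpu_stop callback must not
 * sleep, since it runs with preemption, and possibly interrupts,
 * disabled on the target cpu.
 */
#if 0	/* example only, never compiled */
static int example_stop_fn(void *arg)
{
	int *counter = arg;	/* caller-supplied state */

	(*counter)++;		/* short, non-sleeping work only */
	return 0;		/* propagated to the waiting caller */
}
#endif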

#ifdef CONFIG_SMP

struct cpu_stop_work {
	struct list_head	list;		/* cpu_stopper->works */
	cpu_stop_fn_t		fn;
	void			*arg;
	struct cpu_stop_done	*done;
};

int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf);
void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);

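/*
 * Illustrative usage sketch (not kernel code; the cpu numbers and
 * example_stop_fn(), sketched above, are made up).  Note the
 * cpu_stop_work buffer and the argument handed to the nowait variant
 * must outlive the queued request, hence static here.
 */
#if 0	/* example only, never compiled */
static struct cpu_stop_work example_work;
static int example_counter;

static void example_caller(void)
{
	/* Blocks until example_stop_fn() has run on cpu 1. */
	int ret = stop_one_cpu(1, example_stop_fn, &example_counter);

	/* Queues the request and returns; true if cpu 2's stopper took it. */
	if (!stop_one_cpu_nowait(2, example_stop_fn, &example_counter,
				 &example_work))
		pr_warn("cpu 2 stopper unavailable\n");
	(void)ret;
}
#endif
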
#else /* CONFIG_SMP */

#include <linux/workqueue.h>

struct cpu_stop_work {
	struct work_struct	work;
	cpu_stop_fn_t		fn;
	void			*arg;
};

static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	int ret = -ENOENT;
	preempt_disable();
	if (cpu == smp_processor_id())
		ret = fn(arg);
	preempt_enable();
	return ret;
}

/* UP fallback: run the queued fn from a workqueue, as a stopper would. */
static void stop_one_cpu_nowait_workfn(struct work_struct *work)
{
	struct cpu_stop_work *stwork =
		container_of(work, struct cpu_stop_work, work);
	preempt_disable();
	stwork->fn(stwork->arg);
	preempt_enable();
}

static inline bool stop_one_cpu_nowait(unsigned int cpu,
				       cpu_stop_fn_t fn, void *arg,
				       struct cpu_stop_work *work_buf)
{
	if (cpu == smp_processor_id()) {
		INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
		work_buf->fn = fn;
		work_buf->arg = arg;
		schedule_work(&work_buf->work);
		return true;
	}

	return false;
}

#endif /* CONFIG_SMP */

/*
 * stop_machine "Bogolock": stop the entire machine, disable
 * interrupts.  This is a very heavy lock, which is equivalent to
 * grabbing every spinlock (and more).  So the "read" side to such a
 * lock is anything which disables preemption.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)

/**
 * stop_machine: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Description: This causes a thread to be scheduled on every cpu,
 * each of which disables interrupts.  The result is that no one is
 * holding a spinlock or inside any other preempt-disabled region when
 * @fn() runs.
 *
 * This can be thought of as a very heavy write lock, equivalent to
 * grabbing every spinlock in the kernel.
 *
 * Protects against CPU hotplug.
 */
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
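
/*
 * Illustrative usage sketch (assumption only; example_patch_text()
 * and struct example_patch are made-up names): atomically update
 * shared kernel state while every other cpu spins with interrupts
 * off, so no cpu can observe an intermediate state.
 */
#if 0	/* example only, never compiled */
struct example_patch {
	void	*addr;
	u32	insn;
};

static int example_patch_text(void *data)
{
	struct example_patch *p = data;

	*(u32 *)p->addr = p->insn;	/* no other cpu is running here */
	return 0;
}

static int example_apply(struct example_patch *p)
{
	/* Runs example_patch_text() on one cpu, all others frozen. */
	return stop_machine(example_patch_text, p, NULL);
}
#endif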

/**
 * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * Same as above.  Must be called from within a cpus_read_lock() protected
 * region.  Avoids nested calls to cpus_read_lock().
 */
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
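
/*
 * Illustrative sketch (assumption only, reusing the hypothetical
 * example_patch_text() from the sketch above): use the _cpuslocked
 * variant when the caller already holds the cpu hotplug read lock,
 * so stop_machine() does not take cpus_read_lock() a second time.
 */
#if 0	/* example only, never compiled */
static int example_apply_locked(struct example_patch *p)
{
	int ret;

	cpus_read_lock();		/* hotplug already excluded here */
	ret = stop_machine_cpuslocked(example_patch_text, p, NULL);
	cpus_read_unlock();
	return ret;
}
#endif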

int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus);
#else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */

static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
					  const struct cpumask *cpus)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = fn(data);
	local_irq_restore(flags);
	return ret;
}

static inline int stop_machine(cpu_stop_fn_t fn, void *data,
			       const struct cpumask *cpus)
{
	return stop_machine_cpuslocked(fn, data, cpus);
}

static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
						 const struct cpumask *cpus)
{
	return stop_machine(fn, data, cpus);
}

#endif /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif /* _LINUX_STOP_MACHINE */