// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * kernel/stop_machine.c
 *
 * Copyright (C) 2008, 2005 IBM Corporation.
 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
 * Copyright (C) 2010 SUSE Linux Products GmbH
 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
 */
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/smpboot.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <linux/sched/wake_q.h>

/*
 * Structure to determine completion condition and record errors.  May
 * be shared by works on different cpus.
 */
struct cpu_stop_done {
	atomic_t		nr_todo;	/* nr left to execute */
	int			ret;		/* collected return value */
	struct completion	completion;	/* fired if nr_todo reaches 0 */
};

/* the actual stopper, one per possible cpu, enabled on online cpus */
struct cpu_stopper {
	struct task_struct	*thread;

	raw_spinlock_t		lock;
	bool			enabled;	/* is this stopper enabled? */
	struct list_head	works;		/* list of pending works */

	struct cpu_stop_work	stop_work;	/* for stop_cpus */
};

static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
static bool stop_machine_initialized = false;

/* static data for stop_cpus */
static DEFINE_MUTEX(stop_cpus_mutex);
static bool stop_cpus_in_progress;

static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
{
	memset(done, 0, sizeof(*done));
	atomic_set(&done->nr_todo, nr_todo);
	init_completion(&done->completion);
}

/* signal completion; callers must check for a NULL @done themselves */
static void cpu_stop_signal_done(struct cpu_stop_done *done)
{
	if (atomic_dec_and_test(&done->nr_todo))
		complete(&done->completion);
}

static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
				  struct cpu_stop_work *work,
				  struct wake_q_head *wakeq)
{
	list_add_tail(&work->list, &stopper->works);
	wake_q_add(wakeq, stopper->thread);
}

/* queue @work to @stopper. if offline, @work is completed immediately */
static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	DEFINE_WAKE_Q(wakeq);
	unsigned long flags;
	bool enabled;

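	/*
	 * Keep preemption disabled across queueing and the wakeup below so
	 * that both happen in the same scheduling context; see the comment
	 * in cpu_stop_queue_two_works() for the race this avoids.
	 */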
	preempt_disable();
	raw_spin_lock_irqsave(&stopper->lock, flags);
	enabled = stopper->enabled;
	if (enabled)
		__cpu_stop_queue_work(stopper, work, &wakeq);
	else if (work->done)
		cpu_stop_signal_done(work->done);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);

	wake_up_q(&wakeq);
	preempt_enable();

	return enabled;
}

/**
 * stop_one_cpu - stop a cpu
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on @cpu.  @fn is run in a process context with
 * the highest priority preempting any task on the cpu and
 * monopolizing it.  This function returns after the execution is
 * complete.
 *
 * This function doesn't guarantee @cpu stays online till @fn
 * completes.  If @cpu goes down in the middle, execution may happen
 * partially or fully on different cpus.  @fn should either be ready
 * for that or the caller should ensure that @cpu stays online until
 * this function completes.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
 * otherwise, the return value of @fn.
 */
int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };

	cpu_stop_init_done(&done, 1);
	if (!cpu_stop_queue_work(cpu, &work))
		return -ENOENT;
	/*
	 * In case @cpu == smp_processor_id() we can avoid a sleep+wakeup
	 * cycle by doing a preemption:
	 */
	cond_resched();
	wait_for_completion(&done.completion);
	return done.ret;
}
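
/*
 * Example usage (illustrative sketch only; the callback and helper names
 * below are invented, not callers that exist in this file):
 *
 *	static int read_remote_counter(void *arg)
 *	{
 *		u64 *val = arg;
 *
 *		*val = arch_read_local_counter();  // runs on the target CPU,
 *		return 0;                          // must not sleep
 *	}
 *
 *	err = stop_one_cpu(3, read_remote_counter, &val);
 *	if (err == -ENOENT)
 *		// CPU 3 was offline and the callback never ran
 */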

/* This controls the threads on each CPU. */
enum multi_stop_state {
	/* Dummy starting state for thread. */
	MULTI_STOP_NONE,
	/* Awaiting everyone to be scheduled. */
	MULTI_STOP_PREPARE,
	/* Disable interrupts. */
	MULTI_STOP_DISABLE_IRQ,
	/* Run the function */
	MULTI_STOP_RUN,
	/* Exit */
	MULTI_STOP_EXIT,
};

struct multi_stop_data {
	cpu_stop_fn_t		fn;
	void			*data;
	/* Like num_online_cpus(), but CPU hotplug itself uses us, so we need our own count. */
	unsigned int		num_threads;
	const struct cpumask	*active_cpus;

	enum multi_stop_state	state;
	atomic_t		thread_ack;
};

static void set_state(struct multi_stop_data *msdata,
		      enum multi_stop_state newstate)
{
	/* Reset ack counter. */
	atomic_set(&msdata->thread_ack, msdata->num_threads);
	smp_wmb();
	msdata->state = newstate;
}

/* Last one to ack a state moves to the next state. */
static void ack_state(struct multi_stop_data *msdata)
{
	if (atomic_dec_and_test(&msdata->thread_ack))
		set_state(msdata, msdata->state + 1);
}
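
/*
 * Informal overview of the lockstep protocol: set_state() re-arms
 * thread_ack to num_threads and then publishes the new state.  Every CPU
 * in multi_cpu_stop() below spins on msdata->state, acts on each new
 * state exactly once and calls ack_state(); the last CPU to ack advances
 * the machine:
 *
 *	MULTI_STOP_PREPARE -> MULTI_STOP_DISABLE_IRQ ->
 *	MULTI_STOP_RUN -> MULTI_STOP_EXIT
 *
 * so no CPU can enter MULTI_STOP_RUN before all of them have disabled
 * interrupts.
 */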

void __weak stop_machine_yield(const struct cpumask *cpumask)
{
	cpu_relax();
}

/* This is the cpu_stop function which stops the CPU. */
static int multi_cpu_stop(void *data)
{
	struct multi_stop_data *msdata = data;
	enum multi_stop_state curstate = MULTI_STOP_NONE;
	int cpu = smp_processor_id(), err = 0;
	const struct cpumask *cpumask;
	unsigned long flags;
	bool is_active;

	/*
	 * When called from stop_machine_from_inactive_cpu(), irq might
	 * already be disabled.  Save the state and restore it on exit.
	 */
	local_save_flags(flags);

	if (!msdata->active_cpus) {
		cpumask = cpu_online_mask;
		is_active = cpu == cpumask_first(cpumask);
	} else {
		cpumask = msdata->active_cpus;
		is_active = cpumask_test_cpu(cpu, cpumask);
	}

	/* Simple state machine */
	do {
		/* Chill out and ensure we re-read multi_stop_state. */
		stop_machine_yield(cpumask);
		if (msdata->state != curstate) {
			curstate = msdata->state;
			switch (curstate) {
			case MULTI_STOP_DISABLE_IRQ:
				local_irq_disable();
				hard_irq_disable();
				break;
			case MULTI_STOP_RUN:
				if (is_active)
					err = msdata->fn(msdata->data);
				break;
			default:
				break;
			}
			ack_state(msdata);
		} else if (curstate > MULTI_STOP_PREPARE) {
			/*
			 * At this stage all other CPUs we depend on must spin
			 * in the same loop.  Any reason for hard-lockup should
			 * be detected and reported on their side.
			 */
			touch_nmi_watchdog();
		}
	} while (curstate != MULTI_STOP_EXIT);

	local_irq_restore(flags);
	return err;
}

static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
				    int cpu2, struct cpu_stop_work *work2)
{
	struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
	struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
	DEFINE_WAKE_Q(wakeq);
	int err;

retry:
	/*
	 * The waking up of stopper threads has to happen in the same
	 * scheduling context as the queueing.  Otherwise, one of the
	 * stoppers queued below could be woken up by another CPU and
	 * preempt us, in which case we would never get around to waking
	 * the other stopper.
	 */
	preempt_disable();
	raw_spin_lock_irq(&stopper1->lock);
	raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING);

	if (!stopper1->enabled || !stopper2->enabled) {
		err = -ENOENT;
		goto unlock;
	}

	/*
	 * Ensure that if we race with __stop_cpus() the stoppers won't get
	 * queued up in reverse order, leading to system deadlock.
	 *
	 * We can't miss stop_cpus_in_progress: if queue_stop_cpus_work() has
	 * queued a work on cpu1 but not yet on cpu2, we hold both locks and
	 * will therefore observe the flag.
	 *
	 * The flag can be spuriously true, but it is safe to spin until it
	 * is cleared; queue_stop_cpus_work() does everything under
	 * preempt_disable().
	 */
	if (unlikely(stop_cpus_in_progress)) {
		err = -EDEADLK;
		goto unlock;
	}

	err = 0;
	__cpu_stop_queue_work(stopper1, work1, &wakeq);
	__cpu_stop_queue_work(stopper2, work2, &wakeq);

unlock:
	raw_spin_unlock(&stopper2->lock);
	raw_spin_unlock_irq(&stopper1->lock);

	if (unlikely(err == -EDEADLK)) {
		preempt_enable();

		while (stop_cpus_in_progress)
			cpu_relax();

		goto retry;
	}

	wake_up_q(&wakeq);
	preempt_enable();

	return err;
}
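
/*
 * Why the -EDEADLK dance above (informal note): queue_stop_cpus_work()
 * queues one CPU at a time without holding both locks, so it can have
 * queued its multi_cpu_stop() work on cpu1 but not yet on cpu2 when we
 * queue on both.  cpu1 would then run the stop_cpus() work first while
 * cpu2 runs ours first; each multi_cpu_stop() instance waits in
 * MULTI_STOP_PREPARE for its partner and neither ever arrives.  Spinning
 * until stop_cpus_in_progress clears serializes the two paths.
 */
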
/**
 * stop_two_cpus - stops two cpus
 * @cpu1: the cpu to stop
 * @cpu2: the other cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Stops both @cpu1 and @cpu2 and runs @fn on one of them.
 *
 * Returns when both stops have completed.
 */
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;
	struct cpu_stop_work work1, work2;
	struct multi_stop_data msdata;

	msdata = (struct multi_stop_data){
		.fn = fn,
		.data = arg,
		.num_threads = 2,
		.active_cpus = cpumask_of(cpu1),
	};

	work1 = work2 = (struct cpu_stop_work){
		.fn = multi_cpu_stop,
		.arg = &msdata,
		.done = &done
	};

	cpu_stop_init_done(&done, 2);
	set_state(&msdata, MULTI_STOP_PREPARE);

	if (cpu1 > cpu2)
		swap(cpu1, cpu2);
	if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2))
		return -ENOENT;

	wait_for_completion(&done.completion);
	return done.ret;
}
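
/*
 * Example usage (illustrative sketch; the scheduler's cross-CPU task swap
 * has this shape in-tree, but the callback and names below are invented):
 *
 *	static int swap_task_contexts(void *arg)
 *	{
 *		struct swap_arg *sa = arg;  // both CPUs are stopped here
 *		...
 *		return 0;
 *	}
 *
 *	err = stop_two_cpus(src_cpu, dst_cpu, swap_task_contexts, &sa);
 */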

/**
 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
 * @cpu: cpu to stop
 * @fn: function to execute
 * @arg: argument to @fn
 * @work_buf: pointer to cpu_stop_work structure
 *
 * Similar to stop_one_cpu() but doesn't wait for completion.  The
 * caller is responsible for ensuring @work_buf is currently unused
 * and will remain untouched until the stopper starts executing @fn.
 *
 * CONTEXT:
 * Don't care.
 *
 * RETURNS:
 * true if cpu_stop_work was queued successfully and @fn will be called,
 * false otherwise.
 */
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf)
{
	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
	return cpu_stop_queue_work(cpu, work_buf);
}
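
/*
 * Example usage (illustrative sketch; active load balancing in the
 * scheduler uses this shape, but the structure and names below are
 * invented).  @work_buf must outlive the request, so it is embedded in a
 * longer-lived object rather than placed on the stack:
 *
 *	struct balance_req {
 *		...
 *		struct cpu_stop_work work;  // lives as long as the request
 *	};
 *
 *	stop_one_cpu_nowait(busiest_cpu, pull_task_cb, req, &req->work);
 */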

static bool queue_stop_cpus_work(const struct cpumask *cpumask,
				 cpu_stop_fn_t fn, void *arg,
				 struct cpu_stop_done *done)
{
	struct cpu_stop_work *work;
	unsigned int cpu;
	bool queued = false;

	/*
	 * Disable preemption while queueing to avoid getting
	 * preempted by a stopper which might wait for other stoppers
	 * to enter @fn which can lead to deadlock.
	 */
	preempt_disable();
	stop_cpus_in_progress = true;
	barrier();
	for_each_cpu(cpu, cpumask) {
		work = &per_cpu(cpu_stopper.stop_work, cpu);
		work->fn = fn;
		work->arg = arg;
		work->done = done;
		if (cpu_stop_queue_work(cpu, work))
			queued = true;
	}
	barrier();
	stop_cpus_in_progress = false;
	preempt_enable();

	return queued;
}

static int __stop_cpus(const struct cpumask *cpumask,
		       cpu_stop_fn_t fn, void *arg)
{
	struct cpu_stop_done done;

	cpu_stop_init_done(&done, cpumask_weight(cpumask));
	if (!queue_stop_cpus_work(cpumask, fn, arg, &done))
		return -ENOENT;
	wait_for_completion(&done.completion);
	return done.ret;
}

/**
 * stop_cpus - stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Execute @fn(@arg) on online cpus in @cpumask.  On each target cpu,
 * @fn is run in a process context with the highest priority
 * preempting any task on the cpu and monopolizing it.  This function
 * returns after all executions are complete.
 *
 * This function doesn't guarantee the cpus in @cpumask stay online
 * till @fn completes.  If some cpus go down in the middle, execution
 * on the cpu may happen partially or fully on different cpus.  @fn
 * should either be ready for that or the caller should ensure that
 * the cpus stay online until this function completes.
 *
 * All stop_cpus() calls are serialized making it safe for @fn to wait
 * for all cpus to start executing it.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
 * @cpumask were offline; otherwise, 0 if all executions of @fn
 * returned 0, or any non-zero value if some execution returned non-zero.
 */
int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	mutex_lock(&stop_cpus_mutex);
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}
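
/*
 * For reference, stop_machine_cpuslocked() below is the main caller of
 * stop_cpus(): it wraps the user's callback in multi_cpu_stop() so all
 * online CPUs rendezvous with interrupts disabled:
 *
 *	set_state(&msdata, MULTI_STOP_PREPARE);
 *	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
 */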

/**
 * try_stop_cpus - try to stop multiple cpus
 * @cpumask: cpus to stop
 * @fn: function to execute
 * @arg: argument to @fn
 *
 * Identical to stop_cpus() except that it fails with -EAGAIN if
 * someone else is already using the facility.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
 * @fn(@arg) was not executed at all because all cpus in @cpumask were
 * offline; otherwise, 0 if all executions of @fn returned 0, or any
 * non-zero value if some execution returned non-zero.
 */
int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
{
	int ret;

	/* static works are used, process one request at a time */
	if (!mutex_trylock(&stop_cpus_mutex))
		return -EAGAIN;
	ret = __stop_cpus(cpumask, fn, arg);
	mutex_unlock(&stop_cpus_mutex);
	return ret;
}

static int cpu_stop_should_run(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	unsigned long flags;
	int run;

	raw_spin_lock_irqsave(&stopper->lock, flags);
	run = !list_empty(&stopper->works);
	raw_spin_unlock_irqrestore(&stopper->lock, flags);
	return run;
}

static void cpu_stopper_thread(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work;

repeat:
	work = NULL;
	raw_spin_lock_irq(&stopper->lock);
	if (!list_empty(&stopper->works)) {
		work = list_first_entry(&stopper->works,
					struct cpu_stop_work, list);
		list_del_init(&work->list);
	}
	raw_spin_unlock_irq(&stopper->lock);

	if (work) {
		cpu_stop_fn_t fn = work->fn;
		void *arg = work->arg;
		struct cpu_stop_done *done = work->done;
		int ret;

		/* cpu stop callbacks must not sleep, make in_atomic() == T */
		preempt_count_inc();
		ret = fn(arg);
		if (done) {
			if (ret)
				done->ret = ret;
			cpu_stop_signal_done(done);
		}
		preempt_count_dec();
		WARN_ONCE(preempt_count(),
			  "cpu_stop: %ps(%p) leaked preempt count\n", fn, arg);
		goto repeat;
	}
}

void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless.  cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}
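
/*
 * Informal note on park/unpark pairing: the CPU hotplug core calls
 * stop_machine_park() while taking a CPU down, so nothing new can be
 * queued on it, and stop_machine_unpark() when the CPU comes back up;
 * cpu_stop_init() below unparks the boot CPU's stopper once at boot.
 */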

extern void sched_set_stop_task(int cpu, struct task_struct *stop);

static void cpu_stop_create(unsigned int cpu)
{
	sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu));
}

static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	WARN_ON(!list_empty(&stopper->works));
}

void stop_machine_unpark(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

	stopper->enabled = true;
	kthread_unpark(stopper->thread);
}

static struct smp_hotplug_thread cpu_stop_threads = {
	.store			= &cpu_stopper.thread,
	.thread_should_run	= cpu_stop_should_run,
	.thread_fn		= cpu_stopper_thread,
	.thread_comm		= "migration/%u",
	.create			= cpu_stop_create,
	.park			= cpu_stop_park,
	.selfparking		= true,
};

static int __init cpu_stop_init(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

		raw_spin_lock_init(&stopper->lock);
		INIT_LIST_HEAD(&stopper->works);
	}

	BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
	stop_machine_unpark(raw_smp_processor_id());
	stop_machine_initialized = true;
	return 0;
}
early_initcall(cpu_stop_init);

int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
			    const struct cpumask *cpus)
{
	struct multi_stop_data msdata = {
		.fn = fn,
		.data = data,
		.num_threads = num_online_cpus(),
		.active_cpus = cpus,
	};

	lockdep_assert_cpus_held();

	if (!stop_machine_initialized) {
		/*
		 * Handle the case where stop_machine() is called
		 * early in boot, before the stopper threads have
		 * been initialized.
		 */
		unsigned long flags;
		int ret;

		WARN_ON_ONCE(msdata.num_threads != 1);

		local_irq_save(flags);
		hard_irq_disable();
		ret = (*fn)(data);
		local_irq_restore(flags);

		return ret;
	}

	/* Set the initial state and stop all online cpus. */
	set_state(&msdata, MULTI_STOP_PREPARE);
	return stop_cpus(cpu_online_mask, multi_cpu_stop, &msdata);
}
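
/*
 * Example usage (illustrative sketch; the callback and variable names are
 * invented): a caller that already holds the hotplug lock uses the
 * _cpuslocked variant instead of stop_machine():
 *
 *	cpus_read_lock();
 *	...
 *	ret = stop_machine_cpuslocked(apply_patch_cb, &patch, NULL);
 *	...
 *	cpus_read_unlock();
 */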

int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	int ret;

	/* No CPUs can come up or down during this. */
	cpus_read_lock();
	ret = stop_machine_cpuslocked(fn, data, cpus);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(stop_machine);
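
/*
 * Example usage (illustrative sketch; the callback is invented): the
 * classic use is patching code or hardware state that no CPU may touch
 * concurrently.  The callback must not sleep; with @cpus == NULL it runs
 * on the first online CPU while all others spin with IRQs disabled:
 *
 *	static int do_patch(void *arg)
 *	{
 *		write_patch_site(arg);  // no other CPU is executing here
 *		return 0;
 *	}
 *
 *	ret = stop_machine(do_patch, &desc, NULL);
 */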

/**
 * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
 * @fn: the function to run
 * @data: the data ptr for the @fn()
 * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
 * This is identical to stop_machine() but can be called from a CPU which
 * is not active.  The local CPU is in the process of hotplug (so no other
 * CPU hotplug can start), is not marked active, and doesn't have enough
 * context to sleep.
 *
 * This function provides stop_machine() functionality for such state by
 * using busy-wait for synchronization and executing @fn directly for local
 * CPU.
 *
 * CONTEXT:
 * Local CPU is inactive.  Temporarily stops all active CPUs.
 *
 * RETURNS:
 * 0 if all executions of @fn returned 0, or any non-zero value if some
 * execution returned non-zero.
 */
int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus)
{
	struct multi_stop_data msdata = { .fn = fn, .data = data,
					  .active_cpus = cpus };
	struct cpu_stop_done done;
	int ret;

	/* Local CPU must be inactive and CPU hotplug in progress. */
	BUG_ON(cpu_active(raw_smp_processor_id()));
	msdata.num_threads = num_active_cpus() + 1;	/* +1 for local */

	/* No proper task established and can't sleep - busy wait for lock. */
	while (!mutex_trylock(&stop_cpus_mutex))
		cpu_relax();

	/* Schedule work on other CPUs and execute directly for local CPU */
	set_state(&msdata, MULTI_STOP_PREPARE);
	cpu_stop_init_done(&done, num_active_cpus());
	queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
			     &done);
	ret = multi_cpu_stop(&msdata);

	/* Busy wait for completion. */
	while (!completion_done(&done.completion))
		cpu_relax();

	mutex_unlock(&stop_cpus_mutex);
	return ret ?: done.ret;
}