/*
 * Common SMP CPU bringup/teardown functions
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/smpboot.h>

#include "smpboot.h"

#ifdef CONFIG_SMP

#ifdef CONFIG_GENERIC_SMP_IDLE_THREAD
/*
 * For the hotplug case we keep the task structs around and reuse
 * them.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_threads);

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);
	init_idle(tsk, cpu);
	return tsk;
}

void __init idle_thread_set_boot_cpu(void)
{
	per_cpu(idle_threads, smp_processor_id()) = current;
}

/**
 * idle_init - Initialize the idle thread for a cpu
 * @cpu: The cpu for which the idle thread should be initialized
 *
 * Creates the thread if it does not exist.
 */
static inline void idle_init(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk) {
		tsk = fork_idle(cpu);
		if (IS_ERR(tsk))
			pr_err("SMP: fork_idle() failed for CPU %u\n", cpu);
		else
			per_cpu(idle_threads, cpu) = tsk;
	}
}

/**
 * idle_threads_init - Initialize idle threads for all cpus
 */
void __init idle_threads_init(void)
{
	unsigned int cpu, boot_cpu;

	boot_cpu = smp_processor_id();

	for_each_possible_cpu(cpu) {
		if (cpu != boot_cpu)
			idle_init(cpu);
	}
}
#endif

#endif /* #ifdef CONFIG_SMP */

static LIST_HEAD(hotplug_threads);
static DEFINE_MUTEX(smpboot_threads_lock);

struct smpboot_thread_data {
	unsigned int			cpu;
	unsigned int			status;
	struct smp_hotplug_thread	*ht;
};

enum {
	HP_THREAD_NONE = 0,
	HP_THREAD_ACTIVE,
	HP_THREAD_PARKED,
};
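
/*
 * State machine driven by smpboot_thread_fn() below. The ht->setup/park/
 * unpark/cleanup callbacks are optional and invoked only when set:
 *
 *   HP_THREAD_NONE   --ht->setup()-->  HP_THREAD_ACTIVE
 *   HP_THREAD_ACTIVE --ht->park()-->   HP_THREAD_PARKED  (kthread parked)
 *   HP_THREAD_PARKED --ht->unpark()--> HP_THREAD_ACTIVE
 *
 * ht->cleanup() runs when the thread is stopped, provided setup ran first
 * (i.e. status != HP_THREAD_NONE).
 */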

/**
 * smpboot_thread_fn - percpu hotplug thread loop function
 * @data:	thread data pointer
 *
 * Checks for thread stop and park conditions. Calls the necessary
 * setup, cleanup, park and unpark functions for the registered
 * thread.
 *
 * Returns 0 when the thread is told to stop; otherwise the loop does
 * not terminate.
 */
static int smpboot_thread_fn(void *data)
{
	struct smpboot_thread_data *td = data;
	struct smp_hotplug_thread *ht = td->ht;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		preempt_disable();
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			/* cleanup must mirror setup */
			if (ht->cleanup && td->status != HP_THREAD_NONE)
				ht->cleanup(td->cpu, cpu_online(td->cpu));
			kfree(td);
			return 0;
		}

		if (kthread_should_park()) {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->park && td->status == HP_THREAD_ACTIVE) {
				BUG_ON(td->cpu != smp_processor_id());
				ht->park(td->cpu);
				td->status = HP_THREAD_PARKED;
			}
			kthread_parkme();
			/* We might have been woken for stop */
			continue;
		}

		BUG_ON(td->cpu != smp_processor_id());

		/* Check for state change setup */
		switch (td->status) {
		case HP_THREAD_NONE:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->setup)
				ht->setup(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;

		case HP_THREAD_PARKED:
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			if (ht->unpark)
				ht->unpark(td->cpu);
			td->status = HP_THREAD_ACTIVE;
			continue;
		}

		if (!ht->thread_should_run(td->cpu)) {
			preempt_enable_no_resched();
			schedule();
		} else {
			__set_current_state(TASK_RUNNING);
			preempt_enable();
			ht->thread_fn(td->cpu);
		}
	}
}

static int
__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
	struct smpboot_thread_data *td;

	if (tsk)
		return 0;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu));
	if (!td)
		return -ENOMEM;
	td->cpu = cpu;
	td->ht = ht;

	tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu,
				    ht->thread_comm);
	if (IS_ERR(tsk)) {
		kfree(td);
		return PTR_ERR(tsk);
	}
	/*
	 * Park the thread so that it can start right on the target CPU
	 * as soon as that CPU becomes available.
	 */
	kthread_park(tsk);
	get_task_struct(tsk);
	*per_cpu_ptr(ht->store, cpu) = tsk;
	if (ht->create) {
		/*
		 * Make sure that the task has actually scheduled out
		 * into park position, before calling the create
		 * callback. At least the migration thread callback
		 * requires that the task is off the runqueue.
		 */
		if (!wait_task_inactive(tsk, TASK_PARKED))
			WARN_ON(1);
		else
			ht->create(cpu);
	}
	return 0;
}

int smpboot_create_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;
	int ret = 0;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list) {
		ret = __smpboot_create_thread(cur, cpu);
		if (ret)
			break;
	}
	mutex_unlock(&smpboot_threads_lock);
	return ret;
}

static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (!ht->selfparking)
		kthread_unpark(tsk);
}

int smpboot_unpark_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry(cur, &hotplug_threads, list)
		if (cpumask_test_cpu(cpu, cur->cpumask))
			smpboot_unpark_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
{
	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

	if (tsk && !ht->selfparking)
		kthread_park(tsk);
}

int smpboot_park_threads(unsigned int cpu)
{
	struct smp_hotplug_thread *cur;

	mutex_lock(&smpboot_threads_lock);
	list_for_each_entry_reverse(cur, &hotplug_threads, list)
		smpboot_park_thread(cur, cpu);
	mutex_unlock(&smpboot_threads_lock);
	return 0;
}

static void smpboot_destroy_threads(struct smp_hotplug_thread *ht)
{
	unsigned int cpu;

	/* We also need to destroy the parked threads of offline cpus */
	for_each_possible_cpu(cpu) {
		struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);

		if (tsk) {
			kthread_stop(tsk);
			put_task_struct(tsk);
			*per_cpu_ptr(ht->store, cpu) = NULL;
		}
	}
}

/**
 * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related
 *					    to hotplug
 * @plug_thread:	Hotplug thread descriptor
 * @cpumask:		The cpumask where threads run
 *
 * Creates the threads on all online cpus and unparks (starts) those
 * which fall within @cpumask.
 */
int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
					   const struct cpumask *cpumask)
{
	unsigned int cpu;
	int ret = 0;

	if (!alloc_cpumask_var(&plug_thread->cpumask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(plug_thread->cpumask, cpumask);

	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	for_each_online_cpu(cpu) {
		ret = __smpboot_create_thread(plug_thread, cpu);
		if (ret) {
			smpboot_destroy_threads(plug_thread);
			free_cpumask_var(plug_thread->cpumask);
			goto out;
		}
		if (cpumask_test_cpu(cpu, cpumask))
			smpboot_unpark_thread(plug_thread, cpu);
	}
	list_add(&plug_thread->list, &hotplug_threads);
out:
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread_cpumask);
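
/*
 * Usage sketch (hypothetical client; my_task, my_should_run, my_thread_fn
 * and my_threads are illustrative names, not part of this file):
 *
 *	static DEFINE_PER_CPU(struct task_struct *, my_task);
 *
 *	static int my_should_run(unsigned int cpu)
 *	{
 *		return ...;	// nonzero when this CPU has work pending
 *	}
 *
 *	static void my_thread_fn(unsigned int cpu)
 *	{
 *		// process one batch of work for this CPU, then return
 *	}
 *
 *	static struct smp_hotplug_thread my_threads = {
 *		.store			= &my_task,
 *		.thread_should_run	= my_should_run,
 *		.thread_fn		= my_thread_fn,
 *		.thread_comm		= "my_thread/%u",
 *	};
 *
 *	ret = smpboot_register_percpu_thread_cpumask(&my_threads,
 *						     cpu_possible_mask);
 *
 * Threads for CPUs outside the mask (or not yet online) remain parked;
 * smpboot_unregister_percpu_thread(&my_threads) stops and frees them again.
 */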

/**
 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug
 * @plug_thread:	Hotplug thread descriptor
 *
 * Stops all threads on all possible cpus.
 */
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
{
	get_online_cpus();
	mutex_lock(&smpboot_threads_lock);
	list_del(&plug_thread->list);
	smpboot_destroy_threads(plug_thread);
	mutex_unlock(&smpboot_threads_lock);
	put_online_cpus();
	free_cpumask_var(plug_thread->cpumask);
}
EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread);

/**
 * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked
 * @plug_thread:	Hotplug thread descriptor
 * @new:		Revised mask to use
 *
 * The cpumask field in the smp_hotplug_thread must not be updated directly
 * by the client, but only by calling this function.
 * This function can only be called on a registered smp_hotplug_thread.
 */
void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
					  const struct cpumask *new)
{
	struct cpumask *old = plug_thread->cpumask;
	static struct cpumask tmp;
	unsigned int cpu;

	lockdep_assert_cpus_held();
	mutex_lock(&smpboot_threads_lock);

	/* Park threads that were exclusively enabled on the old mask. */
	cpumask_andnot(&tmp, old, new);
	for_each_cpu_and(cpu, &tmp, cpu_online_mask)
		smpboot_park_thread(plug_thread, cpu);

	/* Unpark threads that are exclusively enabled on the new mask. */
	cpumask_andnot(&tmp, new, old);
	for_each_cpu_and(cpu, &tmp, cpu_online_mask)
		smpboot_unpark_thread(plug_thread, cpu);

	cpumask_copy(old, new);

	mutex_unlock(&smpboot_threads_lock);
}
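
/*
 * The caller must hold the CPU hotplug lock across this call (see the
 * lockdep_assert_cpus_held() above). A minimal sketch of a hypothetical
 * caller:
 *
 *	get_online_cpus();
 *	smpboot_update_cpumask_percpu_thread(&my_threads, new_mask);
 *	put_online_cpus();
 */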

static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD);
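
/*
 * Rough lifecycle of cpu_hotplug_state as driven by the helpers below:
 *
 *   CPU_POST_DEAD  --cpu_check_up_prepare()-->  CPU_UP_PREPARE
 *   CPU_UP_PREPARE --cpu_set_state_online()-->  CPU_ONLINE
 *   CPU_ONLINE     --cpu_report_death()------>  CPU_DEAD
 *   CPU_DEAD       --cpu_wait_death()-------->  CPU_POST_DEAD
 *
 * If cpu_wait_death() times out it marks the CPU CPU_BROKEN; a subsequent
 * cpu_report_death() then records CPU_DEAD_FROZEN instead of CPU_DEAD, and
 * cpu_check_up_prepare() reports the mishap via -EAGAIN or -EBUSY.
 */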

/*
 * Called to poll specified CPU's state, for example, when waiting for
 * a CPU to come online.
 */
int cpu_report_state(int cpu)
{
	return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
}

/*
 * If CPU has died properly, set its state to CPU_UP_PREPARE and
 * return success. Otherwise, return -EBUSY if the CPU died after
 * cpu_wait_death() timed out. And yet otherwise again, return -EAGAIN
 * if cpu_wait_death() timed out and the CPU still hasn't gotten around
 * to dying. In the latter two cases, the CPU might not be set up
 * properly, but it is up to the arch-specific code to decide.
 * Finally, -EIO indicates an unanticipated problem.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
int cpu_check_up_prepare(int cpu)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;
	}

	switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) {

	case CPU_POST_DEAD:

		/* The CPU died properly, so just start it up again. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE);
		return 0;

	case CPU_DEAD_FROZEN:

		/*
		 * Timeout during CPU death, so let caller know.
		 * The outgoing CPU completed its processing, but after
		 * cpu_wait_death() timed out and reported the error. The
		 * caller is free to proceed, in which case the state
		 * will be reset properly by cpu_set_state_online().
		 * Proceeding despite this -EBUSY return makes sense
		 * for systems where the outgoing CPUs take themselves
		 * offline, with no post-death manipulation required from
		 * a surviving CPU.
		 */
		return -EBUSY;

	case CPU_BROKEN:

		/*
		 * The most likely reason we got here is that there was
		 * a timeout during CPU death, and the outgoing CPU never
		 * did complete its processing. This could happen on
		 * a virtualized system if the outgoing VCPU gets preempted
		 * for more than five seconds, and the user attempts to
		 * immediately online that same CPU. Trying again later
		 * might return -EBUSY above, hence -EAGAIN.
		 */
		return -EAGAIN;

	default:

		/* Should not happen. Famous last words. */
		return -EIO;
	}
}

/*
 * Mark the specified CPU online.
 *
 * Note that it is permissible to omit this call entirely, as is
 * done in architectures that do no CPU-hotplug error checking.
 */
void cpu_set_state_online(int cpu)
{
	(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
}

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Wait for the specified CPU to exit the idle loop and die.
 */
bool cpu_wait_death(unsigned int cpu, int seconds)
{
	int jf_left = seconds * HZ;
	int oldstate;
	bool ret = true;
	int sleep_jf = 1;

	might_sleep();

	/* The outgoing CPU will normally get done quite quickly. */
	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
		goto update_state;
	udelay(5);

	/* But if the outgoing CPU dawdles, wait increasingly long times. */
	while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) {
		schedule_timeout_uninterruptible(sleep_jf);
		jf_left -= sleep_jf;
		if (jf_left <= 0)
			break;
		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
	}
update_state:
	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
	if (oldstate == CPU_DEAD) {
		/* Outgoing CPU died normally, update state. */
		smp_mb(); /* atomic_read() before update. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
	} else {
		/* Outgoing CPU still hasn't died, set state accordingly. */
		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				   oldstate, CPU_BROKEN) != oldstate)
			goto update_state;
		ret = false;
	}
	return ret;
}

/*
 * Called by the outgoing CPU to report its successful death. Return
 * false if this report follows the surviving CPU's timing out.
 *
 * A separate "CPU_DEAD_FROZEN" is used when the surviving CPU
 * timed out. This approach allows architectures to omit calls to
 * cpu_check_up_prepare() and cpu_set_state_online() without defeating
 * the next cpu_wait_death()'s polling loop.
 */
bool cpu_report_death(void)
{
	int oldstate;
	int newstate;
	int cpu = smp_processor_id();

	do {
		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
		if (oldstate != CPU_BROKEN)
			newstate = CPU_DEAD;
		else
			newstate = CPU_DEAD_FROZEN;
	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
				oldstate, newstate) != oldstate);
	return newstate == CPU_DEAD;
}
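
/*
 * Typical pairing in hypothetical arch code (sketch only): a surviving CPU
 * waits for the outgoing CPU, which reports its own death on the way down:
 *
 *	// arch __cpu_die(), running on a surviving CPU
 *	if (!cpu_wait_death(cpu, 5))
 *		pr_err("CPU %u did not die\n", cpu);
 *
 *	// arch play_dead()/cpu_die(), running on the outgoing CPU
 *	(void)cpu_report_death();
 *	// ... then spin/halt in a low-power loop ...
 */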

#endif /* #ifdef CONFIG_HOTPLUG_CPU */