/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 */
struct cpuhp_step {
	const char	*name;
	int		(*startup)(unsigned int cpu);
	int		(*teardown)(unsigned int cpu);
	bool		skip_onerr;
};

static struct cpuhp_step cpuhp_bp_states[];

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @step:	The step in the state machine
 * @cb:		The callback function to invoke
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
				 int (*cb)(unsigned int))
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret = 0;

	if (cb) {
		trace_cpuhp_enter(cpu, st->target, step, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, step, ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	/* wait queue to wake up the active_writer */
	wait_queue_head_t wq;
	/* verifies that no writer will get active while readers are active */
	struct mutex lock;
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	atomic_t refcount;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	atomic_inc(&cpu_hotplug.refcount);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	int refcount;

	if (cpu_hotplug.active_writer == current)
		return;

	refcount = atomic_dec_return(&cpu_hotplug.refcount);
	if (WARN_ON(refcount < 0)) /* try to fix things up */
		atomic_inc(&cpu_hotplug.refcount);

	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
		wake_up(&cpu_hotplug.wq);

	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

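/*
 * Illustrative reader-side usage (not part of this file's logic): code that
 * needs the set of online CPUs to stay stable over a block typically
 * brackets it like
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);	(do_something() is hypothetical)
 *	put_online_cpus();
 *
 * so that a concurrent hotplug writer in cpu_hotplug_begin() sleeps until
 * the reader section has dropped the refcount.
 */
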
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
void cpu_hotplug_begin(void)
{
	DEFINE_WAIT(wait);

	cpu_hotplug.active_writer = current;
	cpuhp_lock_acquire();

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (likely(!atomic_read(&cpu_hotplug.refcount)))
			break;
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
	finish_wait(&cpu_hotplug.wq, &wait);
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
#endif	/* CONFIG_HOTPLUG_CPU */

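/*
 * Illustrative note: a hotplug notifier is a notifier_block whose callback
 * receives the CPU number through the hcpu cookie and switches on the
 * action code (usually with CPU_TASKS_FROZEN masked off when suspend/resume
 * is not of interest). See smpboot_thread_call() further down in this file
 * for an example that is registered with register_cpu_notifier().
 */
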
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

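/*
 * Call the registered hotplug notifiers with @val (OR'ed with
 * CPU_TASKS_FROZEN when the operation runs on behalf of suspend/resume) for
 * @cpu. @nr_to_call limits how many callbacks are invoked (-1 for all) and
 * the number actually called is reported back via @nr_calls. Returns a
 * -errno translated from the notifier verdict.
 */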
static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
			int *nr_calls)
{
	unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
	void *hcpu = (void *)(long)cpu;

	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, unsigned int cpu)
{
	return __cpu_notify(val, cpu, -1, NULL);
}

/* Notifier wrappers for transitioning to state machine */
static int notify_prepare(unsigned int cpu)
{
	int nr_calls = 0;
	int ret;

	ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		__cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
	}
	return ret;
}

static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}

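/*
 * Hand the new CPU to the architecture bringup code (__cpu_up()). On
 * failure, CPU_UP_CANCELED lets the notifiers undo their CPU_UP_PREPARE
 * work; on success the CPU must already be visible in cpu_online_mask.
 */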
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	BUG_ON(!cpu_online(cpu));
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it
 * may be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock(&tasklist_lock);
	for_each_process_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with unlocked task_rq(p)->lock.
		 * Order the reads so that we do not warn about a task which
		 * was running on this cpu in the past and has just been
		 * woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	}
	read_unlock(&tasklist_lock);
}

static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}

static int notify_down_prepare(unsigned int cpu)
{
	int err, nr_calls = 0;

	err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
	}
	return err;
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING, cpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

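/*
 * Teardown counterpart of bringup_cpu(): runs take_cpu_down() on the target
 * CPU via stop_machine() and then waits for the CPU to actually die.
 */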
static int takedown_cpu(unsigned int cpu)
{
	int err;

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		irq_unlock_sparse();
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	return 0;
}

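/*
 * Tell the notifiers that the CPU is gone (CPU_DEAD must not fail) and warn
 * about any task that is still reported to be running on it.
 */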
static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}

#else
#define notify_down_prepare	NULL
#define takedown_cpu		NULL
#define notify_dead		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU
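/*
 * Rollback helper for a failed CPU down operation: walk back up towards the
 * previous state and re-invoke the startup callbacks of the states that had
 * already been torn down, skipping steps marked skip_onerr.
 */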
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->startup);
	}
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = CPUHP_OFFLINE;
	for (; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;

	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}

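/*
 * External entry point for taking a CPU offline; returns -EBUSY while CPU
 * hotplug has been administratively disabled.
 */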
int cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/*
 * Unpark per-CPU smpboot kthreads at CPU-online time.
 */
static int smpboot_thread_call(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		smpboot_unpark_threads(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block smpboot_thread_notifier = {
	.notifier_call = smpboot_thread_call,
	.priority = CPU_PRI_SMPBOOT,
};

void smpboot_thread_init(void)
{
	register_cpu_notifier(&smpboot_thread_notifier);
}

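/*
 * Rollback helper for a failed CPU up operation: walk back down towards the
 * previous state and invoke the teardown callbacks of the states that had
 * already completed their startup, skipping steps marked skip_onerr.
 */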
static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_bp_states + st->state;

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int prev_state, ret = 0;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/* Let it fail before we try to bring the cpu up */
	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = CPUHP_ONLINE;
	while (st->state < st->target) {
		struct cpuhp_step *step;

		st->state++;
		step = cpuhp_bp_states + st->state;
		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
out:
	cpu_hotplug_done();
	return ret;
}

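/*
 * External entry point for onlining a CPU: checks that the CPU is possible,
 * onlines its memory node if necessary and then runs the bringup state
 * machine unless CPU hotplug has been disabled.
 */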
int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	WARN_ON(--cpu_hotplug_disabled < 0);
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
}

#endif /* CONFIG_SMP */

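/*
 * The state machine in _cpu_up()/_cpu_down() above walks the table below in
 * ascending index order when bringing a CPU up (invoking ->startup) and in
 * descending order when taking it down (invoking ->teardown).
 */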
/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup		= NULL,
		.teardown		= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:create",
		.startup		= smpboot_create_threads,
		.teardown		= NULL,
	},
	[CPUHP_NOTIFY_PREPARE] = {
		.name			= "notify:prepare",
		.startup		= notify_prepare,
		.teardown		= notify_dead,
		.skip_onerr		= true,
	},
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup		= bringup_cpu,
		.teardown		= takedown_cpu,
		.skip_onerr		= true,
	},
	[CPUHP_NOTIFY_ONLINE] = {
		.name			= "notify:online",
		.startup		= notify_online,
		.teardown		= notify_down_prepare,
	},
#endif
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup		= NULL,
		.teardown		= NULL,
	},
};

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_state_init(void)
{
	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
}