/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <trace/events/power.h>

#include "smpboot.h"

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
 * hotplug callback (un)registration performed using __register_cpu_notifier()
 * or __unregister_cpu_notifier().
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_begin);

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
EXPORT_SYMBOL(cpu_notifier_register_done);

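/*
 * Illustrative sketch (not part of the original file; all "example_*" names
 * are hypothetical): the registration protocol the comment above describes.
 * A subsystem that must initialize state for already-online CPUs and then
 * start receiving hotplug callbacks, without racing against a concurrent
 * hotplug operation, would do roughly the following.
 */
static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	/* React to CPU_ONLINE, CPU_DEAD, etc. here. */
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_callback,
};

static void __maybe_unused example_register(void)
{
	unsigned int cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		/* set up per-cpu state for CPUs that are already up */
	}
	/* The __ variant expects the lock taken above to still be held. */
	__register_cpu_notifier(&example_cpu_nb);
	cpu_notifier_register_done();
}
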
static RAW_NOTIFIER_HEAD(cpu_chain);

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
	/* And allows lockless put_online_cpus(). */
	atomic_t puts_pending;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	.dep_map = {.name = "cpu_hotplug.lock" },
#endif
};

/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	cpuhp_lock_acquire_read();
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

bool try_get_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return true;
	if (!mutex_trylock(&cpu_hotplug.lock))
		return false;
	cpuhp_lock_acquire_tryread();
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
	return true;
}
EXPORT_SYMBOL_GPL(try_get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	if (!mutex_trylock(&cpu_hotplug.lock)) {
		atomic_inc(&cpu_hotplug.puts_pending);
		cpuhp_lock_release();
		return;
	}

	if (WARN_ON(!cpu_hotplug.refcount))
		cpu_hotplug.refcount++; /* try to fix things up */

	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}
EXPORT_SYMBOL_GPL(put_online_cpus);

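/*
 * Illustrative sketch (not part of the original file; the function name is
 * hypothetical): the reader side of the hotplug lock.  Code that walks the
 * online mask and must not race with a CPU going away brackets the walk
 * with get/put_online_cpus(); note that get_online_cpus() may sleep.
 * try_get_online_cpus() above is the non-blocking variant for callers
 * that cannot afford to block on cpu_hotplug.lock.
 */
static unsigned int __maybe_unused example_count_online_cpus(void)
{
	unsigned int cpu, n = 0;

	get_online_cpus();	/* no CPU can be unplugged from here on */
	for_each_online_cpu(cpu)
		n++;		/* cpu_online_mask is stable in this section */
	put_online_cpus();
	return n;
}
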
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	cpuhp_lock_acquire();
	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (atomic_read(&cpu_hotplug.puts_pending)) {
			int delta;

			delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
			cpu_hotplug.refcount -= delta;
		}
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
	cpuhp_lock_release();
}

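/*
 * Illustrative sketch (not part of the original file; the function name is
 * hypothetical): the writer side, as used by _cpu_down()/_cpu_up() below.
 * cpu_maps_update_begin() serializes writers against each other, then
 * cpu_hotplug_begin() waits for the reader refcount to drain to zero.
 */
static void __maybe_unused example_hotplug_write_section(void)
{
	cpu_maps_update_begin();	/* one writer at a time */
	cpu_hotplug_begin();		/* wait out all readers */

	/* ... cpu_online_mask / cpu_present_mask may be changed here ... */

	cpu_hotplug_done();
	cpu_maps_update_done();
}
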
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}

#endif	/* CONFIG_HOTPLUG_CPU */

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

int __ref __register_cpu_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cpu_chain, nb);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}

#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);
EXPORT_SYMBOL(__register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

void __ref __unregister_cpu_notifier(struct notifier_block *nb)
{
	raw_notifier_chain_unregister(&cpu_chain, nb);
}
EXPORT_SYMBOL(__unregister_cpu_notifier);

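/*
 * Illustrative sketch (hypothetical callback; not part of the original
 * file): the shape of a typical cpu_chain subscriber.  The action value
 * carries CPU_TASKS_FROZEN when the notification is raised on the
 * suspend/resume path, so callbacks commonly mask that bit off first.
 */
static int __maybe_unused example_notify(struct notifier_block *nb,
					 unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		/* Allocate per-cpu resources; an error here aborts the bring-up. */
		break;
	case CPU_ONLINE:
		pr_info("example: cpu%u came online\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
		/* CPU still online; a callback may veto via notifier_from_errno(). */
		break;
	case CPU_DEAD:
		pr_info("example: cpu%u is gone, release its resources\n", cpu);
		break;
	}
	return NOTIFY_OK;
}
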
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

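/*
 * Illustrative sketch (hypothetical arch hook; not part of the original
 * file): clear_tasks_mm_cpumask() is intended for an architecture's CPU
 * teardown path, and only after the CPU has been marked offline.
 */
static void __maybe_unused example_arch_cpu_die(unsigned int cpu)
{
	/* Legal only because the CPU is already offline at this point. */
	clear_tasks_mm_cpumask(cpu);
	/* ... followed by the arch-specific kill of the dead CPU ... */
}
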
static inline void check_for_tasks(int dead_cpu)
{
	struct task_struct *g, *p;

	read_lock_irq(&tasklist_lock);
	do_each_thread(g, p) {
		if (!p->on_rq)
			continue;
		/*
		 * We do the check with task_rq(p)->lock unlocked.
		 * Order the reads so that we do not warn about a task
		 * which was running on this cpu in the past but has
		 * just been woken on another cpu.
		 */
		rmb();
		if (task_cpu(p) != dead_cpu)
			continue;

		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
	} while_each_thread(g, p);
	read_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
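
/*
 * Illustrative sketch (hypothetical caller; not part of the original file):
 * cpu_down() is the same entry point that backs writing 0 to
 * /sys/devices/system/cpu/cpuN/online.  An in-kernel user simply calls it
 * and checks the result.
 */
static int __maybe_unused example_offline_cpu(unsigned int cpu)
{
	int err = cpu_down(cpu);

	if (err)
		pr_err("example: failed to offline CPU%u: %d\n", cpu, err);
	return err;
}
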
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		pr_warn("%s: attempt to bring up CPU %u failed\n",
			__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}

int cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL_GPL(cpu_up);

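/*
 * Illustrative sketch (simplified paraphrase, not the exact code): the
 * canonical boot-time user of cpu_up() is smp_init(), which brings up
 * every present CPU with a loop of roughly this shape.
 */
static void __maybe_unused example_bring_up_secondaries(void)
{
	unsigned int cpu;

	for_each_present_cpu(cpu) {
		if (!cpu_online(cpu))
			cpu_up(cpu);
	}
}
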
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time.
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

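/*
 * Illustrative sketch (simplified paraphrase, not the exact code): the
 * suspend core pairs the two functions above, roughly:
 *
 *	error = disable_nonboot_cpus();
 *	if (error)
 *		goto Enable_cpus;
 *	... enter the sleep state ...
 * Enable_cpus:
 *	enable_nonboot_cpus();
 */
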
static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has a higher priority than x86's
	 * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
	 * having disabled cpu hotplug, to avoid a cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}

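/*
 * Illustrative sketch (hypothetical arch code; not part of the original
 * file): a secondary CPU's startup routine is expected to call
 * notify_cpu_starting() after its low-level setup, before it marks
 * itself online and enables interrupts.
 */
static void __maybe_unused example_secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();

	/* ... arch-specific per-cpu setup runs first, interrupts off ... */
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	local_irq_enable();
	/* ... then the CPU enters its idle loop ... */
}
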
#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for every bit number nr, the NR_CPUS-bit value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
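
/*
 * Worked example (the lookup itself lives in include/linux/cpumask.h, shown
 * here only for illustration): for cpu = w * BITS_PER_LONG + b,
 * cpumask_of(cpu) resolves to roughly
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + b];
 *	p -= w;
 *
 * Row b+1 has its first word equal to 1UL << b and every other word zero,
 * so backing the pointer up by w words places that single set bit in word
 * w of the returned mask, i.e. at overall bit position 'cpu'.  The words
 * read before it come from the all-zero tails of earlier rows (and from
 * the empty row 0), so they are guaranteed to be zero.
 */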

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);

void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

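/*
 * Note: bringing a CPU online below also marks it active, so the scheduler
 * may start using it immediately.  The reverse is not done here: the active
 * bit is cleared separately, earlier in the offline path, before the CPU
 * leaves the online mask.
 */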
void set_cpu_online(unsigned int cpu, bool online)
{
	if (online) {
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	} else {
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
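
/*
 * Illustrative sketch (hypothetical arch code; not part of the original
 * file): an architecture's early SMP setup seeds these masks from what
 * the firmware reported, e.g.:
 */
static void __init __maybe_unused example_arch_register_cpus(unsigned int ncpus)
{
	unsigned int cpu;

	for (cpu = 0; cpu < ncpus && cpu < nr_cpu_ids; cpu++)
		set_cpu_present(cpu, true);
}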