/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>

/*
 * Represents all CPUs present in the system.
 * In systems capable of hotplug, this map could dynamically grow
 * as new CPUs are detected in the system via any platform-specific
 * method, such as ACPI.
 */
cpumask_t cpu_present_map __read_mostly;
EXPORT_SYMBOL(cpu_present_map);

#ifndef CONFIG_SMP

/*
 * Represents all CPUs that are currently online.
 */
cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);

#else /* CONFIG_SMP */

/* Serializes the updates to cpu_online_map and cpu_present_map. */
static DEFINE_MUTEX(cpu_add_remove_lock);

static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up() and cpu_down() will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock.
 */
static int cpu_hotplug_disabled;

static struct {
	struct task_struct *active_writer;
	struct mutex lock;	/*
				 * Synchronizes accesses to refcount; also
				 * blocks new readers during an ongoing
				 * cpu hotplug operation.
				 */
	int refcount;
} cpu_hotplug;

void __init cpu_hotplug_init(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_init(&cpu_hotplug.lock);
	cpu_hotplug.refcount = 0;
}

cpumask_t cpu_active_map;

#ifdef CONFIG_HOTPLUG_CPU

void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
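
/*
 * Reader-side usage sketch (hypothetical caller, for illustration only):
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		... cpu cannot be hot-unplugged while we hold the ref ...
 *	put_online_cpus();
 *
 * These calls may sleep and may nest; the active hotplug writer is let
 * straight through so that notifier callbacks it invokes can take the
 * reference without deadlocking against it.
 */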

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_map and cpu_present_map.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, any new readers
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero; the last reader wakes up the sleeping
 *   writer.
 * - The last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment and bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API which is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU

EXPORT_SYMBOL(register_cpu_notifier);
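
/*
 * A minimal, hypothetical notifier sketch (illustration only, not part
 * of this file). A callback typically switches on the action and keys
 * per-cpu state on the cpu number packed into hcpu:
 *
 *	static int __cpuinit my_cpu_callback(struct notifier_block *nfb,
 *					     unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *		case CPU_UP_PREPARE_FROZEN:
 *			... allocate per-cpu state for cpu ...
 *			break;
 *		case CPU_DEAD:
 *		case CPU_DEAD_FROZEN:
 *			... free per-cpu state for cpu ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_notifier __cpuinitdata = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 * and is hooked up with register_cpu_notifier(&my_cpu_notifier).
 */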

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);

static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu &&
		    (!cputime_eq(p->utime, cputime_zero) ||
		     !cputime_eq(p->stime, cputime_zero)))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}

struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
				param->hcpu);

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Force the idle task to run as soon as we yield: it should
	   immediately notice that the cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
					hcpu, -1, &nr_calls);
	if (err == NOTIFY_BAD) {
		nr_calls--;
		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					  hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		err = -EINVAL;
		goto out_release;
	}

	/* Ensure that we are not runnable on the dying cpu */
	old_allowed = current->cpus_allowed;
	cpus_setall(tmp);
	cpu_clear(cpu, tmp);
	set_cpus_allowed_ptr(current, &tmp);

	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);

	if (IS_ERR(p) || cpu_online(cpu)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();

		if (IS_ERR(p)) {
			err = PTR_ERR(p);
			goto out_allowed;
		}
		goto out_thread;
	}

	/* Wait for it to sleep (leaving the idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
				    hcpu) == NOTIFY_BAD)
		BUG();

	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed_ptr(current, &old_allowed);
out_release:
	cpu_hotplug_done();
	if (!err) {
		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
					    hcpu) == NOTIFY_BAD)
			BUG();
	}
	return err;
}

int __ref cpu_down(unsigned int cpu)
{
	int err = 0;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	cpu_clear(cpu, cpu_active_map);

	/*
	 * Make sure that all cpus have rescheduled and are no longer
	 * using a stale version of cpu_active_map. This is not strictly
	 * necessary because the stop_machine() we run down the line
	 * already provides the required synchronization. But it's really
	 * a side effect and we do not want to depend on the innards of
	 * stop_machine here.
	 */
	synchronize_sched();

	err = _cpu_down(cpu, 0);

	if (cpu_online(cpu))
		cpu_set(cpu, cpu_active_map);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
#endif /* CONFIG_HOTPLUG_CPU */

/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
							-1, &nr_calls);
	if (ret == NOTIFY_BAD) {
		nr_calls--;
		printk("%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call the notifier chain. */
	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__raw_notifier_call_chain(&cpu_chain,
				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}

int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

	if (!cpu_isset(cpu, cpu_possible_map)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

	if (cpu_online(cpu))
		cpu_set(cpu, cpu_active_map);

out:
	cpu_maps_update_done();
	return err;
}
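
/*
 * Usage sketch (hypothetical caller, for illustration only): both
 * calls sleep, must be made from process context, and return 0 on
 * success or a negative errno. cpu_down() exists only under
 * CONFIG_HOTPLUG_CPU:
 *
 *	if (cpu_up(3))
 *		printk(KERN_ERR "failed to bring CPU3 online\n");
 *	...
 *	if (cpu_down(3))
 *		printk(KERN_ERR "failed to take CPU3 offline\n");
 */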
399
Rafael J. Wysockif3de4be2007-08-30 23:56:29 -0700400#ifdef CONFIG_PM_SLEEP_SMP
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700401static cpumask_t frozen_cpus;
402
403int disable_nonboot_cpus(void)
404{
Ingo Molnare1d9fd22006-12-23 16:55:29 +0100405 int cpu, first_cpu, error = 0;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700406
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100407 cpu_maps_update_begin();
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -0700408 first_cpu = first_cpu(cpu_online_map);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700409 /* We take down all of the non-boot CPUs in one shot to avoid races
410 * with the userspace trying to use the CPU hotplug at the same time
411 */
412 cpus_clear(frozen_cpus);
413 printk("Disabling non-boot CPUs ...\n");
414 for_each_online_cpu(cpu) {
415 if (cpu == first_cpu)
416 continue;
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -0700417 error = _cpu_down(cpu, 1);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700418 if (!error) {
419 cpu_set(cpu, frozen_cpus);
420 printk("CPU%d is down\n", cpu);
421 } else {
422 printk(KERN_ERR "Error taking CPU%d down: %d\n",
423 cpu, error);
424 break;
425 }
426 }
427 if (!error) {
428 BUG_ON(num_online_cpus() > 1);
429 /* Make sure the CPUs won't be enabled by someone else */
430 cpu_hotplug_disabled = 1;
431 } else {
Ingo Molnare1d9fd22006-12-23 16:55:29 +0100432 printk(KERN_ERR "Non-boot CPUs are not disabled\n");
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700433 }
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100434 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700435 return error;
436}
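
/*
 * disable_nonboot_cpus() and enable_nonboot_cpus() are meant to be
 * used in pairs, as the suspend/hibernate code does (a simplified
 * sketch, not lifted from that code):
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		... do work with only the boot CPU online ...
 *		enable_nonboot_cpus();
 *	}
 */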

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpus_empty(frozen_cpus))
		goto out;

	printk("Enabling non-boot CPUs ...\n");
	for_each_cpu_mask_nr(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk("CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}
	cpus_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
#endif /* CONFIG_PM_SLEEP_SMP */

#endif /* CONFIG_SMP */

#ifndef CONFIG_HAVE_CPUMASK_OF_CPU_MAP
/* 64 bits of zeros, for initializers. */
#if BITS_PER_LONG == 32
#define Z64 0, 0
#else
#define Z64 0
#endif

/* Initializer macros. */
#define CMI0(n) { .bits = { 1UL << (n) } }
#define CMI(n, ...) { .bits = { __VA_ARGS__, 1UL << ((n) % BITS_PER_LONG) } }
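
/*
 * For illustration (assuming BITS_PER_LONG == 64): CMI0(2) expands to
 *
 *	{ .bits = { 1UL << 2 } }
 *
 * i.e. a mask with only CPU 2 set, while CMI(65, Z64) expands to
 *
 *	{ .bits = { 0, 1UL << (65 % 64) } }
 *
 * i.e. bit 65 set in the second word. The CMI8/CMI64/CMI256/CMI1024
 * helpers below just stamp out runs of such single-bit masks, padding
 * the low words with Z64/Z256/Z1024 blocks of zeros as the bit index
 * crosses word boundaries.
 */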

#define CMI8(n, ...)						\
	CMI((n), __VA_ARGS__), CMI((n)+1, __VA_ARGS__),		\
	CMI((n)+2, __VA_ARGS__), CMI((n)+3, __VA_ARGS__),	\
	CMI((n)+4, __VA_ARGS__), CMI((n)+5, __VA_ARGS__),	\
	CMI((n)+6, __VA_ARGS__), CMI((n)+7, __VA_ARGS__)

#if BITS_PER_LONG == 32
#define CMI64(n, ...)						\
	CMI8((n), __VA_ARGS__), CMI8((n)+8, __VA_ARGS__),	\
	CMI8((n)+16, __VA_ARGS__), CMI8((n)+24, __VA_ARGS__),	\
	CMI8((n)+32, 0, __VA_ARGS__), CMI8((n)+40, 0, __VA_ARGS__), \
	CMI8((n)+48, 0, __VA_ARGS__), CMI8((n)+56, 0, __VA_ARGS__)
#else
#define CMI64(n, ...)						\
	CMI8((n), __VA_ARGS__), CMI8((n)+8, __VA_ARGS__),	\
	CMI8((n)+16, __VA_ARGS__), CMI8((n)+24, __VA_ARGS__),	\
	CMI8((n)+32, __VA_ARGS__), CMI8((n)+40, __VA_ARGS__),	\
	CMI8((n)+48, __VA_ARGS__), CMI8((n)+56, __VA_ARGS__)
#endif

#define CMI256(n, ...)						\
	CMI64((n), __VA_ARGS__), CMI64((n)+64, Z64, __VA_ARGS__), \
	CMI64((n)+128, Z64, Z64, __VA_ARGS__),			\
	CMI64((n)+192, Z64, Z64, Z64, __VA_ARGS__)
#define Z256 Z64, Z64, Z64, Z64

#define CMI1024(n, ...)					\
	CMI256((n), __VA_ARGS__),			\
	CMI256((n)+256, Z256, __VA_ARGS__),		\
	CMI256((n)+512, Z256, Z256, __VA_ARGS__),	\
	CMI256((n)+768, Z256, Z256, Z256, __VA_ARGS__)
#define Z1024 Z256, Z256, Z256, Z256

/* We want this statically initialized, just to be safe.  We try not
 * to waste too much space, either. */
static const cpumask_t cpumask_map[] = {
	CMI0(0), CMI0(1), CMI0(2), CMI0(3),
#if NR_CPUS > 4
	CMI0(4), CMI0(5), CMI0(6), CMI0(7),
#endif
#if NR_CPUS > 8
	CMI0(8), CMI0(9), CMI0(10), CMI0(11),
	CMI0(12), CMI0(13), CMI0(14), CMI0(15),
#endif
#if NR_CPUS > 16
	CMI0(16), CMI0(17), CMI0(18), CMI0(19),
	CMI0(20), CMI0(21), CMI0(22), CMI0(23),
	CMI0(24), CMI0(25), CMI0(26), CMI0(27),
	CMI0(28), CMI0(29), CMI0(30), CMI0(31),
#endif
#if NR_CPUS > 32
#if BITS_PER_LONG == 32
	CMI(32, 0), CMI(33, 0), CMI(34, 0), CMI(35, 0),
	CMI(36, 0), CMI(37, 0), CMI(38, 0), CMI(39, 0),
	CMI(40, 0), CMI(41, 0), CMI(42, 0), CMI(43, 0),
	CMI(44, 0), CMI(45, 0), CMI(46, 0), CMI(47, 0),
	CMI(48, 0), CMI(49, 0), CMI(50, 0), CMI(51, 0),
	CMI(52, 0), CMI(53, 0), CMI(54, 0), CMI(55, 0),
	CMI(56, 0), CMI(57, 0), CMI(58, 0), CMI(59, 0),
	CMI(60, 0), CMI(61, 0), CMI(62, 0), CMI(63, 0),
#else
	CMI0(32), CMI0(33), CMI0(34), CMI0(35),
	CMI0(36), CMI0(37), CMI0(38), CMI0(39),
	CMI0(40), CMI0(41), CMI0(42), CMI0(43),
	CMI0(44), CMI0(45), CMI0(46), CMI0(47),
	CMI0(48), CMI0(49), CMI0(50), CMI0(51),
	CMI0(52), CMI0(53), CMI0(54), CMI0(55),
	CMI0(56), CMI0(57), CMI0(58), CMI0(59),
	CMI0(60), CMI0(61), CMI0(62), CMI0(63),
#endif /* BITS_PER_LONG == 64 */
#endif
#if NR_CPUS > 64
	CMI64(64, Z64),
#endif
#if NR_CPUS > 128
	CMI64(128, Z64, Z64), CMI64(192, Z64, Z64, Z64),
#endif
#if NR_CPUS > 256
	CMI256(256, Z256),
#endif
#if NR_CPUS > 512
	CMI256(512, Z256, Z256), CMI256(768, Z256, Z256, Z256),
#endif
#if NR_CPUS > 1024
	CMI1024(1024, Z1024),
#endif
#if NR_CPUS > 2048
	CMI1024(2048, Z1024, Z1024), CMI1024(3072, Z1024, Z1024, Z1024),
#endif
#if NR_CPUS > 4096
#error NR_CPUS too big. Fix initializers or set CONFIG_HAVE_CPUMASK_OF_CPU_MAP
#endif
};

const cpumask_t *cpumask_of_cpu_map = cpumask_map;
#endif /* !CONFIG_HAVE_CPUMASK_OF_CPU_MAP */