blob: ab860453841dfd244c141f329fa9cdecc1f9e063 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/* CPU control.
2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
3 *
4 * This code is licenced under the GPL.
5 */
6#include <linux/proc_fs.h>
7#include <linux/smp.h>
8#include <linux/init.h>
9#include <linux/notifier.h>
Ingo Molnar3f07c012017-02-08 18:51:30 +010010#include <linux/sched/signal.h>
Ingo Molnaref8bd772017-02-08 18:51:36 +010011#include <linux/sched/hotplug.h>
Ingo Molnar29930022017-02-08 18:51:36 +010012#include <linux/sched/task.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/unistd.h>
14#include <linux/cpu.h>
Anton Vorontsovcb792952012-05-31 16:26:22 -070015#include <linux/oom.h>
16#include <linux/rcupdate.h>
Paul Gortmaker9984de12011-05-23 14:51:41 -040017#include <linux/export.h>
Anton Vorontsove4cc2f82012-05-31 16:26:26 -070018#include <linux/bug.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include <linux/kthread.h>
20#include <linux/stop_machine.h>
Ingo Molnar81615b622006-06-26 00:24:32 -070021#include <linux/mutex.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090022#include <linux/gfp.h>
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +010023#include <linux/suspend.h>
Gautham R. Shenoya19423b2014-03-11 02:04:03 +053024#include <linux/lockdep.h>
Preeti U Murthy345527b2015-03-30 14:59:19 +053025#include <linux/tick.h>
Thomas Gleixnera8994182015-07-05 17:12:30 +000026#include <linux/irq.h>
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +000027#include <linux/smpboot.h>
Richard Weinbergere6d49892016-08-18 14:57:17 +020028#include <linux/relay.h>
Sebastian Andrzej Siewior6731d4f2016-08-23 14:53:19 +020029#include <linux/slab.h>
Thomas Gleixnerfc8dffd2017-05-24 10:15:40 +020030#include <linux/percpu-rwsem.h>
Thomas Gleixnercff7d372016-02-26 18:43:28 +000031
Todd E Brandtbb3632c2014-06-06 05:40:17 -070032#include <trace/events/power.h>
Thomas Gleixnercff7d372016-02-26 18:43:28 +000033#define CREATE_TRACE_POINTS
34#include <trace/events/cpuhp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035
Thomas Gleixner38498a62012-04-20 13:05:44 +000036#include "smpboot.h"
37
Thomas Gleixnercff7d372016-02-26 18:43:28 +000038/**
39 * cpuhp_cpu_state - Per cpu hotplug state storage
40 * @state: The current cpu state
41 * @target: The target state
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +000042 * @thread: Pointer to the hotplug thread
43 * @should_run: Thread should execute
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +020044 * @rollback: Perform a rollback
Thomas Gleixnera7246322016-08-12 19:49:38 +020045 * @single: Single callback invocation
46 * @bringup: Single callback bringup or teardown selector
47 * @cb_state: The state for a single callback (install/uninstall)
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +000048 * @result: Result of the operation
49 * @done: Signal completion to the issuer of the task
Thomas Gleixnercff7d372016-02-26 18:43:28 +000050 */
51struct cpuhp_cpu_state {
52 enum cpuhp_state state;
53 enum cpuhp_state target;
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +000054#ifdef CONFIG_SMP
55 struct task_struct *thread;
56 bool should_run;
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +020057 bool rollback;
Thomas Gleixnera7246322016-08-12 19:49:38 +020058 bool single;
59 bool bringup;
Thomas Gleixnercf392d12016-08-12 19:49:39 +020060 struct hlist_node *node;
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +000061 enum cpuhp_state cb_state;
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +000062 int result;
63 struct completion done;
64#endif
Thomas Gleixnercff7d372016-02-26 18:43:28 +000065};
66
67static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
68
Thomas Gleixner49dfe2a2017-05-24 10:15:43 +020069#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
70static struct lock_class_key cpuhp_state_key;
71static struct lockdep_map cpuhp_state_lock_map =
72 STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
73#endif
74
Thomas Gleixnercff7d372016-02-26 18:43:28 +000075/**
76 * cpuhp_step - Hotplug state machine step
77 * @name: Name of the step
78 * @startup: Startup function of the step
79 * @teardown: Teardown function of the step
80 * @skip_onerr: Do not invoke the functions on error rollback
81 * Will go away once the notifiers are gone
Thomas Gleixner757c9892016-02-26 18:43:32 +000082 * @cant_stop: Bringup/teardown can't be stopped at this step
Thomas Gleixnercff7d372016-02-26 18:43:28 +000083 */
84struct cpuhp_step {
Thomas Gleixnercf392d12016-08-12 19:49:39 +020085 const char *name;
86 union {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +020087 int (*single)(unsigned int cpu);
88 int (*multi)(unsigned int cpu,
89 struct hlist_node *node);
90 } startup;
Thomas Gleixnercf392d12016-08-12 19:49:39 +020091 union {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +020092 int (*single)(unsigned int cpu);
93 int (*multi)(unsigned int cpu,
94 struct hlist_node *node);
95 } teardown;
Thomas Gleixnercf392d12016-08-12 19:49:39 +020096 struct hlist_head list;
97 bool skip_onerr;
98 bool cant_stop;
99 bool multi_instance;
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000100};
101
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +0000102static DEFINE_MUTEX(cpuhp_state_mutex);
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000103static struct cpuhp_step cpuhp_bp_states[];
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000104static struct cpuhp_step cpuhp_ap_states[];
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000105
Thomas Gleixnera7246322016-08-12 19:49:38 +0200106static bool cpuhp_is_ap_state(enum cpuhp_state state)
107{
108 /*
109 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
110 * purposes as that state is handled explicitly in cpu_down.
111 */
112 return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
113}
114
115static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
116{
117 struct cpuhp_step *sp;
118
119 sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
120 return sp + state;
121}
122
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000123/**
124 * cpuhp_invoke_callback _ Invoke the callbacks for a given state
125 * @cpu: The cpu for which the callback should be invoked
126 * @step: The step in the state machine
Thomas Gleixnera7246322016-08-12 19:49:38 +0200127 * @bringup: True if the bringup callback should be invoked
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000128 *
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200129 * Called from cpu hotplug and from the state register machinery.
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000130 */
Thomas Gleixnera7246322016-08-12 19:49:38 +0200131static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200132 bool bringup, struct hlist_node *node)
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000133{
134 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
Thomas Gleixnera7246322016-08-12 19:49:38 +0200135 struct cpuhp_step *step = cpuhp_get_step(state);
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200136 int (*cbm)(unsigned int cpu, struct hlist_node *node);
137 int (*cb)(unsigned int cpu);
138 int ret, cnt;
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000139
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200140 if (!step->multi_instance) {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +0200141 cb = bringup ? step->startup.single : step->teardown.single;
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200142 if (!cb)
143 return 0;
Thomas Gleixnera7246322016-08-12 19:49:38 +0200144 trace_cpuhp_enter(cpu, st->target, state, cb);
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000145 ret = cb(cpu);
Thomas Gleixnera7246322016-08-12 19:49:38 +0200146 trace_cpuhp_exit(cpu, st->state, state, ret);
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200147 return ret;
148 }
Thomas Gleixner3c1627e2016-09-05 15:28:36 +0200149 cbm = bringup ? step->startup.multi : step->teardown.multi;
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200150 if (!cbm)
151 return 0;
152
153 /* Single invocation for instance add/remove */
154 if (node) {
155 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
156 ret = cbm(cpu, node);
157 trace_cpuhp_exit(cpu, st->state, state, ret);
158 return ret;
159 }
160
161 /* State transition. Invoke on all instances */
162 cnt = 0;
163 hlist_for_each(node, &step->list) {
164 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
165 ret = cbm(cpu, node);
166 trace_cpuhp_exit(cpu, st->state, state, ret);
167 if (ret)
168 goto err;
169 cnt++;
170 }
171 return 0;
172err:
173 /* Rollback the instances if one failed */
Thomas Gleixner3c1627e2016-09-05 15:28:36 +0200174 cbm = !bringup ? step->startup.multi : step->teardown.multi;
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200175 if (!cbm)
176 return ret;
177
178 hlist_for_each(node, &step->list) {
179 if (!cnt--)
180 break;
181 cbm(cpu, node);
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000182 }
183 return ret;
184}
185
Rusty Russell98a79d62008-12-13 21:19:41 +1030186#ifdef CONFIG_SMP
Rusty Russellb3199c02008-12-30 09:05:14 +1030187/* Serializes the updates to cpu_online_mask, cpu_present_mask */
Linus Torvaldsaa953872006-07-23 12:12:16 -0700188static DEFINE_MUTEX(cpu_add_remove_lock);
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000189bool cpuhp_tasks_frozen;
190EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191
Lai Jiangshan79a6cde2010-05-26 14:43:36 -0700192/*
Srivatsa S. Bhat93ae4f92014-03-11 02:04:14 +0530193 * The following two APIs (cpu_maps_update_begin/done) must be used when
194 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
Lai Jiangshan79a6cde2010-05-26 14:43:36 -0700195 */
196void cpu_maps_update_begin(void)
197{
198 mutex_lock(&cpu_add_remove_lock);
199}
200
201void cpu_maps_update_done(void)
202{
203 mutex_unlock(&cpu_add_remove_lock);
204}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
Thomas Gleixnerfc8dffd2017-05-24 10:15:40 +0200206/*
207 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700208 * Should always be manipulated under cpu_add_remove_lock
209 */
210static int cpu_hotplug_disabled;
211
Lai Jiangshan79a6cde2010-05-26 14:43:36 -0700212#ifdef CONFIG_HOTPLUG_CPU
213
Thomas Gleixnerfc8dffd2017-05-24 10:15:40 +0200214DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
Gautham R. Shenoya19423b2014-03-11 02:04:03 +0530215
Thomas Gleixner8f553c42017-05-24 10:15:12 +0200216void cpus_read_lock(void)
Ashok Raja9d9baa2005-11-28 13:43:46 -0800217{
Thomas Gleixnerfc8dffd2017-05-24 10:15:40 +0200218 percpu_down_read(&cpu_hotplug_lock);
Ashok Raja9d9baa2005-11-28 13:43:46 -0800219}
Thomas Gleixner8f553c42017-05-24 10:15:12 +0200220EXPORT_SYMBOL_GPL(cpus_read_lock);
Ashok Raj90d45d12005-11-08 21:34:24 -0800221
Thomas Gleixner8f553c42017-05-24 10:15:12 +0200222void cpus_read_unlock(void)
Ashok Raja9d9baa2005-11-28 13:43:46 -0800223{
Thomas Gleixnerfc8dffd2017-05-24 10:15:40 +0200224 percpu_up_read(&cpu_hotplug_lock);
Ashok Raja9d9baa2005-11-28 13:43:46 -0800225}
Thomas Gleixner8f553c42017-05-24 10:15:12 +0200226EXPORT_SYMBOL_GPL(cpus_read_unlock);
Ashok Raja9d9baa2005-11-28 13:43:46 -0800227
Thomas Gleixner8f553c42017-05-24 10:15:12 +0200228void cpus_write_lock(void)
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100229{
Thomas Gleixnerfc8dffd2017-05-24 10:15:40 +0200230 percpu_down_write(&cpu_hotplug_lock);
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100231}
232
Thomas Gleixner8f553c42017-05-24 10:15:12 +0200233void cpus_write_unlock(void)
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100234{
Thomas Gleixnerfc8dffd2017-05-24 10:15:40 +0200235 percpu_up_write(&cpu_hotplug_lock);
236}
237
238void lockdep_assert_cpus_held(void)
239{
240 percpu_rwsem_assert_held(&cpu_hotplug_lock);
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100241}
Lai Jiangshan79a6cde2010-05-26 14:43:36 -0700242
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -0700243/*
244 * Wait for currently running CPU hotplug operations to complete (if any) and
245 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
246 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
247 * hotplug path before performing hotplug operations. So acquiring that lock
248 * guarantees mutual exclusion from any currently running hotplug operations.
249 */
250void cpu_hotplug_disable(void)
251{
252 cpu_maps_update_begin();
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -0700253 cpu_hotplug_disabled++;
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -0700254 cpu_maps_update_done();
255}
Vitaly Kuznetsov32145c42015-08-05 00:52:47 -0700256EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -0700257
Lianwei Wang01b41152016-06-09 23:43:28 -0700258static void __cpu_hotplug_enable(void)
259{
260 if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
261 return;
262 cpu_hotplug_disabled--;
263}
264
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -0700265void cpu_hotplug_enable(void)
266{
267 cpu_maps_update_begin();
Lianwei Wang01b41152016-06-09 23:43:28 -0700268 __cpu_hotplug_enable();
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -0700269 cpu_maps_update_done();
270}
Vitaly Kuznetsov32145c42015-08-05 00:52:47 -0700271EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
Toshi Kanib9d10be2013-08-12 09:45:53 -0600272#endif /* CONFIG_HOTPLUG_CPU */
Lai Jiangshan79a6cde2010-05-26 14:43:36 -0700273
Thomas Gleixner9cd4f1a2017-07-04 22:20:23 +0200274static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
275
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000276static int bringup_wait_for_ap(unsigned int cpu)
277{
278 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
279
Thomas Gleixner9cd4f1a2017-07-04 22:20:23 +0200280 /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000281 wait_for_completion(&st->done);
Thomas Gleixner9cd4f1a2017-07-04 22:20:23 +0200282 BUG_ON(!cpu_online(cpu));
283
284 /* Unpark the stopper thread and the hotplug thread of the target cpu */
285 stop_machine_unpark(cpu);
286 kthread_unpark(st->thread);
287
288 /* Should we go further up ? */
289 if (st->target > CPUHP_AP_ONLINE_IDLE) {
290 __cpuhp_kick_ap_work(st);
291 wait_for_completion(&st->done);
292 }
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000293 return st->result;
294}
295
Thomas Gleixnerba997462016-02-26 18:43:24 +0000296static int bringup_cpu(unsigned int cpu)
297{
298 struct task_struct *idle = idle_thread_get(cpu);
299 int ret;
300
Boris Ostrovskyaa877172016-08-03 13:22:28 -0400301 /*
302 * Some architectures have to walk the irq descriptors to
303 * setup the vector space for the cpu which comes online.
304 * Prevent irq alloc/free across the bringup.
305 */
306 irq_lock_sparse();
307
Thomas Gleixnerba997462016-02-26 18:43:24 +0000308 /* Arch-specific enabling code. */
309 ret = __cpu_up(cpu, idle);
Boris Ostrovskyaa877172016-08-03 13:22:28 -0400310 irq_unlock_sparse();
Thomas Gleixner530e9b72016-12-21 20:19:53 +0100311 if (ret)
Thomas Gleixnerba997462016-02-26 18:43:24 +0000312 return ret;
Thomas Gleixner9cd4f1a2017-07-04 22:20:23 +0200313 return bringup_wait_for_ap(cpu);
Thomas Gleixnerba997462016-02-26 18:43:24 +0000314}
315
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000316/*
317 * Hotplug state machine related functions
318 */
Thomas Gleixnera7246322016-08-12 19:49:38 +0200319static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000320{
321 for (st->state++; st->state < st->target; st->state++) {
Thomas Gleixnera7246322016-08-12 19:49:38 +0200322 struct cpuhp_step *step = cpuhp_get_step(st->state);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000323
324 if (!step->skip_onerr)
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200325 cpuhp_invoke_callback(cpu, st->state, true, NULL);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000326 }
327}
328
329static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
Thomas Gleixnera7246322016-08-12 19:49:38 +0200330 enum cpuhp_state target)
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000331{
332 enum cpuhp_state prev_state = st->state;
333 int ret = 0;
334
335 for (; st->state > target; st->state--) {
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200336 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000337 if (ret) {
338 st->target = prev_state;
Thomas Gleixnera7246322016-08-12 19:49:38 +0200339 undo_cpu_down(cpu, st);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000340 break;
341 }
342 }
343 return ret;
344}
345
Thomas Gleixnera7246322016-08-12 19:49:38 +0200346static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000347{
348 for (st->state--; st->state > st->target; st->state--) {
Thomas Gleixnera7246322016-08-12 19:49:38 +0200349 struct cpuhp_step *step = cpuhp_get_step(st->state);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000350
351 if (!step->skip_onerr)
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200352 cpuhp_invoke_callback(cpu, st->state, false, NULL);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000353 }
354}
355
356static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
Thomas Gleixnera7246322016-08-12 19:49:38 +0200357 enum cpuhp_state target)
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000358{
359 enum cpuhp_state prev_state = st->state;
360 int ret = 0;
361
362 while (st->state < target) {
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000363 st->state++;
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200364 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000365 if (ret) {
366 st->target = prev_state;
Thomas Gleixnera7246322016-08-12 19:49:38 +0200367 undo_cpu_up(cpu, st);
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000368 break;
369 }
370 }
371 return ret;
372}
373
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000374/*
375 * The cpu hotplug threads manage the bringup and teardown of the cpus
376 */
377static void cpuhp_create(unsigned int cpu)
378{
379 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
380
381 init_completion(&st->done);
382}
383
384static int cpuhp_should_run(unsigned int cpu)
385{
386 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
387
388 return st->should_run;
389}
390
391/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
392static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
393{
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000394 enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000395
Thomas Gleixnera7246322016-08-12 19:49:38 +0200396 return cpuhp_down_callbacks(cpu, st, target);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000397}
398
399/* Execute the online startup callbacks. Used to be CPU_ONLINE */
400static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
401{
Thomas Gleixnera7246322016-08-12 19:49:38 +0200402 return cpuhp_up_callbacks(cpu, st, st->target);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000403}
404
405/*
406 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
407 * callbacks when a state gets [un]installed at runtime.
408 */
409static void cpuhp_thread_fun(unsigned int cpu)
410{
411 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
412 int ret = 0;
413
414 /*
415 * Paired with the mb() in cpuhp_kick_ap_work and
416 * cpuhp_invoke_ap_callback, so the work set is consistent visible.
417 */
418 smp_mb();
419 if (!st->should_run)
420 return;
421
422 st->should_run = false;
423
Thomas Gleixner49dfe2a2017-05-24 10:15:43 +0200424 lock_map_acquire(&cpuhp_state_lock_map);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000425 /* Single callback invocation for [un]install ? */
Thomas Gleixnera7246322016-08-12 19:49:38 +0200426 if (st->single) {
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000427 if (st->cb_state < CPUHP_AP_ONLINE) {
428 local_irq_disable();
Thomas Gleixnera7246322016-08-12 19:49:38 +0200429 ret = cpuhp_invoke_callback(cpu, st->cb_state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200430 st->bringup, st->node);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000431 local_irq_enable();
432 } else {
Thomas Gleixnera7246322016-08-12 19:49:38 +0200433 ret = cpuhp_invoke_callback(cpu, st->cb_state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200434 st->bringup, st->node);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000435 }
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +0200436 } else if (st->rollback) {
437 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
438
Thomas Gleixnera7246322016-08-12 19:49:38 +0200439 undo_cpu_down(cpu, st);
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +0200440 st->rollback = false;
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000441 } else {
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000442 /* Cannot happen .... */
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000443 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000444
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000445 /* Regular hotplug work */
446 if (st->state < st->target)
447 ret = cpuhp_ap_online(cpu, st);
448 else if (st->state > st->target)
449 ret = cpuhp_ap_offline(cpu, st);
450 }
Thomas Gleixner49dfe2a2017-05-24 10:15:43 +0200451 lock_map_release(&cpuhp_state_lock_map);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000452 st->result = ret;
453 complete(&st->done);
454}
455
456/* Invoke a single callback on a remote cpu */
Thomas Gleixnera7246322016-08-12 19:49:38 +0200457static int
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200458cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
459 struct hlist_node *node)
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000460{
461 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
462
463 if (!cpu_online(cpu))
464 return 0;
465
Thomas Gleixner49dfe2a2017-05-24 10:15:43 +0200466 lock_map_acquire(&cpuhp_state_lock_map);
467 lock_map_release(&cpuhp_state_lock_map);
468
Thomas Gleixner6a4e2452016-07-13 17:16:03 +0000469 /*
470 * If we are up and running, use the hotplug thread. For early calls
471 * we invoke the thread function directly.
472 */
473 if (!st->thread)
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200474 return cpuhp_invoke_callback(cpu, state, bringup, node);
Thomas Gleixner6a4e2452016-07-13 17:16:03 +0000475
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000476 st->cb_state = state;
Thomas Gleixnera7246322016-08-12 19:49:38 +0200477 st->single = true;
478 st->bringup = bringup;
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200479 st->node = node;
Thomas Gleixnera7246322016-08-12 19:49:38 +0200480
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000481 /*
482 * Make sure the above stores are visible before should_run becomes
483 * true. Paired with the mb() above in cpuhp_thread_fun()
484 */
485 smp_mb();
486 st->should_run = true;
487 wake_up_process(st->thread);
488 wait_for_completion(&st->done);
489 return st->result;
490}
491
492/* Regular hotplug invocation of the AP hotplug thread */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000493static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000494{
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000495 st->result = 0;
Thomas Gleixnera7246322016-08-12 19:49:38 +0200496 st->single = false;
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000497 /*
498 * Make sure the above stores are visible before should_run becomes
499 * true. Paired with the mb() above in cpuhp_thread_fun()
500 */
501 smp_mb();
502 st->should_run = true;
503 wake_up_process(st->thread);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000504}
505
506static int cpuhp_kick_ap_work(unsigned int cpu)
507{
508 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
509 enum cpuhp_state state = st->state;
510
511 trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
Thomas Gleixner49dfe2a2017-05-24 10:15:43 +0200512 lock_map_acquire(&cpuhp_state_lock_map);
513 lock_map_release(&cpuhp_state_lock_map);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000514 __cpuhp_kick_ap_work(st);
Thomas Gleixner4cb28ce2016-02-26 18:43:38 +0000515 wait_for_completion(&st->done);
516 trace_cpuhp_exit(cpu, st->state, state, st->result);
517 return st->result;
518}
519
520static struct smp_hotplug_thread cpuhp_threads = {
521 .store = &cpuhp_state.thread,
522 .create = &cpuhp_create,
523 .thread_should_run = cpuhp_should_run,
524 .thread_fn = cpuhp_thread_fun,
525 .thread_comm = "cpuhp/%u",
526 .selfparking = true,
527};
528
529void __init cpuhp_threads_init(void)
530{
531 BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
532 kthread_unpark(this_cpu_read(cpuhp_state.thread));
533}
534
Michal Hocko777c6e02016-12-07 14:54:38 +0100535#ifdef CONFIG_HOTPLUG_CPU
Anton Vorontsove4cc2f82012-05-31 16:26:26 -0700536/**
537 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
538 * @cpu: a CPU id
539 *
540 * This function walks all processes, finds a valid mm struct for each one and
541 * then clears a corresponding bit in mm's cpumask. While this all sounds
542 * trivial, there are various non-obvious corner cases, which this function
543 * tries to solve in a safe manner.
544 *
545 * Also note that the function uses a somewhat relaxed locking scheme, so it may
546 * be called only for an already offlined CPU.
547 */
Anton Vorontsovcb792952012-05-31 16:26:22 -0700548void clear_tasks_mm_cpumask(int cpu)
549{
550 struct task_struct *p;
551
552 /*
553 * This function is called after the cpu is taken down and marked
554 * offline, so its not like new tasks will ever get this cpu set in
555 * their mm mask. -- Peter Zijlstra
556 * Thus, we may use rcu_read_lock() here, instead of grabbing
557 * full-fledged tasklist_lock.
558 */
Anton Vorontsove4cc2f82012-05-31 16:26:26 -0700559 WARN_ON(cpu_online(cpu));
Anton Vorontsovcb792952012-05-31 16:26:22 -0700560 rcu_read_lock();
561 for_each_process(p) {
562 struct task_struct *t;
563
Anton Vorontsove4cc2f82012-05-31 16:26:26 -0700564 /*
565 * Main thread might exit, but other threads may still have
566 * a valid mm. Find one.
567 */
Anton Vorontsovcb792952012-05-31 16:26:22 -0700568 t = find_lock_task_mm(p);
569 if (!t)
570 continue;
571 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
572 task_unlock(t);
573 }
574 rcu_read_unlock();
575}
576
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577/* Take this CPU down. */
Mathias Krause71cf5ae2015-07-19 20:06:22 +0200578static int take_cpu_down(void *_param)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700579{
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000580 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
581 enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000582 int err, cpu = smp_processor_id();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700583
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584 /* Ensure this CPU doesn't handle any more interrupts. */
585 err = __cpu_disable();
586 if (err < 0)
Zwane Mwaikambof3705132005-06-25 14:54:50 -0700587 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700588
Thomas Gleixnera7246322016-08-12 19:49:38 +0200589 /*
590 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
591 * do this step again.
592 */
593 WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
594 st->state--;
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000595 /* Invoke the former CPU_DYING callbacks */
Thomas Gleixnera7246322016-08-12 19:49:38 +0200596 for (; st->state > target; st->state--)
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200597 cpuhp_invoke_callback(cpu, st->state, false, NULL);
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000598
Thomas Gleixner52c063d2015-04-03 02:37:24 +0200599 /* Give up timekeeping duties */
600 tick_handover_do_timer();
Thomas Gleixner14e568e2013-01-31 12:11:14 +0000601 /* Park the stopper thread */
Thomas Gleixner090e77c2016-02-26 18:43:23 +0000602 stop_machine_park(cpu);
Zwane Mwaikambof3705132005-06-25 14:54:50 -0700603 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604}
605
Thomas Gleixner98458172016-02-26 18:43:25 +0000606static int takedown_cpu(unsigned int cpu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607{
Thomas Gleixnere69aab12016-02-26 18:43:43 +0000608 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
Thomas Gleixner98458172016-02-26 18:43:25 +0000609 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610
Thomas Gleixner2a58c522016-03-10 20:42:08 +0100611 /* Park the smpboot threads */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000612 kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
Thomas Gleixner2a58c522016-03-10 20:42:08 +0100613 smpboot_park_threads(cpu);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000614
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200615 /*
Thomas Gleixnera8994182015-07-05 17:12:30 +0000616 * Prevent irq alloc/free while the dying cpu reorganizes the
617 * interrupt affinities.
618 */
619 irq_lock_sparse();
620
621 /*
Peter Zijlstra6acce3e2013-10-11 14:38:20 +0200622 * So now all preempt/rcu users must observe !cpu_active().
623 */
Sebastian Andrzej Siewior210e2132017-05-24 10:15:28 +0200624 err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
Rusty Russell04321582008-07-28 12:16:29 -0500625 if (err) {
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +0200626 /* CPU refused to die */
Thomas Gleixnera8994182015-07-05 17:12:30 +0000627 irq_unlock_sparse();
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +0200628 /* Unpark the hotplug thread so we can rollback there */
629 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
Thomas Gleixner98458172016-02-26 18:43:25 +0000630 return err;
Satoru Takeuchi8fa1d7d2006-10-28 10:38:57 -0700631 }
Rusty Russell04321582008-07-28 12:16:29 -0500632 BUG_ON(cpu_online(cpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700633
Peter Zijlstra48c5ccae2010-11-13 19:32:29 +0100634 /*
Thomas Gleixneree1e7142016-08-18 14:57:16 +0200635 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
Peter Zijlstra48c5ccae2010-11-13 19:32:29 +0100636 * runnable tasks from the cpu, there's only the idle task left now
637 * that the migration thread is done doing the stop_machine thing.
Peter Zijlstra51a96c72010-11-19 20:37:53 +0100638 *
639 * Wait for the stop thread to go away.
Peter Zijlstra48c5ccae2010-11-13 19:32:29 +0100640 */
Thomas Gleixnere69aab12016-02-26 18:43:43 +0000641 wait_for_completion(&st->done);
642 BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643
Thomas Gleixnera8994182015-07-05 17:12:30 +0000644 /* Interrupts are moved away from the dying cpu, reenable alloc/free */
645 irq_unlock_sparse();
646
Preeti U Murthy345527b2015-03-30 14:59:19 +0530647 hotplug_cpu__broadcast_tick_pull(cpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700648 /* This actually kills the CPU. */
649 __cpu_die(cpu);
650
Thomas Gleixnera49b1162015-04-03 02:38:05 +0200651 tick_cleanup_dead_cpu(cpu);
Thomas Gleixner98458172016-02-26 18:43:25 +0000652 return 0;
653}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654
Thomas Gleixner71f87b22016-03-03 10:52:10 +0100655static void cpuhp_complete_idle_dead(void *arg)
656{
657 struct cpuhp_cpu_state *st = arg;
658
659 complete(&st->done);
660}
661
Thomas Gleixnere69aab12016-02-26 18:43:43 +0000662void cpuhp_report_idle_dead(void)
663{
664 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
665
666 BUG_ON(st->state != CPUHP_AP_OFFLINE);
Thomas Gleixner27d50c72016-02-26 18:43:44 +0000667 rcu_report_dead(smp_processor_id());
Thomas Gleixner71f87b22016-03-03 10:52:10 +0100668 st->state = CPUHP_AP_IDLE_DEAD;
669 /*
670 * We cannot call complete after rcu_report_dead() so we delegate it
671 * to an online cpu.
672 */
673 smp_call_function_single(cpumask_first(cpu_online_mask),
674 cpuhp_complete_idle_dead, st, 0);
Thomas Gleixnere69aab12016-02-26 18:43:43 +0000675}
676
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000677#else
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000678#define takedown_cpu NULL
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000679#endif
680
681#ifdef CONFIG_HOTPLUG_CPU
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000682
Thomas Gleixner98458172016-02-26 18:43:25 +0000683/* Requires cpu_add_remove_lock to be held */
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000684static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
685 enum cpuhp_state target)
Thomas Gleixner98458172016-02-26 18:43:25 +0000686{
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000687 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
688 int prev_state, ret = 0;
Thomas Gleixner98458172016-02-26 18:43:25 +0000689
690 if (num_online_cpus() == 1)
691 return -EBUSY;
692
Thomas Gleixner757c9892016-02-26 18:43:32 +0000693 if (!cpu_present(cpu))
Thomas Gleixner98458172016-02-26 18:43:25 +0000694 return -EINVAL;
695
Thomas Gleixner8f553c42017-05-24 10:15:12 +0200696 cpus_write_lock();
Thomas Gleixner98458172016-02-26 18:43:25 +0000697
698 cpuhp_tasks_frozen = tasks_frozen;
699
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000700 prev_state = st->state;
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000701 st->target = target;
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000702 /*
703 * If the current CPU state is in the range of the AP hotplug thread,
704 * then we need to kick the thread.
705 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000706 if (st->state > CPUHP_TEARDOWN_CPU) {
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000707 ret = cpuhp_kick_ap_work(cpu);
708 /*
709 * The AP side has done the error rollback already. Just
710 * return the error code..
711 */
712 if (ret)
713 goto out;
714
715 /*
716 * We might have stopped still in the range of the AP hotplug
717 * thread. Nothing to do anymore.
718 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000719 if (st->state > CPUHP_TEARDOWN_CPU)
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000720 goto out;
721 }
722 /*
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000723 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000724 * to do the further cleanups.
725 */
Thomas Gleixnera7246322016-08-12 19:49:38 +0200726 ret = cpuhp_down_callbacks(cpu, st, target);
Sebastian Andrzej Siewior3b9d6da2016-04-08 14:40:15 +0200727 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
728 st->target = prev_state;
729 st->rollback = true;
730 cpuhp_kick_ap_work(cpu);
731 }
Thomas Gleixner98458172016-02-26 18:43:25 +0000732
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000733out:
Thomas Gleixner8f553c42017-05-24 10:15:12 +0200734 cpus_write_unlock();
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000735 return ret;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700736}
737
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000738static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700739{
Heiko Carstens9ea09af2008-12-22 12:36:30 +0100740 int err;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700741
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100742 cpu_maps_update_begin();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700743
Max Krasnyanskye761b772008-07-15 04:43:49 -0700744 if (cpu_hotplug_disabled) {
745 err = -EBUSY;
746 goto out;
747 }
748
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000749 err = _cpu_down(cpu, 0, target);
Max Krasnyanskye761b772008-07-15 04:43:49 -0700750
Max Krasnyanskye761b772008-07-15 04:43:49 -0700751out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100752 cpu_maps_update_done();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753 return err;
754}
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000755int cpu_down(unsigned int cpu)
756{
757 return do_cpu_down(cpu, CPUHP_OFFLINE);
758}
Zhang Ruib62b8ef2008-04-29 02:35:56 -0400759EXPORT_SYMBOL(cpu_down);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700760#endif /*CONFIG_HOTPLUG_CPU*/
761
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000762/**
Thomas Gleixneree1e7142016-08-18 14:57:16 +0200763 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000764 * @cpu: cpu that just started
765 *
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000766 * It must be called by the arch code on the new cpu, before the new cpu
767 * enables interrupts and before the "boot" cpu returns from __cpu_up().
768 */
769void notify_cpu_starting(unsigned int cpu)
770{
771 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
772 enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
773
Sebastian Andrzej Siewior0c6d4572016-08-17 14:21:04 +0200774 rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000775 while (st->state < target) {
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000776 st->state++;
Thomas Gleixnercf392d12016-08-12 19:49:39 +0200777 cpuhp_invoke_callback(cpu, st->state, true, NULL);
Thomas Gleixner4baa0af2016-02-26 18:43:29 +0000778 }
779}
780
Thomas Gleixner949338e2016-02-26 18:43:35 +0000781/*
Thomas Gleixner9cd4f1a2017-07-04 22:20:23 +0200782 * Called from the idle task. Wake up the controlling task which brings the
783 * stopper and the hotplug thread of the upcoming CPU up and then delegates
784 * the rest of the online bringup to the hotplug thread.
Thomas Gleixner949338e2016-02-26 18:43:35 +0000785 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000786void cpuhp_online_idle(enum cpuhp_state state)
Thomas Gleixner949338e2016-02-26 18:43:35 +0000787{
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000788 struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000789
790 /* Happens for the boot cpu */
791 if (state != CPUHP_AP_ONLINE_IDLE)
792 return;
793
794 st->state = CPUHP_AP_ONLINE_IDLE;
Thomas Gleixner9cd4f1a2017-07-04 22:20:23 +0200795 complete(&st->done);
Thomas Gleixner949338e2016-02-26 18:43:35 +0000796}
797
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700798/* Requires cpu_add_remove_lock to be held */
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000799static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700800{
Thomas Gleixnercff7d372016-02-26 18:43:28 +0000801 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -0700802 struct task_struct *idle;
Thomas Gleixner2e1a3482016-02-26 18:43:37 +0000803 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804
Thomas Gleixner8f553c42017-05-24 10:15:12 +0200805 cpus_write_lock();
Thomas Gleixner38498a62012-04-20 13:05:44 +0000806
Thomas Gleixner757c9892016-02-26 18:43:32 +0000807 if (!cpu_present(cpu)) {
Yasuaki Ishimatsu5e5041f2012-10-23 01:30:54 +0200808 ret = -EINVAL;
809 goto out;
810 }
811
Thomas Gleixner757c9892016-02-26 18:43:32 +0000812 /*
813 * The caller of do_cpu_up might have raced with another
814 * caller. Ignore it for now.
815 */
816 if (st->state >= target)
Thomas Gleixner38498a62012-04-20 13:05:44 +0000817 goto out;
Thomas Gleixner757c9892016-02-26 18:43:32 +0000818
819 if (st->state == CPUHP_OFFLINE) {
820 /* Let it fail before we try to bring the cpu up */
821 idle = idle_thread_get(cpu);
822 if (IS_ERR(idle)) {
823 ret = PTR_ERR(idle);
824 goto out;
825 }
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -0700826 }
Thomas Gleixner38498a62012-04-20 13:05:44 +0000827
Thomas Gleixnerba997462016-02-26 18:43:24 +0000828 cpuhp_tasks_frozen = tasks_frozen;
829
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000830 st->target = target;
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000831 /*
832 * If the current CPU state is in the range of the AP hotplug thread,
833 * then we need to kick the thread once more.
834 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000835 if (st->state > CPUHP_BRINGUP_CPU) {
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000836 ret = cpuhp_kick_ap_work(cpu);
837 /*
838 * The AP side has done the error rollback already. Just
839 * return the error code..
840 */
841 if (ret)
842 goto out;
843 }
844
845 /*
846 * Try to reach the target state. We max out on the BP at
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000847 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
Thomas Gleixner1cf4f622016-02-26 18:43:39 +0000848 * responsible for bringing it up to the target state.
849 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +0000850 target = min((int)target, CPUHP_BRINGUP_CPU);
Thomas Gleixnera7246322016-08-12 19:49:38 +0200851 ret = cpuhp_up_callbacks(cpu, st, target);
Thomas Gleixner38498a62012-04-20 13:05:44 +0000852out:
Thomas Gleixner8f553c42017-05-24 10:15:12 +0200853 cpus_write_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854 return ret;
855}
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700856
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000857static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700858{
859 int err = 0;
minskey guocf234222010-05-24 14:32:41 -0700860
Rusty Russelle0b582e2009-01-01 10:12:28 +1030861 if (!cpu_possible(cpu)) {
Fabian Frederick84117da2014-06-04 16:11:17 -0700862 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
863 cpu);
Chen Gong87d5e0232010-03-05 13:42:38 -0800864#if defined(CONFIG_IA64)
Fabian Frederick84117da2014-06-04 16:11:17 -0700865 pr_err("please check additional_cpus= boot parameter\n");
KAMEZAWA Hiroyuki73e753a2007-10-18 23:40:47 -0700866#endif
867 return -EINVAL;
868 }
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700869
Toshi Kani01b0f192013-11-12 15:07:25 -0800870 err = try_online_node(cpu_to_node(cpu));
871 if (err)
872 return err;
minskey guocf234222010-05-24 14:32:41 -0700873
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100874 cpu_maps_update_begin();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700875
Max Krasnyanskye761b772008-07-15 04:43:49 -0700876 if (cpu_hotplug_disabled) {
877 err = -EBUSY;
878 goto out;
879 }
880
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000881 err = _cpu_up(cpu, 0, target);
Max Krasnyanskye761b772008-07-15 04:43:49 -0700882out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100883 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700884 return err;
885}
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000886
887int cpu_up(unsigned int cpu)
888{
889 return do_cpu_up(cpu, CPUHP_ONLINE);
890}
Paul E. McKenneya513f6b2011-12-11 21:54:45 -0800891EXPORT_SYMBOL_GPL(cpu_up);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700892
Rafael J. Wysockif3de4be2007-08-30 23:56:29 -0700893#ifdef CONFIG_PM_SLEEP_SMP
Rusty Russelle0b582e2009-01-01 10:12:28 +1030894static cpumask_var_t frozen_cpus;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700895
James Morsed391e552016-08-17 13:50:25 +0100896int freeze_secondary_cpus(int primary)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700897{
James Morsed391e552016-08-17 13:50:25 +0100898 int cpu, error = 0;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700899
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100900 cpu_maps_update_begin();
James Morsed391e552016-08-17 13:50:25 +0100901 if (!cpu_online(primary))
902 primary = cpumask_first(cpu_online_mask);
Xiaotian Feng9ee349a2009-12-16 18:04:32 +0100903 /*
904 * We take down all of the non-boot CPUs in one shot to avoid races
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700905 * with the userspace trying to use the CPU hotplug at the same time
906 */
Rusty Russelle0b582e2009-01-01 10:12:28 +1030907 cpumask_clear(frozen_cpus);
Peter Zijlstra6ad4c182009-11-25 13:31:39 +0100908
Fabian Frederick84117da2014-06-04 16:11:17 -0700909 pr_info("Disabling non-boot CPUs ...\n");
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700910 for_each_online_cpu(cpu) {
James Morsed391e552016-08-17 13:50:25 +0100911 if (cpu == primary)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700912 continue;
Todd E Brandtbb3632c2014-06-06 05:40:17 -0700913 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000914 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
Todd E Brandtbb3632c2014-06-06 05:40:17 -0700915 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
Mike Travisfeae3202009-11-17 18:22:13 -0600916 if (!error)
Rusty Russelle0b582e2009-01-01 10:12:28 +1030917 cpumask_set_cpu(cpu, frozen_cpus);
Mike Travisfeae3202009-11-17 18:22:13 -0600918 else {
Fabian Frederick84117da2014-06-04 16:11:17 -0700919 pr_err("Error taking CPU%d down: %d\n", cpu, error);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700920 break;
921 }
922 }
Joseph Cihula86886e52009-06-30 19:31:07 -0700923
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -0700924 if (!error)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700925 BUG_ON(num_online_cpus() > 1);
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -0700926 else
Fabian Frederick84117da2014-06-04 16:11:17 -0700927 pr_err("Non-boot CPUs are not disabled\n");
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -0700928
929 /*
930 * Make sure the CPUs won't be enabled by someone else. We need to do
931 * this even in case of failure as all disable_nonboot_cpus() users are
932 * supposed to do enable_nonboot_cpus() on the failure path.
933 */
934 cpu_hotplug_disabled++;
935
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100936 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700937 return error;
938}
939
Suresh Siddhad0af9ee2009-08-19 18:05:36 -0700940void __weak arch_enable_nonboot_cpus_begin(void)
941{
942}
943
944void __weak arch_enable_nonboot_cpus_end(void)
945{
946}
947
Mathias Krause71cf5ae2015-07-19 20:06:22 +0200948void enable_nonboot_cpus(void)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700949{
950 int cpu, error;
951
952 /* Allow everyone to use the CPU hotplug again */
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100953 cpu_maps_update_begin();
Lianwei Wang01b41152016-06-09 23:43:28 -0700954 __cpu_hotplug_enable();
Rusty Russelle0b582e2009-01-01 10:12:28 +1030955 if (cpumask_empty(frozen_cpus))
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -0700956 goto out;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700957
Fabian Frederick84117da2014-06-04 16:11:17 -0700958 pr_info("Enabling non-boot CPUs ...\n");
Suresh Siddhad0af9ee2009-08-19 18:05:36 -0700959
960 arch_enable_nonboot_cpus_begin();
961
Rusty Russelle0b582e2009-01-01 10:12:28 +1030962 for_each_cpu(cpu, frozen_cpus) {
Todd E Brandtbb3632c2014-06-06 05:40:17 -0700963 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
Thomas Gleixneraf1f4042016-02-26 18:43:30 +0000964 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
Todd E Brandtbb3632c2014-06-06 05:40:17 -0700965 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700966 if (!error) {
Fabian Frederick84117da2014-06-04 16:11:17 -0700967 pr_info("CPU%d is up\n", cpu);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700968 continue;
969 }
Fabian Frederick84117da2014-06-04 16:11:17 -0700970 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700971 }
Suresh Siddhad0af9ee2009-08-19 18:05:36 -0700972
973 arch_enable_nonboot_cpus_end();
974
Rusty Russelle0b582e2009-01-01 10:12:28 +1030975 cpumask_clear(frozen_cpus);
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -0700976out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +0100977 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -0700978}
Rusty Russelle0b582e2009-01-01 10:12:28 +1030979
Fenghua Yud7268a32011-11-15 21:59:31 +0100980static int __init alloc_frozen_cpus(void)
Rusty Russelle0b582e2009-01-01 10:12:28 +1030981{
982 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
983 return -ENOMEM;
984 return 0;
985}
986core_initcall(alloc_frozen_cpus);
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +0100987
988/*
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +0100989 * When callbacks for CPU hotplug notifications are being executed, we must
990 * ensure that the state of the system with respect to the tasks being frozen
991 * or not, as reported by the notification, remains unchanged *throughout the
992 * duration* of the execution of the callbacks.
993 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
994 *
995 * This synchronization is implemented by mutually excluding regular CPU
996 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
997 * Hibernate notifications.
998 */
999static int
1000cpu_hotplug_pm_callback(struct notifier_block *nb,
1001 unsigned long action, void *ptr)
1002{
1003 switch (action) {
1004
1005 case PM_SUSPEND_PREPARE:
1006 case PM_HIBERNATION_PREPARE:
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -07001007 cpu_hotplug_disable();
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001008 break;
1009
1010 case PM_POST_SUSPEND:
1011 case PM_POST_HIBERNATION:
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -07001012 cpu_hotplug_enable();
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001013 break;
1014
1015 default:
1016 return NOTIFY_DONE;
1017 }
1018
1019 return NOTIFY_OK;
1020}
1021
1022
Fenghua Yud7268a32011-11-15 21:59:31 +01001023static int __init cpu_hotplug_pm_sync_init(void)
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001024{
Fenghua Yu6e32d472012-11-13 11:32:43 -08001025 /*
1026 * cpu_hotplug_pm_callback has higher priority than x86
1027 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1028 * to disable cpu hotplug to avoid cpu hotplug race.
1029 */
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001030 pm_notifier(cpu_hotplug_pm_callback, 0);
1031 return 0;
1032}
1033core_initcall(cpu_hotplug_pm_sync_init);
1034
Rafael J. Wysockif3de4be2007-08-30 23:56:29 -07001035#endif /* CONFIG_PM_SLEEP_SMP */
Max Krasnyansky68f4f1e2008-05-29 11:17:02 -07001036
Peter Zijlstra8ce371f2017-03-20 12:26:55 +01001037int __boot_cpu_id;
1038
Max Krasnyansky68f4f1e2008-05-29 11:17:02 -07001039#endif /* CONFIG_SMP */
Mike Travisb8d317d2008-07-24 18:21:29 -07001040
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001041/* Boot processor state steps */
1042static struct cpuhp_step cpuhp_bp_states[] = {
1043 [CPUHP_OFFLINE] = {
1044 .name = "offline",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001045 .startup.single = NULL,
1046 .teardown.single = NULL,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001047 },
1048#ifdef CONFIG_SMP
1049 [CPUHP_CREATE_THREADS]= {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001050 .name = "threads:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001051 .startup.single = smpboot_create_threads,
1052 .teardown.single = NULL,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001053 .cant_stop = true,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001054 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001055 [CPUHP_PERF_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001056 .name = "perf:prepare",
1057 .startup.single = perf_event_init_cpu,
1058 .teardown.single = perf_event_exit_cpu,
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001059 },
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001060 [CPUHP_WORKQUEUE_PREP] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001061 .name = "workqueue:prepare",
1062 .startup.single = workqueue_prepare_cpu,
1063 .teardown.single = NULL,
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001064 },
Thomas Gleixner27590dc2016-07-15 10:41:04 +02001065 [CPUHP_HRTIMERS_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001066 .name = "hrtimers:prepare",
1067 .startup.single = hrtimers_prepare_cpu,
1068 .teardown.single = hrtimers_dead_cpu,
Thomas Gleixner27590dc2016-07-15 10:41:04 +02001069 },
Richard Weinberger31487f82016-07-13 17:17:01 +00001070 [CPUHP_SMPCFD_PREPARE] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001071 .name = "smpcfd:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001072 .startup.single = smpcfd_prepare_cpu,
1073 .teardown.single = smpcfd_dead_cpu,
Richard Weinberger31487f82016-07-13 17:17:01 +00001074 },
Richard Weinbergere6d49892016-08-18 14:57:17 +02001075 [CPUHP_RELAY_PREPARE] = {
1076 .name = "relay:prepare",
1077 .startup.single = relay_prepare_cpu,
1078 .teardown.single = NULL,
1079 },
Sebastian Andrzej Siewior6731d4f2016-08-23 14:53:19 +02001080 [CPUHP_SLAB_PREPARE] = {
1081 .name = "slab:prepare",
1082 .startup.single = slab_prepare_cpu,
1083 .teardown.single = slab_dead_cpu,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001084 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001085 [CPUHP_RCUTREE_PREP] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001086 .name = "RCU/tree:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001087 .startup.single = rcutree_prepare_cpu,
1088 .teardown.single = rcutree_dead_cpu,
Thomas Gleixner4df83742016-07-13 17:17:03 +00001089 },
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001090 /*
Richard Cochran4fae16d2016-07-27 11:08:18 +02001091 * On the tear-down path, timers_dead_cpu() must be invoked
1092 * before blk_mq_queue_reinit_notify() from notify_dead(),
1093 * otherwise a RCU stall occurs.
1094 */
1095 [CPUHP_TIMERS_DEAD] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001096 .name = "timers:dead",
1097 .startup.single = NULL,
1098 .teardown.single = timers_dead_cpu,
Richard Cochran4fae16d2016-07-27 11:08:18 +02001099 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001100 /* Kicks the plugged cpu into life */
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001101 [CPUHP_BRINGUP_CPU] = {
1102 .name = "cpu:bringup",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001103 .startup.single = bringup_cpu,
1104 .teardown.single = NULL,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001105 .cant_stop = true,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001106 },
Richard Weinberger31487f82016-07-13 17:17:01 +00001107 [CPUHP_AP_SMPCFD_DYING] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001108 .name = "smpcfd:dying",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001109 .startup.single = NULL,
1110 .teardown.single = smpcfd_dying_cpu,
Richard Weinberger31487f82016-07-13 17:17:01 +00001111 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001112 /*
1113 * Handled on controll processor until the plugged processor manages
1114 * this itself.
1115 */
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001116 [CPUHP_TEARDOWN_CPU] = {
1117 .name = "cpu:teardown",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001118 .startup.single = NULL,
1119 .teardown.single = takedown_cpu,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001120 .cant_stop = true,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001121 },
Thomas Gleixnera7c734142016-07-12 21:59:23 +02001122#else
1123 [CPUHP_BRINGUP_CPU] = { },
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001124#endif
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001125};
1126
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001127/* Application processor state steps */
1128static struct cpuhp_step cpuhp_ap_states[] = {
1129#ifdef CONFIG_SMP
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001130 /* Final state before CPU kills itself */
1131 [CPUHP_AP_IDLE_DEAD] = {
1132 .name = "idle:dead",
1133 },
1134 /*
1135 * Last state before CPU enters the idle loop to die. Transient state
1136 * for synchronization.
1137 */
1138 [CPUHP_AP_OFFLINE] = {
1139 .name = "ap:offline",
1140 .cant_stop = true,
1141 },
Thomas Gleixner9cf72432016-03-10 12:54:09 +01001142 /* First state is scheduler control. Interrupts are disabled */
1143 [CPUHP_AP_SCHED_STARTING] = {
1144 .name = "sched:starting",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001145 .startup.single = sched_cpu_starting,
1146 .teardown.single = sched_cpu_dying,
Thomas Gleixner9cf72432016-03-10 12:54:09 +01001147 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001148 [CPUHP_AP_RCUTREE_DYING] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001149 .name = "RCU/tree:dying",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001150 .startup.single = NULL,
1151 .teardown.single = rcutree_dying_cpu,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001152 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001153 /* Entry state on starting. Interrupts enabled from here on. Transient
1154 * state for synchronsization */
1155 [CPUHP_AP_ONLINE] = {
1156 .name = "ap:online",
1157 },
1158 /* Handle smpboot threads park/unpark */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001159 [CPUHP_AP_SMPBOOT_THREADS] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001160 .name = "smpboot/threads:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001161 .startup.single = smpboot_unpark_threads,
1162 .teardown.single = NULL,
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001163 },
Thomas Gleixnerc5cb83b2017-06-20 01:37:51 +02001164 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1165 .name = "irq/affinity:online",
1166 .startup.single = irq_affinity_online_cpu,
1167 .teardown.single = NULL,
1168 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001169 [CPUHP_AP_PERF_ONLINE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001170 .name = "perf:online",
1171 .startup.single = perf_event_init_cpu,
1172 .teardown.single = perf_event_exit_cpu,
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001173 },
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001174 [CPUHP_AP_WORKQUEUE_ONLINE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001175 .name = "workqueue:online",
1176 .startup.single = workqueue_online_cpu,
1177 .teardown.single = workqueue_offline_cpu,
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001178 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001179 [CPUHP_AP_RCUTREE_ONLINE] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001180 .name = "RCU/tree:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001181 .startup.single = rcutree_online_cpu,
1182 .teardown.single = rcutree_offline_cpu,
Thomas Gleixner4df83742016-07-13 17:17:03 +00001183 },
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001184#endif
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001185 /*
1186 * The dynamically registered state space is here
1187 */
1188
Thomas Gleixneraaddd7d2016-03-10 12:54:19 +01001189#ifdef CONFIG_SMP
1190 /* Last state is scheduler control setting the cpu active */
1191 [CPUHP_AP_ACTIVE] = {
1192 .name = "sched:active",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001193 .startup.single = sched_cpu_activate,
1194 .teardown.single = sched_cpu_deactivate,
Thomas Gleixneraaddd7d2016-03-10 12:54:19 +01001195 },
1196#endif
1197
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001198 /* CPU is fully up and running. */
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001199 [CPUHP_ONLINE] = {
1200 .name = "online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001201 .startup.single = NULL,
1202 .teardown.single = NULL,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001203 },
1204};
1205
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001206/* Sanity check for callbacks */
1207static int cpuhp_cb_check(enum cpuhp_state state)
1208{
1209 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1210 return -EINVAL;
1211 return 0;
1212}
1213
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001214/*
1215 * Return a free slot in the requested dynamic range (CPUHP_AP_ONLINE_DYN or
1216 * CPUHP_BP_PREPARE_DYN). The states are protected by cpuhp_state_mutex and an
1217 * empty slot is identified by having no name assigned.
1218 */
1219static int cpuhp_reserve_state(enum cpuhp_state state)
1220{
Thomas Gleixner4205e472017-01-10 14:01:05 +01001221 enum cpuhp_state i, end;
1222 struct cpuhp_step *step;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001223
Thomas Gleixner4205e472017-01-10 14:01:05 +01001224 switch (state) {
1225 case CPUHP_AP_ONLINE_DYN:
1226 step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
1227 end = CPUHP_AP_ONLINE_DYN_END;
1228 break;
1229 case CPUHP_BP_PREPARE_DYN:
1230 step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
1231 end = CPUHP_BP_PREPARE_DYN_END;
1232 break;
1233 default:
1234 return -EINVAL;
1235 }
1236
1237 for (i = state; i <= end; i++, step++) {
1238 if (!step->name)
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001239 return i;
1240 }
1241 WARN(1, "No more dynamic states available for CPU hotplug\n");
1242 return -ENOSPC;
1243}
1244
1245static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1246 int (*startup)(unsigned int cpu),
1247 int (*teardown)(unsigned int cpu),
1248 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001249{
1250 /* (Un)Install the callbacks for further cpu hotplug operations */
1251 struct cpuhp_step *sp;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001252 int ret = 0;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001253
Thomas Gleixner4205e472017-01-10 14:01:05 +01001254 if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001255 ret = cpuhp_reserve_state(state);
1256 if (ret < 0)
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001257 return ret;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001258 state = ret;
1259 }
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001260 sp = cpuhp_get_step(state);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001261 if (name && sp->name)
1262 return -EBUSY;
1263
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001264 sp->startup.single = startup;
1265 sp->teardown.single = teardown;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001266 sp->name = name;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001267 sp->multi_instance = multi_instance;
1268 INIT_HLIST_HEAD(&sp->list);
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001269 return ret;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001270}
1271
1272static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1273{
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001274 return cpuhp_get_step(state)->teardown.single;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001275}
1276
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001277/*
1278 * Call the startup/teardown function for a step either on the AP or
1279 * on the current CPU.
1280 */
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001281static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1282 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001283{
Thomas Gleixnera7246322016-08-12 19:49:38 +02001284 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001285 int ret;
1286
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001287 if ((bringup && !sp->startup.single) ||
1288 (!bringup && !sp->teardown.single))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001289 return 0;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001290 /*
1291 * The non-AP-bound callbacks can fail on bringup. On teardown,
1292 * e.g. during module removal, we crash for now.
1293 */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001294#ifdef CONFIG_SMP
1295 if (cpuhp_is_ap_state(state))
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001296 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001297 else
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001298 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001299#else
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001300 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001301#endif
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001302 BUG_ON(ret && !bringup);
1303 return ret;
1304}
1305
1306/*
1307 * Called from __cpuhp_setup_state on a recoverable failure.
1308 *
1309 * Note: The teardown callbacks for rollback are not allowed to fail!
1310 */
1311static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001312 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001313{
1314 int cpu;
1315
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001316 /* Roll back the already executed steps on the other cpus */
1317 for_each_present_cpu(cpu) {
1318 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1319 int cpustate = st->state;
1320
1321 if (cpu >= failedcpu)
1322 break;
1323
1324 /* Did we invoke the startup call on that cpu ? */
1325 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001326 cpuhp_issue_call(cpu, state, false, node);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001327 }
1328}
1329
Thomas Gleixner9805c672017-05-24 10:15:15 +02001330int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1331 struct hlist_node *node,
1332 bool invoke)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001333{
1334 struct cpuhp_step *sp;
1335 int cpu;
1336 int ret;
1337
Thomas Gleixner9805c672017-05-24 10:15:15 +02001338 lockdep_assert_cpus_held();
1339
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001340 sp = cpuhp_get_step(state);
1341 if (sp->multi_instance == false)
1342 return -EINVAL;
1343
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001344 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001345
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001346 if (!invoke || !sp->startup.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001347 goto add_node;
1348
1349 /*
1350 * Try to call the startup callback for each present cpu
1351 * depending on the hotplug state of the cpu.
1352 */
1353 for_each_present_cpu(cpu) {
1354 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1355 int cpustate = st->state;
1356
1357 if (cpustate < state)
1358 continue;
1359
1360 ret = cpuhp_issue_call(cpu, state, true, node);
1361 if (ret) {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001362 if (sp->teardown.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001363 cpuhp_rollback_install(cpu, state, node);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001364 goto unlock;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001365 }
1366 }
1367add_node:
1368 ret = 0;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001369 hlist_add_head(node, &sp->list);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001370unlock:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001371 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixner9805c672017-05-24 10:15:15 +02001372 return ret;
1373}
1374
1375int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1376 bool invoke)
1377{
1378 int ret;
1379
1380 cpus_read_lock();
1381 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001382 cpus_read_unlock();
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001383 return ret;
1384}
1385EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
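/*
 * A minimal usage sketch for the multi-instance variant, kept under #if 0 so
 * it is not compiled here: a driver registers the state once through the
 * cpuhp_setup_state_multi() wrapper from <linux/cpuhotplug.h> and then adds
 * one hlist_node per device instance, which runs the startup callback for
 * that node on every CPU already at or beyond the state. The bar_* names
 * below are hypothetical.
 */
#if 0	/* example only */
static enum cpuhp_state bar_hp_online;

struct bar_device {
	struct hlist_node node;		/* handed to the hotplug core */
	/* driver specific fields ... */
};

static int bar_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct bar_device *bar = hlist_entry(node, struct bar_device, node);

	/* Bring this instance up on @cpu; a failure triggers rollback. */
	(void)bar;
	return 0;
}

static int __init bar_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "subsys/bar:online",
				      bar_cpu_online, NULL);
	if (ret < 0)
		return ret;
	bar_hp_online = ret;		/* reserved dynamic slot */
	return 0;
}

static int bar_probe(struct bar_device *bar)
{
	/* Invokes bar_cpu_online() for this node on all CPUs >= the state. */
	return cpuhp_state_add_instance(bar_hp_online, &bar->node);
}
#endif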
1386
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001387/**
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001388 * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001389 * @state: The state to setup
1390 * @invoke: If true, the startup function is invoked for cpus where
1391 * cpu state >= @state
1392 * @startup: startup callback function
1393 * @teardown: teardown callback function
1394 * @multi_instance: State is set up for multiple instances which get
1395 * added afterwards.
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001396 *
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001397 * The caller needs to hold cpus read locked while calling this function.
Boris Ostrovsky512f0982016-12-15 10:00:57 -05001398 * Returns:
1399 * On success:
1400 * Positive state number if @state is CPUHP_AP_ONLINE_DYN
1401 * 0 for all other states
1402 * On failure: proper (negative) error code
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001403 */
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001404int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1405 const char *name, bool invoke,
1406 int (*startup)(unsigned int cpu),
1407 int (*teardown)(unsigned int cpu),
1408 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001409{
1410 int cpu, ret = 0;
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001411 bool dynstate;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001412
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001413 lockdep_assert_cpus_held();
1414
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001415 if (cpuhp_cb_check(state) || !name)
1416 return -EINVAL;
1417
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001418 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001419
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001420 ret = cpuhp_store_callbacks(state, name, startup, teardown,
1421 multi_instance);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001422
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001423 dynstate = state == CPUHP_AP_ONLINE_DYN;
1424 if (ret > 0 && dynstate) {
1425 state = ret;
1426 ret = 0;
1427 }
1428
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001429 if (ret || !invoke || !startup)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001430 goto out;
1431
1432 /*
1433 * Try to call the startup callback for each present cpu
1434 * depending on the hotplug state of the cpu.
1435 */
1436 for_each_present_cpu(cpu) {
1437 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1438 int cpustate = st->state;
1439
1440 if (cpustate < state)
1441 continue;
1442
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001443 ret = cpuhp_issue_call(cpu, state, true, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001444 if (ret) {
Thomas Gleixnera7246322016-08-12 19:49:38 +02001445 if (teardown)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001446 cpuhp_rollback_install(cpu, state, NULL);
1447 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001448 goto out;
1449 }
1450 }
1451out:
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001452 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001453 /*
1454 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1455 * dynamically allocated state in case of success.
1456 */
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001457 if (!ret && dynstate)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001458 return state;
1459 return ret;
1460}
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001461EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1462
1463int __cpuhp_setup_state(enum cpuhp_state state,
1464 const char *name, bool invoke,
1465 int (*startup)(unsigned int cpu),
1466 int (*teardown)(unsigned int cpu),
1467 bool multi_instance)
1468{
1469 int ret;
1470
1471 cpus_read_lock();
1472 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1473 teardown, multi_instance);
1474 cpus_read_unlock();
1475 return ret;
1476}
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001477EXPORT_SYMBOL(__cpuhp_setup_state);
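/*
 * A minimal usage sketch, kept under #if 0 so it is not compiled here: a
 * typical caller uses the cpuhp_setup_state() convenience wrapper from
 * <linux/cpuhotplug.h> with CPUHP_AP_ONLINE_DYN and stores the returned
 * state number so the state can be removed again later. The foo_* names
 * below are hypothetical.
 */
#if 0	/* example only */
static enum cpuhp_state foo_hp_online;

static int foo_online(unsigned int cpu)
{
	/* Set up per-cpu resources; returning an error rolls back all CPUs. */
	return 0;
}

static int foo_offline(unsigned int cpu)
{
	/* Teardown is not allowed to fail. */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys/foo:online",
				foo_online, foo_offline);
	if (ret < 0)
		return ret;
	foo_hp_online = ret;	/* the dynamically reserved state number */
	return 0;
}
#endif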
1478
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001479int __cpuhp_state_remove_instance(enum cpuhp_state state,
1480 struct hlist_node *node, bool invoke)
1481{
1482 struct cpuhp_step *sp = cpuhp_get_step(state);
1483 int cpu;
1484
1485 BUG_ON(cpuhp_cb_check(state));
1486
1487 if (!sp->multi_instance)
1488 return -EINVAL;
1489
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001490 cpus_read_lock();
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001491 mutex_lock(&cpuhp_state_mutex);
1492
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001493 if (!invoke || !cpuhp_get_teardown_cb(state))
1494 goto remove;
1495 /*
1496 * Call the teardown callback for each present cpu depending
1497 * on the hotplug state of the cpu. This function is not
1498 * allowed to fail currently!
1499 */
1500 for_each_present_cpu(cpu) {
1501 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1502 int cpustate = st->state;
1503
1504 if (cpustate >= state)
1505 cpuhp_issue_call(cpu, state, false, node);
1506 }
1507
1508remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001509 hlist_del(node);
1510 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001511 cpus_read_unlock();
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001512
1513 return 0;
1514}
1515EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001516
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001517/**
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001518 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001519 * @state: The state to remove
1520 * @invoke: If true, the teardown function is invoked for cpus where
1521 * cpu state >= @state
1522 *
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001523 * The caller needs to hold cpus read locked while calling this function.
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001524 * The teardown callback is currently not allowed to fail. Think
1525 * about module removal!
1526 */
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001527void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001528{
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001529 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001530 int cpu;
1531
1532 BUG_ON(cpuhp_cb_check(state));
1533
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001534 lockdep_assert_cpus_held();
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001535
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001536 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001537 if (sp->multi_instance) {
1538 WARN(!hlist_empty(&sp->list),
1539 "Error: Removing state %d which has instances left.\n",
1540 state);
1541 goto remove;
1542 }
1543
Thomas Gleixnera7246322016-08-12 19:49:38 +02001544 if (!invoke || !cpuhp_get_teardown_cb(state))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001545 goto remove;
1546
1547 /*
1548 * Call the teardown callback for each present cpu depending
1549 * on the hotplug state of the cpu. This function is not
1550 * allowed to fail currently!
1551 */
1552 for_each_present_cpu(cpu) {
1553 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1554 int cpustate = st->state;
1555
1556 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001557 cpuhp_issue_call(cpu, state, false, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001558 }
1559remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001560 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001561 mutex_unlock(&cpuhp_state_mutex);
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001562}
1563EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
1564
1565void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1566{
1567 cpus_read_lock();
1568 __cpuhp_remove_state_cpuslocked(state, invoke);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001569 cpus_read_unlock();
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001570}
1571EXPORT_SYMBOL(__cpuhp_remove_state);
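/*
 * A minimal counterpart to the setup sketch above, again under #if 0:
 * removing a dynamically allocated state invokes the teardown callback on
 * every CPU that reached the state before the callbacks are dropped.
 * foo_hp_online is the hypothetical variable from the earlier sketch.
 */
#if 0	/* example only */
static void __exit foo_exit(void)
{
	cpuhp_remove_state(foo_hp_online);
}
#endif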
1572
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001573#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1574static ssize_t show_cpuhp_state(struct device *dev,
1575 struct device_attribute *attr, char *buf)
1576{
1577 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1578
1579 return sprintf(buf, "%d\n", st->state);
1580}
1581static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1582
Thomas Gleixner757c9892016-02-26 18:43:32 +00001583static ssize_t write_cpuhp_target(struct device *dev,
1584 struct device_attribute *attr,
1585 const char *buf, size_t count)
1586{
1587 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1588 struct cpuhp_step *sp;
1589 int target, ret;
1590
1591 ret = kstrtoint(buf, 10, &target);
1592 if (ret)
1593 return ret;
1594
1595#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1596 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1597 return -EINVAL;
1598#else
1599 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1600 return -EINVAL;
1601#endif
1602
1603 ret = lock_device_hotplug_sysfs();
1604 if (ret)
1605 return ret;
1606
1607 mutex_lock(&cpuhp_state_mutex);
1608 sp = cpuhp_get_step(target);
1609 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1610 mutex_unlock(&cpuhp_state_mutex);
1611 if (ret)
Sebastian Andrzej Siewior40da1b12017-06-02 16:27:14 +02001612 goto out;
Thomas Gleixner757c9892016-02-26 18:43:32 +00001613
1614 if (st->state < target)
1615 ret = do_cpu_up(dev->id, target);
1616 else
1617 ret = do_cpu_down(dev->id, target);
Sebastian Andrzej Siewior40da1b12017-06-02 16:27:14 +02001618out:
Thomas Gleixner757c9892016-02-26 18:43:32 +00001619 unlock_device_hotplug();
1620 return ret ? ret : count;
1621}
1622
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001623static ssize_t show_cpuhp_target(struct device *dev,
1624 struct device_attribute *attr, char *buf)
1625{
1626 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1627
1628 return sprintf(buf, "%d\n", st->target);
1629}
Thomas Gleixner757c9892016-02-26 18:43:32 +00001630static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001631
1632static struct attribute *cpuhp_cpu_attrs[] = {
1633 &dev_attr_state.attr,
1634 &dev_attr_target.attr,
1635 NULL
1636};
1637
Arvind Yadav993647a2017-06-29 17:40:47 +05301638static const struct attribute_group cpuhp_cpu_attr_group = {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001639 .attrs = cpuhp_cpu_attrs,
1640 .name = "hotplug",
1642};
1643
1644static ssize_t show_cpuhp_states(struct device *dev,
1645 struct device_attribute *attr, char *buf)
1646{
1647 ssize_t cur, res = 0;
1648 int i;
1649
1650 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner757c9892016-02-26 18:43:32 +00001651 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001652 struct cpuhp_step *sp = cpuhp_get_step(i);
1653
1654 if (sp->name) {
1655 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1656 buf += cur;
1657 res += cur;
1658 }
1659 }
1660 mutex_unlock(&cpuhp_state_mutex);
1661 return res;
1662}
1663static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1664
1665static struct attribute *cpuhp_cpu_root_attrs[] = {
1666 &dev_attr_states.attr,
1667 NULL
1668};
1669
Arvind Yadav993647a2017-06-29 17:40:47 +05301670static const struct attribute_group cpuhp_cpu_root_attr_group = {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001671 .attrs = cpuhp_cpu_root_attrs,
1672 .name = "hotplug",
1674};
1675
1676static int __init cpuhp_sysfs_init(void)
1677{
1678 int cpu, ret;
1679
1680 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
1681 &cpuhp_cpu_root_attr_group);
1682 if (ret)
1683 return ret;
1684
1685 for_each_possible_cpu(cpu) {
1686 struct device *dev = get_cpu_device(cpu);
1687
1688 if (!dev)
1689 continue;
1690 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
1691 if (ret)
1692 return ret;
1693 }
1694 return 0;
1695}
1696device_initcall(cpuhp_sysfs_init);
1697#endif
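/*
 * Rough sketch of the resulting sysfs layout (assuming sysfs is mounted at
 * /sys); the exact semantics are defined by the show/store handlers above:
 *
 *   /sys/devices/system/cpu/hotplug/states       list of named states
 *   /sys/devices/system/cpu/cpuN/hotplug/state   current state of cpuN
 *   /sys/devices/system/cpu/cpuN/hotplug/target  writable target state
 *
 * Writing 0 (CPUHP_OFFLINE) to "target" drives the CPU down and writing the
 * CPUHP_ONLINE value brings it up; intermediate states are only accepted
 * when CONFIG_CPU_HOTPLUG_STATE_CONTROL is enabled.
 */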
1698
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001699/*
1700 * cpu_bit_bitmap[] is a special, "compressed" data structure that
1701 * represents all NR_CPUS bits binary values of 1<<nr.
1702 *
Rusty Russelle0b582e2009-01-01 10:12:28 +10301703 * It is used by cpumask_of() to get a constant address to a CPU
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001704 * mask value that has a single bit set only.
1705 */
Mike Travisb8d317d2008-07-24 18:21:29 -07001706
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001707/* cpu_bit_bitmap[0] is empty - so we can back into it */
Michael Rodriguez4d519852011-03-22 16:34:07 -07001708#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001709#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
1710#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
1711#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
Mike Travisb8d317d2008-07-24 18:21:29 -07001712
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001713const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
Mike Travisb8d317d2008-07-24 18:21:29 -07001714
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001715 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
1716 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
1717#if BITS_PER_LONG > 32
1718 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
1719 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
Mike Travisb8d317d2008-07-24 18:21:29 -07001720#endif
1721};
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001722EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
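/*
 * A sketch of how cpumask_of() indexes this table; this mirrors what
 * get_cpu_mask() in <linux/cpumask.h> does. Row 1 + cpu % BITS_PER_LONG has
 * exactly that bit set in its first word, and stepping the pointer back by
 * cpu / BITS_PER_LONG words places the bit in the correct word of the
 * returned mask. example_cpu_mask() is only an illustrative name.
 */
#if 0	/* example only */
static const struct cpumask *example_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	p -= cpu / BITS_PER_LONG;
	return to_cpumask(p);
}
#endif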
Rusty Russell2d3854a2008-11-05 13:39:10 +11001723
1724const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
1725EXPORT_SYMBOL(cpu_all_bits);
Rusty Russellb3199c02008-12-30 09:05:14 +10301726
1727#ifdef CONFIG_INIT_ALL_POSSIBLE
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001728struct cpumask __cpu_possible_mask __read_mostly
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08001729 = {CPU_BITS_ALL};
Rusty Russellb3199c02008-12-30 09:05:14 +10301730#else
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001731struct cpumask __cpu_possible_mask __read_mostly;
Rusty Russellb3199c02008-12-30 09:05:14 +10301732#endif
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001733EXPORT_SYMBOL(__cpu_possible_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10301734
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001735struct cpumask __cpu_online_mask __read_mostly;
1736EXPORT_SYMBOL(__cpu_online_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10301737
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001738struct cpumask __cpu_present_mask __read_mostly;
1739EXPORT_SYMBOL(__cpu_present_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10301740
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001741struct cpumask __cpu_active_mask __read_mostly;
1742EXPORT_SYMBOL(__cpu_active_mask);
Rusty Russell3fa41522008-12-30 09:05:16 +10301743
Rusty Russell3fa41522008-12-30 09:05:16 +10301744void init_cpu_present(const struct cpumask *src)
1745{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08001746 cpumask_copy(&__cpu_present_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10301747}
1748
1749void init_cpu_possible(const struct cpumask *src)
1750{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08001751 cpumask_copy(&__cpu_possible_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10301752}
1753
1754void init_cpu_online(const struct cpumask *src)
1755{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08001756 cpumask_copy(&__cpu_online_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10301757}
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001758
1759/*
1760 * Activate the first processor.
1761 */
1762void __init boot_cpu_init(void)
1763{
1764 int cpu = smp_processor_id();
1765
1766 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
1767 set_cpu_online(cpu, true);
1768 set_cpu_active(cpu, true);
1769 set_cpu_present(cpu, true);
1770 set_cpu_possible(cpu, true);
Peter Zijlstra8ce371f2017-03-20 12:26:55 +01001771
1772#ifdef CONFIG_SMP
1773 __boot_cpu_id = cpu;
1774#endif
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001775}
1776
1777/*
1778 * Must be called _AFTER_ setting up the per_cpu areas
1779 */
1780void __init boot_cpu_state_init(void)
1781{
1782 per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
1783}