/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/task.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done:	Signal completion to the issuer of the task
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lock_class_key cpuhp_state_key;
static struct lockdep_map cpuhp_state_lock_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @skip_onerr:	Do not invoke the functions on error rollback
 *		Will go away once the notifiers are gone
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			skip_onerr;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_bp_states[];
static struct cpuhp_step cpuhp_ap_states[];

static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	struct cpuhp_step *sp;

	sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
	return sp + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state in the state machine
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance states, the instance to invoke the
 *		callback for; NULL means all registered instances
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (!step->multi_instance) {
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret)
			goto err;
		cnt++;
	}
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;
		cbm(cpu, node);
	}
	return ret;
}

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}
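
/*
 * Illustrative sketch (not part of this file): typical read-side use of the
 * hotplug lock. Code that must see a stable set of online CPUs brackets the
 * traversal with cpus_read_lock()/cpus_read_unlock(). The function name and
 * loop body below are made up for illustration only.
 *
 *	static void example_walk_online_cpus(void)
 *	{
 *		unsigned int cpu;
 *
 *		cpus_read_lock();
 *		for_each_online_cpu(cpu)
 *			pr_info("cpu%u is online\n", cpu);
 *		cpus_read_unlock();
 *	}
 */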

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
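
/*
 * Illustrative sketch (not part of this file): cpu_hotplug_disable() and
 * cpu_hotplug_enable() are reference counted and must be balanced. A caller
 * that needs the set of online CPUs to stay fixed across a longer operation
 * would bracket that work like the hypothetical sequence below:
 *
 *	cpu_hotplug_disable();
 *	...work that must not race with cpu_up() or cpu_down()...
 *	cpu_hotplug_enable();
 */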
#endif	/* CONFIG_HOTPLUG_CPU */

static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_completion(&st->done);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the stopper thread and the hotplug thread of the target cpu */
	stop_machine_unpark(cpu);
	kthread_unpark(st->thread);

	/* Should we go further up ? */
	if (st->target > CPUHP_AP_ONLINE_IDLE) {
		__cpuhp_kick_ap_work(st);
		wait_for_completion(&st->done);
	}
	return st->result;
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

/*
 * Hotplug state machine related functions
 */
static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state++; st->state < st->target; st->state++) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}

static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_down(cpu, st);
			break;
		}
	}
	return ret;
}

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--) {
		struct cpuhp_step *step = cpuhp_get_step(st->state);

		if (!step->skip_onerr)
			cpuhp_invoke_callback(cpu, st->state, false, NULL);
	}
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
		if (ret) {
			st->target = prev_state;
			undo_cpu_up(cpu, st);
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/* Execute the teardown callbacks. Used to be CPU_DOWN_PREPARE */
static int cpuhp_ap_offline(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	enum cpuhp_state target = max((int)st->target, CPUHP_TEARDOWN_CPU);

	return cpuhp_down_callbacks(cpu, st, target);
}

/* Execute the online startup callbacks. Used to be CPU_ONLINE */
static int cpuhp_ap_online(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	return cpuhp_up_callbacks(cpu, st, st->target);
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	lock_map_acquire(&cpuhp_state_lock_map);
	/* Single callback invocation for [un]install ? */
	if (st->single) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state,
						    st->bringup, st->node);
		}
	} else if (st->rollback) {
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		undo_cpu_down(cpu, st);
		st->rollback = false;
	} else {
		/* Cannot happen .... */
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	lock_map_release(&cpuhp_state_lock_map);
	st->result = ret;
	complete(&st->done);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	if (!cpu_online(cpu))
		return 0;

	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node);

	st->cb_state = state;
	st->single = true;
	st->bringup = bringup;
	st->node = node;

	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_completion(&st->done);
	return st->result;
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st)
{
	st->result = 0;
	st->single = false;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state state = st->state;

	trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
	lock_map_acquire(&cpuhp_state_lock_map);
	lock_map_release(&cpuhp_state_lock_map);
	__cpuhp_kick_ap_work(st);
	wait_for_completion(&st->done);
	trace_cpuhp_exit(cpu, st->state, state, st->result);
	return st->result;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask.  While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The CPUHP_AP_SCHED_MIGRATE_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_completion(&st->done);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete(&st->done);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

#else
#define takedown_cpu		NULL
#endif

#ifdef CONFIG_HOTPLUG_CPU

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		st->target = prev_state;
		st->rollback = true;
		cpuhp_kick_ap_work(cpu);
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	return ret;
}

static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	while (st->state < target) {
		st->state++;
		cpuhp_invoke_callback(cpu, st->state, true, NULL);
	}
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * stopper and the hotplug thread of the upcoming CPU up and then delegates
 * the rest of the online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete(&st->done);
}

/* Requires cpu_add_remove_lock to be held */
static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct task_struct *idle;
	int ret = 0;

	cpus_write_lock();

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * The caller of do_cpu_up might have raced with another
	 * caller. Ignore it for now.
	 */
	if (st->state >= target)
		goto out;

	if (st->state == CPUHP_OFFLINE) {
		/* Let it fail before we try to bring the cpu up */
		idle = idle_thread_get(cpu);
		if (IS_ERR(idle)) {
			ret = PTR_ERR(idle);
			goto out;
		}
	}

	cpuhp_tasks_frozen = tasks_frozen;

	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread once more.
	 */
	if (st->state > CPUHP_BRINGUP_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;
	}

	/*
	 * Try to reach the target state. We max out on the BP at
	 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
	 * responsible for bringing it up to the target state.
	 */
	target = min((int)target, CPUHP_BRINGUP_CPU);
	ret = cpuhp_up_callbacks(cpu, st, target);
out:
	cpus_write_unlock();
	return ret;
}

static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
{
	int err = 0;

	if (!cpu_possible(cpu)) {
		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
		       cpu);
#if defined(CONFIG_IA64)
		pr_err("please check additional_cpus= boot parameter\n");
#endif
		return -EINVAL;
	}

	err = try_online_node(cpu_to_node(cpu));
	if (err)
		return err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0, target);
out:
	cpu_maps_update_done();
	return err;
}

int cpu_up(unsigned int cpu)
{
	return do_cpu_up(cpu, CPUHP_ONLINE);
}
EXPORT_SYMBOL_GPL(cpu_up);
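
/*
 * Illustrative sketch (not part of this file): cpu_down() and cpu_up() take a
 * logical CPU number and return 0 or a negative errno. A hypothetical caller
 * cycling a secondary CPU could look like this (error handling shortened):
 *
 *	int ret = cpu_down(3);
 *
 *	if (!ret)
 *		ret = cpu_up(3);
 *	if (ret)
 *		pr_err("failed to cycle CPU3: %d\n", ret);
 */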

#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

int freeze_secondary_cpus(int primary)
{
	int cpu, error = 0;

	cpu_maps_update_begin();
	if (!cpu_online(primary))
		primary = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == primary)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}

void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1, CPUHP_ONLINE);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}

static int __init alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);

/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */

int __boot_cpu_id;

#endif /* CONFIG_SMP */

/* Boot processor state steps */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_OFFLINE] = {
		.name			= "offline",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
#ifdef CONFIG_SMP
	[CPUHP_CREATE_THREADS] = {
		.name			= "threads:prepare",
		.startup.single		= smpboot_create_threads,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_PERF_PREPARE] = {
		.name			= "perf:prepare",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_WORKQUEUE_PREP] = {
		.name			= "workqueue:prepare",
		.startup.single		= workqueue_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_HRTIMERS_PREPARE] = {
		.name			= "hrtimers:prepare",
		.startup.single		= hrtimers_prepare_cpu,
		.teardown.single	= hrtimers_dead_cpu,
	},
	[CPUHP_SMPCFD_PREPARE] = {
		.name			= "smpcfd:prepare",
		.startup.single		= smpcfd_prepare_cpu,
		.teardown.single	= smpcfd_dead_cpu,
	},
	[CPUHP_RELAY_PREPARE] = {
		.name			= "relay:prepare",
		.startup.single		= relay_prepare_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_SLAB_PREPARE] = {
		.name			= "slab:prepare",
		.startup.single		= slab_prepare_cpu,
		.teardown.single	= slab_dead_cpu,
	},
	[CPUHP_RCUTREE_PREP] = {
		.name			= "RCU/tree:prepare",
		.startup.single		= rcutree_prepare_cpu,
		.teardown.single	= rcutree_dead_cpu,
	},
	/*
	 * On the tear-down path, timers_dead_cpu() must be invoked
	 * before blk_mq_queue_reinit_notify() from notify_dead(),
	 * otherwise a RCU stall occurs.
	 */
	[CPUHP_TIMERS_DEAD] = {
		.name			= "timers:dead",
		.startup.single		= NULL,
		.teardown.single	= timers_dead_cpu,
	},
	/* Kicks the plugged cpu into life */
	[CPUHP_BRINGUP_CPU] = {
		.name			= "cpu:bringup",
		.startup.single		= bringup_cpu,
		.teardown.single	= NULL,
		.cant_stop		= true,
	},
	[CPUHP_AP_SMPCFD_DYING] = {
		.name			= "smpcfd:dying",
		.startup.single		= NULL,
		.teardown.single	= smpcfd_dying_cpu,
	},
	/*
	 * Handled on control processor until the plugged processor manages
	 * this itself.
	 */
	[CPUHP_TEARDOWN_CPU] = {
		.name			= "cpu:teardown",
		.startup.single		= NULL,
		.teardown.single	= takedown_cpu,
		.cant_stop		= true,
	},
#else
	[CPUHP_BRINGUP_CPU] = { },
#endif
};

/* Application processor state steps */
static struct cpuhp_step cpuhp_ap_states[] = {
#ifdef CONFIG_SMP
	/* Final state before CPU kills itself */
	[CPUHP_AP_IDLE_DEAD] = {
		.name			= "idle:dead",
	},
	/*
	 * Last state before CPU enters the idle loop to die. Transient state
	 * for synchronization.
	 */
	[CPUHP_AP_OFFLINE] = {
		.name			= "ap:offline",
		.cant_stop		= true,
	},
	/* First state is scheduler control. Interrupts are disabled */
	[CPUHP_AP_SCHED_STARTING] = {
		.name			= "sched:starting",
		.startup.single		= sched_cpu_starting,
		.teardown.single	= sched_cpu_dying,
	},
	[CPUHP_AP_RCUTREE_DYING] = {
		.name			= "RCU/tree:dying",
		.startup.single		= NULL,
		.teardown.single	= rcutree_dying_cpu,
	},
	/*
	 * Entry state on starting. Interrupts enabled from here on.
	 * Transient state for synchronization.
	 */
	[CPUHP_AP_ONLINE] = {
		.name			= "ap:online",
	},
	/* Handle smpboot threads park/unpark */
	[CPUHP_AP_SMPBOOT_THREADS] = {
		.name			= "smpboot/threads:online",
		.startup.single		= smpboot_unpark_threads,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
		.name			= "irq/affinity:online",
		.startup.single		= irq_affinity_online_cpu,
		.teardown.single	= NULL,
	},
	[CPUHP_AP_PERF_ONLINE] = {
		.name			= "perf:online",
		.startup.single		= perf_event_init_cpu,
		.teardown.single	= perf_event_exit_cpu,
	},
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name			= "workqueue:online",
		.startup.single		= workqueue_online_cpu,
		.teardown.single	= workqueue_offline_cpu,
	},
	[CPUHP_AP_RCUTREE_ONLINE] = {
		.name			= "RCU/tree:online",
		.startup.single		= rcutree_online_cpu,
		.teardown.single	= rcutree_offline_cpu,
	},
#endif
	/*
	 * The dynamically registered state space is here
	 */

#ifdef CONFIG_SMP
	/* Last state is scheduler control setting the cpu active */
	[CPUHP_AP_ACTIVE] = {
		.name			= "sched:active",
		.startup.single		= sched_cpu_activate,
		.teardown.single	= sched_cpu_deactivate,
	},
#endif

	/* CPU is fully up and running. */
	[CPUHP_ONLINE] = {
		.name			= "online",
		.startup.single		= NULL,
		.teardown.single	= NULL,
	},
};
1213
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001214/* Sanity check for callbacks */
1215static int cpuhp_cb_check(enum cpuhp_state state)
1216{
1217 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1218 return -EINVAL;
1219 return 0;
1220}
1221
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001222/*
1223 * Returns a free slot in the dynamic range of the requested state. The states
1224 * are protected by the cpuhp_state_mutex and an empty slot is identified
1225 * by having no name assigned.
1226 */
1227static int cpuhp_reserve_state(enum cpuhp_state state)
1228{
Thomas Gleixner4205e472017-01-10 14:01:05 +01001229 enum cpuhp_state i, end;
1230 struct cpuhp_step *step;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001231
Thomas Gleixner4205e472017-01-10 14:01:05 +01001232 switch (state) {
1233 case CPUHP_AP_ONLINE_DYN:
1234 step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
1235 end = CPUHP_AP_ONLINE_DYN_END;
1236 break;
1237 case CPUHP_BP_PREPARE_DYN:
1238 step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
1239 end = CPUHP_BP_PREPARE_DYN_END;
1240 break;
1241 default:
1242 return -EINVAL;
1243 }
1244
1245 for (i = state; i <= end; i++, step++) {
1246 if (!step->name)
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001247 return i;
1248 }
1249 WARN(1, "No more dynamic states available for CPU hotplug\n");
1250 return -ENOSPC;
1251}
1252
1253static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1254 int (*startup)(unsigned int cpu),
1255 int (*teardown)(unsigned int cpu),
1256 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001257{
1258 /* (Un)Install the callbacks for further cpu hotplug operations */
1259 struct cpuhp_step *sp;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001260 int ret = 0;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001261
Ethan Barnes0c96b272017-07-19 22:36:00 +00001262 /*
1263 * If name is NULL, then the state gets removed.
1264 *
1265 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1266 * the first allocation from these dynamic ranges, so the removal
1267 * would trigger a new allocation and clear the wrong (already
1268 * empty) state, leaving the callbacks of the to be cleared state
1269 * dangling, which causes wreckage on the next hotplug operation.
1270 */
1271 if (name && (state == CPUHP_AP_ONLINE_DYN ||
1272 state == CPUHP_BP_PREPARE_DYN)) {
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001273 ret = cpuhp_reserve_state(state);
1274 if (ret < 0)
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001275 return ret;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001276 state = ret;
1277 }
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001278 sp = cpuhp_get_step(state);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001279 if (name && sp->name)
1280 return -EBUSY;
1281
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001282 sp->startup.single = startup;
1283 sp->teardown.single = teardown;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001284 sp->name = name;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001285 sp->multi_instance = multi_instance;
1286 INIT_HLIST_HEAD(&sp->list);
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001287 return ret;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001288}
1289
1290static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1291{
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001292 return cpuhp_get_step(state)->teardown.single;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001293}
1294
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001295/*
1296 * Call the startup/teardown function for a step either on the AP or
1297 * on the current CPU.
1298 */
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001299static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1300 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001301{
Thomas Gleixnera7246322016-08-12 19:49:38 +02001302 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001303 int ret;
1304
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001305 if ((bringup && !sp->startup.single) ||
1306 (!bringup && !sp->teardown.single))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001307 return 0;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001308 /*
1309 * The non-AP-bound callbacks can fail on bringup. On teardown,
1310 * e.g. module removal, we crash for now.
1311 */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001312#ifdef CONFIG_SMP
1313 if (cpuhp_is_ap_state(state))
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001314 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001315 else
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001316 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001317#else
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001318 ret = cpuhp_invoke_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001319#endif
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001320 BUG_ON(ret && !bringup);
1321 return ret;
1322}
1323
1324/*
1325 * Called from __cpuhp_setup_state on a recoverable failure.
1326 *
1327 * Note: The teardown callbacks for rollback are not allowed to fail!
1328 */
1329static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001330 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001331{
1332 int cpu;
1333
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001334 /* Roll back the already executed steps on the other cpus */
1335 for_each_present_cpu(cpu) {
1336 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1337 int cpustate = st->state;
1338
1339 if (cpu >= failedcpu)
1340 break;
1341
1342 /* Did we invoke the startup call on that cpu ? */
1343 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001344 cpuhp_issue_call(cpu, state, false, node);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001345 }
1346}
1347
Thomas Gleixner9805c672017-05-24 10:15:15 +02001348int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1349 struct hlist_node *node,
1350 bool invoke)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001351{
1352 struct cpuhp_step *sp;
1353 int cpu;
1354 int ret;
1355
Thomas Gleixner9805c672017-05-24 10:15:15 +02001356 lockdep_assert_cpus_held();
1357
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001358 sp = cpuhp_get_step(state);
1359 if (sp->multi_instance == false)
1360 return -EINVAL;
1361
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001362 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001363
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001364 if (!invoke || !sp->startup.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001365 goto add_node;
1366
1367 /*
1368 * Try to call the startup callback for each present cpu
1369 * depending on the hotplug state of the cpu.
1370 */
1371 for_each_present_cpu(cpu) {
1372 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1373 int cpustate = st->state;
1374
1375 if (cpustate < state)
1376 continue;
1377
1378 ret = cpuhp_issue_call(cpu, state, true, node);
1379 if (ret) {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001380 if (sp->teardown.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001381 cpuhp_rollback_install(cpu, state, node);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001382 goto unlock;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001383 }
1384 }
1385add_node:
1386 ret = 0;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001387 hlist_add_head(node, &sp->list);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001388unlock:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001389 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixner9805c672017-05-24 10:15:15 +02001390 return ret;
1391}
1392
1393int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1394 bool invoke)
1395{
1396 int ret;
1397
1398 cpus_read_lock();
1399 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001400 cpus_read_unlock();
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001401 return ret;
1402}
1403EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
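/*
 * Illustrative sketch, not part of this file: drivers normally reach the
 * multi-instance machinery above through the cpuhp_setup_state_multi() and
 * cpuhp_state_add_instance() wrappers declared in <linux/cpuhotplug.h>.
 * The my_drv_* names below are hypothetical and the callback is stubbed
 * out; a real callback would use hlist_entry() to get from @node back to
 * its containing instance.
 *
 *	struct my_drv_instance {
 *		struct hlist_node node;
 *	};
 *
 *	static enum cpuhp_state my_drv_state;
 *
 *	static int my_drv_cpu_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		return 0;
 *	}
 *
 *	static int __init my_drv_init(void)
 *	{
 *		int ret;
 *
 *		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
 *					      "my_drv:online",
 *					      my_drv_cpu_online, NULL);
 *		if (ret < 0)
 *			return ret;
 *		my_drv_state = ret;
 *		return 0;
 *	}
 *
 * Each device instance is then hooked up with
 * cpuhp_state_add_instance(my_drv_state, &inst->node), which invokes
 * my_drv_cpu_online() for that instance on every CPU that is already at or
 * beyond the state.
 */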
1404
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001405/**
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001406 * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001407 * @state: The state to setup
 * @name: Name of the state; also shown in the sysfs "states" listing
1408 * @invoke: If true, the startup function is invoked for cpus where
1409 * cpu state >= @state
1410 * @startup: startup callback function
1411 * @teardown: teardown callback function
1412 * @multi_instance: State is set up for multiple instances which get
1413 * added afterwards.
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001414 *
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001415 * The caller needs to hold cpus read locked while calling this function.
Boris Ostrovsky512f0982016-12-15 10:00:57 -05001416 * Returns:
1417 * On success:
1418 * Positive state number if @state is CPUHP_AP_ONLINE_DYN
1419 * 0 for all other states
1420 * On failure: proper (negative) error code
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001421 */
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001422int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1423 const char *name, bool invoke,
1424 int (*startup)(unsigned int cpu),
1425 int (*teardown)(unsigned int cpu),
1426 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001427{
1428 int cpu, ret = 0;
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001429 bool dynstate;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001430
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001431 lockdep_assert_cpus_held();
1432
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001433 if (cpuhp_cb_check(state) || !name)
1434 return -EINVAL;
1435
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001436 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001437
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001438 ret = cpuhp_store_callbacks(state, name, startup, teardown,
1439 multi_instance);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001440
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001441 dynstate = state == CPUHP_AP_ONLINE_DYN;
1442 if (ret > 0 && dynstate) {
1443 state = ret;
1444 ret = 0;
1445 }
1446
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001447 if (ret || !invoke || !startup)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001448 goto out;
1449
1450 /*
1451 * Try to call the startup callback for each present cpu
1452 * depending on the hotplug state of the cpu.
1453 */
1454 for_each_present_cpu(cpu) {
1455 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1456 int cpustate = st->state;
1457
1458 if (cpustate < state)
1459 continue;
1460
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001461 ret = cpuhp_issue_call(cpu, state, true, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001462 if (ret) {
Thomas Gleixnera7246322016-08-12 19:49:38 +02001463 if (teardown)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001464 cpuhp_rollback_install(cpu, state, NULL);
1465 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001466 goto out;
1467 }
1468 }
1469out:
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001470 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001471 /*
1472 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1473 * dynamically allocated state in case of success.
1474 */
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001475 if (!ret && dynstate)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001476 return state;
1477 return ret;
1478}
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001479EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1480
1481int __cpuhp_setup_state(enum cpuhp_state state,
1482 const char *name, bool invoke,
1483 int (*startup)(unsigned int cpu),
1484 int (*teardown)(unsigned int cpu),
1485 bool multi_instance)
1486{
1487 int ret;
1488
1489 cpus_read_lock();
1490 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1491 teardown, multi_instance);
1492 cpus_read_unlock();
1493 return ret;
1494}
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001495EXPORT_SYMBOL(__cpuhp_setup_state);
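/*
 * Illustrative sketch, not part of this file: typical use of the
 * cpuhp_setup_state() wrapper from <linux/cpuhotplug.h> to register a
 * dynamically allocated online state.  The my_subsys_* names are
 * hypothetical and the callbacks are stubbed out.
 *
 *	static enum cpuhp_state my_subsys_state;
 *
 *	static int my_subsys_cpu_online(unsigned int cpu)
 *	{
 *		return 0;
 *	}
 *
 *	static int my_subsys_cpu_offline(unsigned int cpu)
 *	{
 *		return 0;
 *	}
 *
 *	static int __init my_subsys_init(void)
 *	{
 *		int ret;
 *
 *		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 *					"my_subsys:online",
 *					my_subsys_cpu_online,
 *					my_subsys_cpu_offline);
 *		if (ret < 0)
 *			return ret;
 *		my_subsys_state = ret;
 *		return 0;
 *	}
 *
 * With CPUHP_AP_ONLINE_DYN the positive return value is the actually
 * allocated state and must be kept around for a later
 * cpuhp_remove_state() call.
 */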
1496
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001497int __cpuhp_state_remove_instance(enum cpuhp_state state,
1498 struct hlist_node *node, bool invoke)
1499{
1500 struct cpuhp_step *sp = cpuhp_get_step(state);
1501 int cpu;
1502
1503 BUG_ON(cpuhp_cb_check(state));
1504
1505 if (!sp->multi_instance)
1506 return -EINVAL;
1507
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001508 cpus_read_lock();
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001509 mutex_lock(&cpuhp_state_mutex);
1510
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001511 if (!invoke || !cpuhp_get_teardown_cb(state))
1512 goto remove;
1513 /*
1514 * Call the teardown callback for each present cpu depending
1515 * on the hotplug state of the cpu. This function is not
1516 * allowed to fail currently!
1517 */
1518 for_each_present_cpu(cpu) {
1519 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1520 int cpustate = st->state;
1521
1522 if (cpustate >= state)
1523 cpuhp_issue_call(cpu, state, false, node);
1524 }
1525
1526remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001527 hlist_del(node);
1528 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001529 cpus_read_unlock();
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001530
1531 return 0;
1532}
1533EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
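/*
 * Illustrative sketch, not part of this file: the counterpart of the
 * add_instance example above.  When a device goes away, its instance is
 * unhooked again, which invokes the teardown callback (if any) on all
 * online CPUs.  my_drv_state and inst are the hypothetical names used in
 * that example.
 *
 *	cpuhp_state_remove_instance(my_drv_state, &inst->node);
 *
 * or, to unhook without invoking the teardown callback:
 *
 *	cpuhp_state_remove_instance_nocalls(my_drv_state, &inst->node);
 */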
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001534
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001535/**
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001536 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001537 * @state: The state to remove
1538 * @invoke: If true, the teardown function is invoked for cpus where
1539 * cpu state >= @state
1540 *
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001541 * The caller needs to hold cpus read locked while calling this function.
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001542 * The teardown callback is currently not allowed to fail. Think
1543 * about module removal!
1544 */
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001545void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001546{
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001547 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001548 int cpu;
1549
1550 BUG_ON(cpuhp_cb_check(state));
1551
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001552 lockdep_assert_cpus_held();
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001553
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001554 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001555 if (sp->multi_instance) {
1556 WARN(!hlist_empty(&sp->list),
1557 "Error: Removing state %d which has instances left.\n",
1558 state);
1559 goto remove;
1560 }
1561
Thomas Gleixnera7246322016-08-12 19:49:38 +02001562 if (!invoke || !cpuhp_get_teardown_cb(state))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001563 goto remove;
1564
1565 /*
1566 * Call the teardown callback for each present cpu depending
1567 * on the hotplug state of the cpu. This function is not
1568 * allowed to fail currently!
1569 */
1570 for_each_present_cpu(cpu) {
1571 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1572 int cpustate = st->state;
1573
1574 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001575 cpuhp_issue_call(cpu, state, false, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001576 }
1577remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001578 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001579 mutex_unlock(&cpuhp_state_mutex);
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001580}
1581EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
1582
1583void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1584{
1585 cpus_read_lock();
1586 __cpuhp_remove_state_cpuslocked(state, invoke);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001587 cpus_read_unlock();
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001588}
1589EXPORT_SYMBOL(__cpuhp_remove_state);
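/*
 * Illustrative sketch, not part of this file: a module using the
 * hypothetical my_subsys_state from the setup example above would undo the
 * registration on exit through the wrappers in <linux/cpuhotplug.h>:
 *
 *	static void __exit my_subsys_exit(void)
 *	{
 *		cpuhp_remove_state(my_subsys_state);
 *	}
 *
 * cpuhp_remove_state_nocalls() does the same without invoking the teardown
 * callback on the online CPUs.
 */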
1590
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001591#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1592static ssize_t show_cpuhp_state(struct device *dev,
1593 struct device_attribute *attr, char *buf)
1594{
1595 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1596
1597 return sprintf(buf, "%d\n", st->state);
1598}
1599static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1600
Thomas Gleixner757c9892016-02-26 18:43:32 +00001601static ssize_t write_cpuhp_target(struct device *dev,
1602 struct device_attribute *attr,
1603 const char *buf, size_t count)
1604{
1605 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1606 struct cpuhp_step *sp;
1607 int target, ret;
1608
1609 ret = kstrtoint(buf, 10, &target);
1610 if (ret)
1611 return ret;
1612
1613#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1614 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1615 return -EINVAL;
1616#else
1617 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1618 return -EINVAL;
1619#endif
1620
1621 ret = lock_device_hotplug_sysfs();
1622 if (ret)
1623 return ret;
1624
1625 mutex_lock(&cpuhp_state_mutex);
1626 sp = cpuhp_get_step(target);
1627 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1628 mutex_unlock(&cpuhp_state_mutex);
1629 if (ret)
Sebastian Andrzej Siewior40da1b12017-06-02 16:27:14 +02001630 goto out;
Thomas Gleixner757c9892016-02-26 18:43:32 +00001631
1632 if (st->state < target)
1633 ret = do_cpu_up(dev->id, target);
1634 else
1635 ret = do_cpu_down(dev->id, target);
Sebastian Andrzej Siewior40da1b12017-06-02 16:27:14 +02001636out:
Thomas Gleixner757c9892016-02-26 18:43:32 +00001637 unlock_device_hotplug();
1638 return ret ? ret : count;
1639}
1640
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001641static ssize_t show_cpuhp_target(struct device *dev,
1642 struct device_attribute *attr, char *buf)
1643{
1644 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1645
1646 return sprintf(buf, "%d\n", st->target);
1647}
Thomas Gleixner757c9892016-02-26 18:43:32 +00001648static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
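/*
 * Illustrative note, not part of this file: with CONFIG_SYSFS and
 * CONFIG_HOTPLUG_CPU these attributes appear per CPU as
 * /sys/devices/system/cpu/cpuN/hotplug/state (read-only, current state
 * number) and /sys/devices/system/cpu/cpuN/hotplug/target (read/write).
 * Writing a state number to "target" drives the CPU up or down to that
 * state; without CONFIG_CPU_HOTPLUG_STATE_CONTROL only CPUHP_OFFLINE and
 * CPUHP_ONLINE are accepted, as enforced in write_cpuhp_target() above.
 */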
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001649
1650static struct attribute *cpuhp_cpu_attrs[] = {
1651 &dev_attr_state.attr,
1652 &dev_attr_target.attr,
1653 NULL
1654};
1655
Arvind Yadav993647a2017-06-29 17:40:47 +05301656static const struct attribute_group cpuhp_cpu_attr_group = {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001657 .attrs = cpuhp_cpu_attrs,
1658 .name = "hotplug",
1659 NULL
1660};
1661
1662static ssize_t show_cpuhp_states(struct device *dev,
1663 struct device_attribute *attr, char *buf)
1664{
1665 ssize_t cur, res = 0;
1666 int i;
1667
1668 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner757c9892016-02-26 18:43:32 +00001669 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001670 struct cpuhp_step *sp = cpuhp_get_step(i);
1671
1672 if (sp->name) {
1673 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1674 buf += cur;
1675 res += cur;
1676 }
1677 }
1678 mutex_unlock(&cpuhp_state_mutex);
1679 return res;
1680}
1681static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1682
1683static struct attribute *cpuhp_cpu_root_attrs[] = {
1684 &dev_attr_states.attr,
1685 NULL
1686};
1687
Arvind Yadav993647a2017-06-29 17:40:47 +05301688static const struct attribute_group cpuhp_cpu_root_attr_group = {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00001689 .attrs = cpuhp_cpu_root_attrs,
1690 .name = "hotplug",
1691 NULL
1692};
1693
1694static int __init cpuhp_sysfs_init(void)
1695{
1696 int cpu, ret;
1697
1698 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
1699 &cpuhp_cpu_root_attr_group);
1700 if (ret)
1701 return ret;
1702
1703 for_each_possible_cpu(cpu) {
1704 struct device *dev = get_cpu_device(cpu);
1705
1706 if (!dev)
1707 continue;
1708 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
1709 if (ret)
1710 return ret;
1711 }
1712 return 0;
1713}
1714device_initcall(cpuhp_sysfs_init);
1715#endif
1716
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001717/*
1718 * cpu_bit_bitmap[] is a special, "compressed" data structure that
1719 * represents all NR_CPUS-bit binary values of the form 1<<nr.
1720 *
Rusty Russelle0b582e2009-01-01 10:12:28 +10301721 * It is used by cpumask_of() to get a constant address to a CPU
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001722 * mask value that has a single bit set only.
1723 */
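/*
 * Illustrative sketch, not part of this file: get_cpu_mask() in
 * <linux/cpumask.h>, which backs cpumask_of(), indexes this table roughly
 * as follows:
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * Row (1 + cpu % BITS_PER_LONG) holds a word with exactly bit
 * (cpu % BITS_PER_LONG) set; stepping the pointer back by
 * cpu / BITS_PER_LONG words places that word at the right offset within
 * the returned constant cpumask, with only zero words in front of it.
 */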
Mike Travisb8d317d2008-07-24 18:21:29 -07001724
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001725/* cpu_bit_bitmap[0] is empty - so we can back into it */
Michael Rodriguez4d519852011-03-22 16:34:07 -07001726#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001727#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
1728#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
1729#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
Mike Travisb8d317d2008-07-24 18:21:29 -07001730
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001731const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
Mike Travisb8d317d2008-07-24 18:21:29 -07001732
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001733 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
1734 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
1735#if BITS_PER_LONG > 32
1736 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
1737 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
Mike Travisb8d317d2008-07-24 18:21:29 -07001738#endif
1739};
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07001740EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
Rusty Russell2d3854a2008-11-05 13:39:10 +11001741
1742const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
1743EXPORT_SYMBOL(cpu_all_bits);
Rusty Russellb3199c02008-12-30 09:05:14 +10301744
1745#ifdef CONFIG_INIT_ALL_POSSIBLE
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001746struct cpumask __cpu_possible_mask __read_mostly
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08001747 = {CPU_BITS_ALL};
Rusty Russellb3199c02008-12-30 09:05:14 +10301748#else
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001749struct cpumask __cpu_possible_mask __read_mostly;
Rusty Russellb3199c02008-12-30 09:05:14 +10301750#endif
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001751EXPORT_SYMBOL(__cpu_possible_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10301752
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001753struct cpumask __cpu_online_mask __read_mostly;
1754EXPORT_SYMBOL(__cpu_online_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10301755
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001756struct cpumask __cpu_present_mask __read_mostly;
1757EXPORT_SYMBOL(__cpu_present_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10301758
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08001759struct cpumask __cpu_active_mask __read_mostly;
1760EXPORT_SYMBOL(__cpu_active_mask);
Rusty Russell3fa41522008-12-30 09:05:16 +10301761
Rusty Russell3fa41522008-12-30 09:05:16 +10301762void init_cpu_present(const struct cpumask *src)
1763{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08001764 cpumask_copy(&__cpu_present_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10301765}
1766
1767void init_cpu_possible(const struct cpumask *src)
1768{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08001769 cpumask_copy(&__cpu_possible_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10301770}
1771
1772void init_cpu_online(const struct cpumask *src)
1773{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08001774 cpumask_copy(&__cpu_online_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10301775}
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001776
1777/*
1778 * Activate the first processor.
1779 */
1780void __init boot_cpu_init(void)
1781{
1782 int cpu = smp_processor_id();
1783
1784 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
1785 set_cpu_online(cpu, true);
1786 set_cpu_active(cpu, true);
1787 set_cpu_present(cpu, true);
1788 set_cpu_possible(cpu, true);
Peter Zijlstra8ce371f2017-03-20 12:26:55 +01001789
1790#ifdef CONFIG_SMP
1791 __boot_cpu_id = cpu;
1792#endif
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001793}
1794
1795/*
1796 * Must be called _AFTER_ setting up the per_cpu areas
1797 */
1798void __init boot_cpu_state_init(void)
1799{
1800 per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
1801}