/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licenced under the GPL.
 */
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	int			cpu;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);


static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

static bool cpuhp_step_empty(bool bringup, struct cpuhp_step *step)
{
	return bringup ? !step->startup.single : !step->teardown.single;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu:	The cpu for which the callback should be invoked
 * @state:	The state to do callbacks for
 * @bringup:	True if the bringup callback should be invoked
 * @node:	For multi-instance, do a single entry callback for install/remove
 * @lastp:	For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;
		return -EAGAIN;
	}

	if (cpuhp_step_empty(bringup, step)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;

		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}

#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}

/*
 * The former STARTING/DYING states, run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

#ifdef CONFIG_LOCKDEP
int lockdep_is_cpus_held(void)
{
	return percpu_rwsem_is_held(&cpu_hotplug_lock);
}
#endif

static void lockdep_acquire_cpus_lock(void)
{
	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);

#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (!cpu_smt_possible())
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any
	 * core will shutdown the machine.
	 */
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}

/* Returns true if SMT is not supported or forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

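/*
 * Record the new target state and the direction of travel (bringup vs.
 * teardown) for this CPU's state machine and return the previous state,
 * which is needed if the transition has to be rolled back.
 */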
static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	bool bringup = st->state < target;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = bringup;
	if (cpu_dying(st->cpu) != !bringup)
		set_cpu_dying(st->cpu, !bringup);

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	bool bringup = !st->bringup;

	st->target = prev_state;

	/*
	 * Already rolling back. No need to invert the bringup value or to
	 * change the current state.
	 */
	if (st->rollback)
		return;

	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->bringup = bringup;
	if (cpu_dying(st->cpu) != !bringup)
		set_cpu_dying(st->cpu, !bringup);
}

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

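/*
 * Move the AP state machine to @target via the hotplug thread and wait for
 * completion. On failure, reset to the previous state and kick the thread
 * again so it performs the rollback.
 */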
static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the hotplug thread of the target cpu */
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires to bring the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * setup the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

static int finish_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	struct mm_struct *mm = idle->active_mm;

	/*
	 * idle_task_exit() will have switched to &init_mm, now
	 * clean up any remaining active_mm state.
	 */
	if (mm != &init_mm)
		idle->active_mm = &init_mm;
	mmdrop(mm);
	return 0;
}

/*
 * Hotplug state machine related functions
 */

/*
 * Get the next state to run. Empty ones will be skipped. Returns true if a
 * state must be run.
 *
 * st->state will be modified ahead of time, to match state_to_run, as if it
 * had already run.
 */
static bool cpuhp_next_state(bool bringup,
			     enum cpuhp_state *state_to_run,
			     struct cpuhp_cpu_state *st,
			     enum cpuhp_state target)
{
	do {
		if (bringup) {
			if (st->state >= target)
				return false;

			*state_to_run = ++st->state;
		} else {
			if (st->state <= target)
				return false;

			*state_to_run = st->state--;
		}

		if (!cpuhp_step_empty(bringup, cpuhp_get_step(*state_to_run)))
			break;
	} while (true);

	return true;
}

static int cpuhp_invoke_callback_range(bool bringup,
				       unsigned int cpu,
				       struct cpuhp_cpu_state *st,
				       enum cpuhp_state target)
{
	enum cpuhp_state state;
	int err = 0;

	while (cpuhp_next_state(bringup, &state, st, target)) {
		err = cpuhp_invoke_callback(cpu, state, bringup, NULL, NULL);
		if (err)
			break;
	}

	return err;
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

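/*
 * Run the bringup callbacks from the current state up to @target. On
 * failure, undo the already completed steps when a rollback is possible.
 */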
static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	ret = cpuhp_invoke_callback_range(true, cpu, st, target);
	if (ret) {
		cpuhp_reset_state(st, prev_state);
		if (can_rollback_cpu(st))
			WARN_ON(cpuhp_invoke_callback_range(false, cpu, st,
							    prev_state));
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
	st->cpu = cpu;
}

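/* Tell the smpboot core whether this CPU's hotplug thread has work pending */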
static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	/*
	 * The BP holds the hotplug lock, but we're now running on the AP,
	 * ensure that anybody asserting the lock is held, will actually find
	 * it so.
	 */
	lockdep_acquire_cpus_lock();
	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		st->should_run = cpuhp_next_state(bringup, &state, st, st->target);
		if (!st->should_run)
			goto end;
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle, no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

end:
	cpuhp_lock_release(bringup);
	lockdep_release_cpus_lock();

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

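/*
 * Called on the control CPU to kick the hotplug thread of the target CPU and
 * wait for it to drive the state machine towards st->target.
 */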
static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
#endif

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		arch_clear_mm_cpumask_cpu(cpu, t->mm);
		task_unlock(t);
	}
	rcu_read_unlock();
}

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
	 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
	 */
	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));

	/* Invoke the former CPU_DYING callbacks */
	ret = cpuhp_invoke_callback_range(false, cpu, st, target);

	/*
	 * DYING must not fail!
	 */
	WARN_ON_ONCE(ret);

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Remove CPU from timer broadcasting */
	tick_offline_cpu(cpu);
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}

static void cpuhp_complete_idle_dead(void *arg)
{
	struct cpuhp_cpu_state *st = arg;

	complete_ap_thread(st, false);
}

void cpuhp_report_idle_dead(void)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	BUG_ON(st->state != CPUHP_AP_OFFLINE);
	rcu_report_dead(smp_processor_id());
	st->state = CPUHP_AP_IDLE_DEAD;
	/*
	 * We cannot call complete after rcu_report_dead() so we delegate it
	 * to an online cpu.
	 */
	smp_call_function_single(cpumask_first(cpu_online_mask),
				 cpuhp_complete_idle_dead, st, 0);
}

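/*
 * Run the teardown callbacks from the current state down to @target. On
 * failure, re-run the bringup callbacks towards the previous state.
 */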
static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
				enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	ret = cpuhp_invoke_callback_range(false, cpu, st, target);
	if (ret) {

		cpuhp_reset_state(st, prev_state);

		if (st->state < prev_state)
			WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
							    prev_state));
	}

	return ret;
}

/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpus_write_lock();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = cpuhp_set_state(st, target);
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		st->target = max((int)target, CPUHP_TEARDOWN_CPU);
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code.
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;

		st->target = target;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, target);
	if (ret && st->state < prev_state) {
		if (st->state == CPUHP_TEARDOWN_CPU) {
			cpuhp_reset_state(st, prev_state);
			__cpuhp_kick_ap(st);
		} else {
			WARN(1, "DEAD callback error for CPU%d", cpu);
		}
	}

out:
	cpus_write_unlock();
	/*
	 * Do post unplug cleanup. This is still protected against
	 * concurrent CPU hotplug via cpu_add_remove_lock.
	 */
	lockup_detector_cleanup();
	arch_smt_update();
	return ret;
}

static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
{
	if (cpu_hotplug_disabled)
		return -EBUSY;
	return _cpu_down(cpu, 0, target);
}

static int cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();
	err = cpu_down_maps_locked(cpu, target);
	cpu_maps_update_done();
	return err;
}

/**
 * cpu_device_down - Bring down a cpu device
 * @dev: Pointer to the cpu device to offline
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use remove_cpu() instead.
 */
int cpu_device_down(struct device *dev)
{
	return cpu_down(dev->id, CPUHP_OFFLINE);
}

int remove_cpu(unsigned int cpu)
{
	int ret;

	lock_device_hotplug();
	ret = device_offline(get_cpu_device(cpu));
	unlock_device_hotplug();

	return ret;
}
EXPORT_SYMBOL_GPL(remove_cpu);

void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
{
	unsigned int cpu;
	int error;

	cpu_maps_update_begin();

	/*
	 * Make certain the cpu I'm about to reboot on is online.
	 *
	 * This is in line with what migrate_to_reboot_cpu() already does.
	 */
	if (!cpu_online(primary_cpu))
		primary_cpu = cpumask_first(cpu_online_mask);

	for_each_online_cpu(cpu) {
		if (cpu == primary_cpu)
			continue;

		error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (error) {
			pr_err("Failed to offline CPU%d - error=%d",
				cpu, error);
			break;
		}
	}

	/*
	 * Ensure all but the reboot CPU are offline.
	 */
	BUG_ON(num_online_cpus() > 1);

	/*
	 * Make sure the CPUs won't be enabled by someone else after this
	 * point. Kexec will reboot to a new kernel shortly resetting
	 * everything along the way.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
}

#else
#define takedown_cpu		NULL
#endif /*CONFIG_HOTPLUG_CPU*/

/**
 * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
 * @cpu: cpu that just started
 *
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void notify_cpu_starting(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
	int ret;

	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
	cpumask_set_cpu(cpu, &cpus_booted_once_mask);
	ret = cpuhp_invoke_callback_range(true, cpu, st, target);

	/*
	 * STARTING must not fail!
	 */
	WARN_ON_ONCE(ret);
}

/*
 * Called from the idle task. Wake up the controlling task which brings the
 * hotplug thread of the upcoming CPU up and then delegates the rest of the
 * online bringup to the hotplug thread.
 */
void cpuhp_online_idle(enum cpuhp_state state)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	/* Happens for the boot cpu */
	if (state != CPUHP_AP_ONLINE_IDLE)
		return;

	/*
	 * Unpark the stopper thread before we start the idle loop (and start
	 * scheduling); this ensures the stopper task is always available.
	 */
	stop_machine_unpark(smp_processor_id());

	st->state = CPUHP_AP_ONLINE_IDLE;
	complete_ap_thread(st, true);
}

Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001248/* Requires cpu_add_remove_lock to be held */
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001249static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250{
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001251 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -07001252 struct task_struct *idle;
Thomas Gleixner2e1a3482016-02-26 18:43:37 +00001253 int ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001255 cpus_write_lock();
Thomas Gleixner38498a62012-04-20 13:05:44 +00001256
Thomas Gleixner757c9892016-02-26 18:43:32 +00001257 if (!cpu_present(cpu)) {
Yasuaki Ishimatsu5e5041f2012-10-23 01:30:54 +02001258 ret = -EINVAL;
1259 goto out;
1260 }
1261
Thomas Gleixner757c9892016-02-26 18:43:32 +00001262 /*
Qais Yousef33c37362020-03-23 13:51:10 +00001263 * The caller of cpu_up() might have raced with another
1264 * caller. Nothing to do.
Thomas Gleixner757c9892016-02-26 18:43:32 +00001265 */
1266 if (st->state >= target)
Thomas Gleixner38498a62012-04-20 13:05:44 +00001267 goto out;
Thomas Gleixner757c9892016-02-26 18:43:32 +00001268
1269 if (st->state == CPUHP_OFFLINE) {
1270 /* Let it fail before we try to bring the cpu up */
1271 idle = idle_thread_get(cpu);
1272 if (IS_ERR(idle)) {
1273 ret = PTR_ERR(idle);
1274 goto out;
1275 }
Suresh Siddha3bb5d2e2012-04-20 17:08:50 -07001276 }
Thomas Gleixner38498a62012-04-20 13:05:44 +00001277
Thomas Gleixnerba997462016-02-26 18:43:24 +00001278 cpuhp_tasks_frozen = tasks_frozen;
1279
Peter Zijlstra4dddfb52017-09-20 19:00:17 +02001280 cpuhp_set_state(st, target);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001281 /*
1282 * If the current CPU state is in the range of the AP hotplug thread,
1283 * then we need to kick the thread once more.
1284 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001285 if (st->state > CPUHP_BRINGUP_CPU) {
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001286 ret = cpuhp_kick_ap_work(cpu);
1287 /*
1288 * The AP side has done the error rollback already. Just
1289 * return the error code..
1290 */
1291 if (ret)
1292 goto out;
1293 }
1294
1295 /*
1296 * Try to reach the target state. We max out on the BP at
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001297 * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001298 * responsible for bringing it up to the target state.
1299 */
Thomas Gleixner8df3e072016-02-26 18:43:41 +00001300 target = min((int)target, CPUHP_BRINGUP_CPU);
Thomas Gleixnera7246322016-08-12 19:49:38 +02001301 ret = cpuhp_up_callbacks(cpu, st, target);
Thomas Gleixner38498a62012-04-20 13:05:44 +00001302out:
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001303 cpus_write_unlock();
Thomas Gleixnera74cfff2018-11-25 19:33:39 +01001304 arch_smt_update();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 return ret;
1306}
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001307
Qais Yousef33c37362020-03-23 13:51:10 +00001308static int cpu_up(unsigned int cpu, enum cpuhp_state target)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001309{
1310 int err = 0;
minskey guocf234222010-05-24 14:32:41 -07001311
Rusty Russelle0b582e2009-01-01 10:12:28 +10301312 if (!cpu_possible(cpu)) {
Fabian Frederick84117da2014-06-04 16:11:17 -07001313 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1314 cpu);
Chen Gong87d5e0232010-03-05 13:42:38 -08001315#if defined(CONFIG_IA64)
Fabian Frederick84117da2014-06-04 16:11:17 -07001316 pr_err("please check additional_cpus= boot parameter\n");
KAMEZAWA Hiroyuki73e753a2007-10-18 23:40:47 -07001317#endif
1318 return -EINVAL;
1319 }
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001320
Toshi Kani01b0f192013-11-12 15:07:25 -08001321 err = try_online_node(cpu_to_node(cpu));
1322 if (err)
1323 return err;
minskey guocf234222010-05-24 14:32:41 -07001324
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001325 cpu_maps_update_begin();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001326
Max Krasnyanskye761b772008-07-15 04:43:49 -07001327 if (cpu_hotplug_disabled) {
1328 err = -EBUSY;
1329 goto out;
1330 }
Thomas Gleixner05736e42018-05-29 17:48:27 +02001331 if (!cpu_smt_allowed(cpu)) {
1332 err = -EPERM;
1333 goto out;
1334 }
Max Krasnyanskye761b772008-07-15 04:43:49 -07001335
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001336 err = _cpu_up(cpu, 0, target);
Max Krasnyanskye761b772008-07-15 04:43:49 -07001337out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001338 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001339 return err;
1340}
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001341
Qais Yousef33c37362020-03-23 13:51:10 +00001342/**
1343 * cpu_device_up - Bring up a cpu device
1344 * @dev: Pointer to the cpu device to online
1345 *
1346 * This function is meant to be used by device core cpu subsystem only.
1347 *
1348 * Other subsystems should use add_cpu() instead.
1349 */
1350int cpu_device_up(struct device *dev)
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001351{
Qais Yousef33c37362020-03-23 13:51:10 +00001352 return cpu_up(dev->id, CPUHP_ONLINE);
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001353}
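/*
 * Illustrative sketch, not part of this file: a subsystem outside the
 * device core would online a CPU through add_cpu() below rather than
 * cpu_device_up(), e.g.
 *
 *	int err = add_cpu(3);
 *
 *	if (err)
 *		pr_err("Failed to online CPU3: %d\n", err);
 *
 * add_cpu() takes the device hotplug lock and goes through device_online(),
 * which keeps the sysfs "online" attribute consistent with the actual state.
 */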
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001354
Qais Yousef93ef1422020-03-23 13:50:54 +00001355int add_cpu(unsigned int cpu)
1356{
1357 int ret;
1358
1359 lock_device_hotplug();
1360 ret = device_online(get_cpu_device(cpu));
1361 unlock_device_hotplug();
1362
1363 return ret;
1364}
1365EXPORT_SYMBOL_GPL(add_cpu);
1366
Qais Yousefd720f982020-03-23 13:51:01 +00001367/**
1368 * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1369 * @sleep_cpu: The cpu we hibernated on and should be brought up.
1370 *
1371 * On some architectures like arm64, we can hibernate on any CPU, but on
1372 * wakeup, the CPU we hibernated on might be offline as a side effect of
1373 * using maxcpus= for example.
1374 */
1375int bringup_hibernate_cpu(unsigned int sleep_cpu)
1376{
1377 int ret;
1378
1379 if (!cpu_online(sleep_cpu)) {
1380 pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
Qais Yousef33c37362020-03-23 13:51:10 +00001381 ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
Qais Yousefd720f982020-03-23 13:51:01 +00001382 if (ret) {
1383 pr_err("Failed to bring hibernate-CPU up!\n");
1384 return ret;
1385 }
1386 }
1387 return 0;
1388}
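/*
 * Illustrative sketch (the caller shown is hypothetical): an architecture's
 * resume-from-hibernation path that recorded the CPU it suspended on would
 * simply do
 *
 *	err = bringup_hibernate_cpu(sleep_cpu);
 *	if (err)
 *		return err;
 *
 * before switching execution back to that CPU; the call is a no-op when the
 * CPU is already online.
 */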
1389
Qais Yousefb99a2652020-03-23 13:51:09 +00001390void bringup_nonboot_cpus(unsigned int setup_max_cpus)
1391{
1392 unsigned int cpu;
1393
1394 for_each_present_cpu(cpu) {
1395 if (num_online_cpus() >= setup_max_cpus)
1396 break;
1397 if (!cpu_online(cpu))
Qais Yousef33c37362020-03-23 13:51:10 +00001398 cpu_up(cpu, CPUHP_ONLINE);
Qais Yousefb99a2652020-03-23 13:51:09 +00001399 }
1400}
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001401
Rafael J. Wysockif3de4be2007-08-30 23:56:29 -07001402#ifdef CONFIG_PM_SLEEP_SMP
Rusty Russelle0b582e2009-01-01 10:12:28 +10301403static cpumask_var_t frozen_cpus;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001404
Qais Youseffb7fb842020-04-30 12:40:04 +01001405int freeze_secondary_cpus(int primary)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001406{
James Morsed391e552016-08-17 13:50:25 +01001407 int cpu, error = 0;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001408
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001409 cpu_maps_update_begin();
Nicholas Piggin9ca12ac2019-04-11 13:34:46 +10001410 if (primary == -1) {
James Morsed391e552016-08-17 13:50:25 +01001411 primary = cpumask_first(cpu_online_mask);
Nicholas Piggin9ca12ac2019-04-11 13:34:46 +10001412 if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
1413 primary = housekeeping_any_cpu(HK_FLAG_TIMER);
1414 } else {
1415 if (!cpu_online(primary))
1416 primary = cpumask_first(cpu_online_mask);
1417 }
1418
Xiaotian Feng9ee349a2009-12-16 18:04:32 +01001419 /*
1420 * We take down all of the non-boot CPUs in one shot to avoid races
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001421	 * with userspace trying to use CPU hotplug at the same time.
1422 */
Rusty Russelle0b582e2009-01-01 10:12:28 +10301423 cpumask_clear(frozen_cpus);
Peter Zijlstra6ad4c182009-11-25 13:31:39 +01001424
Fabian Frederick84117da2014-06-04 16:11:17 -07001425 pr_info("Disabling non-boot CPUs ...\n");
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001426 for_each_online_cpu(cpu) {
James Morsed391e552016-08-17 13:50:25 +01001427 if (cpu == primary)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001428 continue;
Pavankumar Kondetia66d9552019-06-03 10:01:03 +05301429
Qais Youseffb7fb842020-04-30 12:40:04 +01001430 if (pm_wakeup_pending()) {
Pavankumar Kondetia66d9552019-06-03 10:01:03 +05301431 pr_info("Wakeup pending. Abort CPU freeze\n");
1432 error = -EBUSY;
1433 break;
1434 }
1435
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001436 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001437 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001438 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
Mike Travisfeae3202009-11-17 18:22:13 -06001439 if (!error)
Rusty Russelle0b582e2009-01-01 10:12:28 +10301440 cpumask_set_cpu(cpu, frozen_cpus);
Mike Travisfeae3202009-11-17 18:22:13 -06001441 else {
Fabian Frederick84117da2014-06-04 16:11:17 -07001442 pr_err("Error taking CPU%d down: %d\n", cpu, error);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001443 break;
1444 }
1445 }
Joseph Cihula86886e52009-06-30 19:31:07 -07001446
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -07001447 if (!error)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001448 BUG_ON(num_online_cpus() > 1);
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -07001449 else
Fabian Frederick84117da2014-06-04 16:11:17 -07001450 pr_err("Non-boot CPUs are not disabled\n");
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -07001451
1452 /*
1453 * Make sure the CPUs won't be enabled by someone else. We need to do
Qais Yousef56555852020-04-30 12:40:03 +01001454 * this even in case of failure as all freeze_secondary_cpus() users are
1455 * supposed to do thaw_secondary_cpus() on the failure path.
Vitaly Kuznetsov89af7ba2015-08-05 00:52:46 -07001456 */
1457 cpu_hotplug_disabled++;
1458
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001459 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001460 return error;
1461}
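/*
 * Illustrative sketch of the caller contract noted above (the caller is
 * hypothetical): thaw_secondary_cpus() must be called even when the freeze
 * fails, so that the cpu_hotplug_disabled count taken here is dropped again:
 *
 *	err = freeze_secondary_cpus(-1);
 *	if (!err) {
 *		... work that needs every CPU but the primary offline ...
 *	}
 *	thaw_secondary_cpus();
 */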
1462
Qais Yousef56555852020-04-30 12:40:03 +01001463void __weak arch_thaw_secondary_cpus_begin(void)
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001464{
1465}
1466
Qais Yousef56555852020-04-30 12:40:03 +01001467void __weak arch_thaw_secondary_cpus_end(void)
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001468{
1469}
1470
Qais Yousef56555852020-04-30 12:40:03 +01001471void thaw_secondary_cpus(void)
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001472{
1473 int cpu, error;
1474
1475 /* Allow everyone to use the CPU hotplug again */
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001476 cpu_maps_update_begin();
Lianwei Wang01b41152016-06-09 23:43:28 -07001477 __cpu_hotplug_enable();
Rusty Russelle0b582e2009-01-01 10:12:28 +10301478 if (cpumask_empty(frozen_cpus))
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -07001479 goto out;
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001480
Fabian Frederick84117da2014-06-04 16:11:17 -07001481 pr_info("Enabling non-boot CPUs ...\n");
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001482
Qais Yousef56555852020-04-30 12:40:03 +01001483 arch_thaw_secondary_cpus_begin();
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001484
Rusty Russelle0b582e2009-01-01 10:12:28 +10301485 for_each_cpu(cpu, frozen_cpus) {
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001486 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
Thomas Gleixneraf1f4042016-02-26 18:43:30 +00001487 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
Todd E Brandtbb3632c2014-06-06 05:40:17 -07001488 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001489 if (!error) {
Fabian Frederick84117da2014-06-04 16:11:17 -07001490 pr_info("CPU%d is up\n", cpu);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001491 continue;
1492 }
Fabian Frederick84117da2014-06-04 16:11:17 -07001493 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001494 }
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001495
Qais Yousef56555852020-04-30 12:40:03 +01001496 arch_thaw_secondary_cpus_end();
Suresh Siddhad0af9ee2009-08-19 18:05:36 -07001497
Rusty Russelle0b582e2009-01-01 10:12:28 +10301498 cpumask_clear(frozen_cpus);
Rafael J. Wysocki1d64b9c2007-04-01 23:49:49 -07001499out:
Gautham R Shenoyd2219382008-01-25 21:08:01 +01001500 cpu_maps_update_done();
Rafael J. Wysockie3920fb2006-09-25 23:32:48 -07001501}
Rusty Russelle0b582e2009-01-01 10:12:28 +10301502
Fenghua Yud7268a32011-11-15 21:59:31 +01001503static int __init alloc_frozen_cpus(void)
Rusty Russelle0b582e2009-01-01 10:12:28 +10301504{
1505 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1506 return -ENOMEM;
1507 return 0;
1508}
1509core_initcall(alloc_frozen_cpus);
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001510
1511/*
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001512 * While CPU hotplug notification callbacks are executing, the frozen/thawed
1513 * state of the tasks in the system, as reported by the notification, must
1514 * remain unchanged throughout the execution of those callbacks.
1515 *
1516 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1517 *
1518 * This synchronization is implemented by mutually excluding regular CPU
1519 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1520 * Hibernate notifications.
1521 */
1522static int
1523cpu_hotplug_pm_callback(struct notifier_block *nb,
1524 unsigned long action, void *ptr)
1525{
1526 switch (action) {
1527
1528 case PM_SUSPEND_PREPARE:
1529 case PM_HIBERNATION_PREPARE:
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -07001530 cpu_hotplug_disable();
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001531 break;
1532
1533 case PM_POST_SUSPEND:
1534 case PM_POST_HIBERNATION:
Srivatsa S. Bhat16e53db2013-06-12 14:04:36 -07001535 cpu_hotplug_enable();
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001536 break;
1537
1538 default:
1539 return NOTIFY_DONE;
1540 }
1541
1542 return NOTIFY_OK;
1543}
1544
1545
Fenghua Yud7268a32011-11-15 21:59:31 +01001546static int __init cpu_hotplug_pm_sync_init(void)
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001547{
Fenghua Yu6e32d472012-11-13 11:32:43 -08001548 /*
1549 * cpu_hotplug_pm_callback has higher priority than x86
1550 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
1551 * to disable cpu hotplug to avoid cpu hotplug race.
1552 */
Srivatsa S. Bhat79cfbdf2011-11-03 00:59:25 +01001553 pm_notifier(cpu_hotplug_pm_callback, 0);
1554 return 0;
1555}
1556core_initcall(cpu_hotplug_pm_sync_init);
1557
Rafael J. Wysockif3de4be2007-08-30 23:56:29 -07001558#endif /* CONFIG_PM_SLEEP_SMP */
Max Krasnyansky68f4f1e2008-05-29 11:17:02 -07001559
Peter Zijlstra8ce371f2017-03-20 12:26:55 +01001560int __boot_cpu_id;
1561
Max Krasnyansky68f4f1e2008-05-29 11:17:02 -07001562#endif /* CONFIG_SMP */
Mike Travisb8d317d2008-07-24 18:21:29 -07001563
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001564/* CPU hotplug state machine steps */
Lai Jiangshan17a2f1c2017-12-01 21:50:05 +08001565static struct cpuhp_step cpuhp_hp_states[] = {
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001566 [CPUHP_OFFLINE] = {
1567 .name = "offline",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001568 .startup.single = NULL,
1569 .teardown.single = NULL,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001570 },
1571#ifdef CONFIG_SMP
1572 [CPUHP_CREATE_THREADS]= {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001573 .name = "threads:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001574 .startup.single = smpboot_create_threads,
1575 .teardown.single = NULL,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001576 .cant_stop = true,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001577 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001578 [CPUHP_PERF_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001579 .name = "perf:prepare",
1580 .startup.single = perf_event_init_cpu,
1581 .teardown.single = perf_event_exit_cpu,
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001582 },
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001583 [CPUHP_WORKQUEUE_PREP] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001584 .name = "workqueue:prepare",
1585 .startup.single = workqueue_prepare_cpu,
1586 .teardown.single = NULL,
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001587 },
Thomas Gleixner27590dc2016-07-15 10:41:04 +02001588 [CPUHP_HRTIMERS_PREPARE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001589 .name = "hrtimers:prepare",
1590 .startup.single = hrtimers_prepare_cpu,
1591 .teardown.single = hrtimers_dead_cpu,
Thomas Gleixner27590dc2016-07-15 10:41:04 +02001592 },
Richard Weinberger31487f82016-07-13 17:17:01 +00001593 [CPUHP_SMPCFD_PREPARE] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001594 .name = "smpcfd:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001595 .startup.single = smpcfd_prepare_cpu,
1596 .teardown.single = smpcfd_dead_cpu,
Richard Weinberger31487f82016-07-13 17:17:01 +00001597 },
Richard Weinbergere6d49892016-08-18 14:57:17 +02001598 [CPUHP_RELAY_PREPARE] = {
1599 .name = "relay:prepare",
1600 .startup.single = relay_prepare_cpu,
1601 .teardown.single = NULL,
1602 },
Sebastian Andrzej Siewior6731d4f2016-08-23 14:53:19 +02001603 [CPUHP_SLAB_PREPARE] = {
1604 .name = "slab:prepare",
1605 .startup.single = slab_prepare_cpu,
1606 .teardown.single = slab_dead_cpu,
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001607 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001608 [CPUHP_RCUTREE_PREP] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001609 .name = "RCU/tree:prepare",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001610 .startup.single = rcutree_prepare_cpu,
1611 .teardown.single = rcutree_dead_cpu,
Thomas Gleixner4df83742016-07-13 17:17:03 +00001612 },
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001613 /*
Richard Cochran4fae16d2016-07-27 11:08:18 +02001614 * On the tear-down path, timers_dead_cpu() must be invoked
1615 * before blk_mq_queue_reinit_notify() from notify_dead(),
1616	 * otherwise an RCU stall occurs.
1617 */
Thomas Gleixner26456f82017-12-27 21:37:25 +01001618 [CPUHP_TIMERS_PREPARE] = {
Mukesh Ojhad0180312018-07-24 20:17:48 +05301619 .name = "timers:prepare",
Thomas Gleixner26456f82017-12-27 21:37:25 +01001620 .startup.single = timers_prepare_cpu,
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001621 .teardown.single = timers_dead_cpu,
Richard Cochran4fae16d2016-07-27 11:08:18 +02001622 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001623 /* Kicks the plugged cpu into life */
Thomas Gleixnercff7d372016-02-26 18:43:28 +00001624 [CPUHP_BRINGUP_CPU] = {
1625 .name = "cpu:bringup",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001626 .startup.single = bringup_cpu,
Peter Zijlstrabf2c59f2020-04-01 17:40:33 -04001627 .teardown.single = finish_cpu,
Thomas Gleixner757c9892016-02-26 18:43:32 +00001628 .cant_stop = true,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001629 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001630 /* Final state before CPU kills itself */
1631 [CPUHP_AP_IDLE_DEAD] = {
1632 .name = "idle:dead",
1633 },
1634 /*
1635 * Last state before CPU enters the idle loop to die. Transient state
1636 * for synchronization.
1637 */
1638 [CPUHP_AP_OFFLINE] = {
1639 .name = "ap:offline",
1640 .cant_stop = true,
1641 },
Thomas Gleixner9cf72432016-03-10 12:54:09 +01001642 /* First state is scheduler control. Interrupts are disabled */
1643 [CPUHP_AP_SCHED_STARTING] = {
1644 .name = "sched:starting",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001645 .startup.single = sched_cpu_starting,
1646 .teardown.single = sched_cpu_dying,
Thomas Gleixner9cf72432016-03-10 12:54:09 +01001647 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001648 [CPUHP_AP_RCUTREE_DYING] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001649 .name = "RCU/tree:dying",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001650 .startup.single = NULL,
1651 .teardown.single = rcutree_dying_cpu,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001652 },
Lai Jiangshan46febd32017-11-28 21:19:53 +08001653 [CPUHP_AP_SMPCFD_DYING] = {
1654 .name = "smpcfd:dying",
1655 .startup.single = NULL,
1656 .teardown.single = smpcfd_dying_cpu,
1657 },
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001658 /* Entry state on starting. Interrupts enabled from here on. Transient
1659	 * state for synchronization */
1660 [CPUHP_AP_ONLINE] = {
1661 .name = "ap:online",
1662 },
Lai Jiangshan17a2f1c2017-12-01 21:50:05 +08001663 /*
Thomas Gleixner1cf12e02020-09-16 09:27:18 +02001664 * Handled on control processor until the plugged processor manages
Lai Jiangshan17a2f1c2017-12-01 21:50:05 +08001665 * this itself.
1666 */
1667 [CPUHP_TEARDOWN_CPU] = {
1668 .name = "cpu:teardown",
1669 .startup.single = NULL,
1670 .teardown.single = takedown_cpu,
1671 .cant_stop = true,
1672 },
Thomas Gleixner1cf12e02020-09-16 09:27:18 +02001673
1674 [CPUHP_AP_SCHED_WAIT_EMPTY] = {
1675 .name = "sched:waitempty",
1676 .startup.single = NULL,
1677 .teardown.single = sched_cpu_wait_empty,
1678 },
1679
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001680 /* Handle smpboot threads park/unpark */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001681 [CPUHP_AP_SMPBOOT_THREADS] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001682 .name = "smpboot/threads:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001683 .startup.single = smpboot_unpark_threads,
Thomas Gleixnerc4de6562018-05-29 19:05:25 +02001684 .teardown.single = smpboot_park_threads,
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001685 },
Thomas Gleixnerc5cb83b2017-06-20 01:37:51 +02001686 [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1687 .name = "irq/affinity:online",
1688 .startup.single = irq_affinity_online_cpu,
1689 .teardown.single = NULL,
1690 },
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001691 [CPUHP_AP_PERF_ONLINE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001692 .name = "perf:online",
1693 .startup.single = perf_event_init_cpu,
1694 .teardown.single = perf_event_exit_cpu,
Thomas Gleixner00e16c32016-07-13 17:16:09 +00001695 },
Peter Zijlstra9cf57732018-06-07 10:52:03 +02001696 [CPUHP_AP_WATCHDOG_ONLINE] = {
1697 .name = "lockup_detector:online",
1698 .startup.single = lockup_detector_online_cpu,
1699 .teardown.single = lockup_detector_offline_cpu,
1700 },
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001701 [CPUHP_AP_WORKQUEUE_ONLINE] = {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001702 .name = "workqueue:online",
1703 .startup.single = workqueue_online_cpu,
1704 .teardown.single = workqueue_offline_cpu,
Thomas Gleixner7ee681b2016-07-13 17:16:29 +00001705 },
Thomas Gleixner4df83742016-07-13 17:17:03 +00001706 [CPUHP_AP_RCUTREE_ONLINE] = {
Thomas Gleixner677f6642016-09-06 16:13:48 +02001707 .name = "RCU/tree:online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001708 .startup.single = rcutree_online_cpu,
1709 .teardown.single = rcutree_offline_cpu,
Thomas Gleixner4df83742016-07-13 17:17:03 +00001710 },
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001711#endif
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001712 /*
1713 * The dynamically registered state space is here
1714 */
1715
Thomas Gleixneraaddd7d2016-03-10 12:54:19 +01001716#ifdef CONFIG_SMP
1717 /* Last state is scheduler control setting the cpu active */
1718 [CPUHP_AP_ACTIVE] = {
1719 .name = "sched:active",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001720 .startup.single = sched_cpu_activate,
1721 .teardown.single = sched_cpu_deactivate,
Thomas Gleixneraaddd7d2016-03-10 12:54:19 +01001722 },
1723#endif
1724
Thomas Gleixnerd10ef6f2016-03-08 10:36:13 +01001725 /* CPU is fully up and running. */
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001726 [CPUHP_ONLINE] = {
1727 .name = "online",
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001728 .startup.single = NULL,
1729 .teardown.single = NULL,
Thomas Gleixner4baa0af2016-02-26 18:43:29 +00001730 },
1731};
1732
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001733/* Sanity check for callbacks */
1734static int cpuhp_cb_check(enum cpuhp_state state)
1735{
1736 if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1737 return -EINVAL;
1738 return 0;
1739}
1740
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001741/*
1742 * Returns a free slot in the dynamic state range (CPUHP_AP_ONLINE_DYN or
1743 * CPUHP_BP_PREPARE_DYN). The states are protected by cpuhp_state_mutex and
1744 * an empty slot is identified by having no name assigned.
1745 */
1746static int cpuhp_reserve_state(enum cpuhp_state state)
1747{
Thomas Gleixner4205e472017-01-10 14:01:05 +01001748 enum cpuhp_state i, end;
1749 struct cpuhp_step *step;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001750
Thomas Gleixner4205e472017-01-10 14:01:05 +01001751 switch (state) {
1752 case CPUHP_AP_ONLINE_DYN:
Lai Jiangshan17a2f1c2017-12-01 21:50:05 +08001753 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
Thomas Gleixner4205e472017-01-10 14:01:05 +01001754 end = CPUHP_AP_ONLINE_DYN_END;
1755 break;
1756 case CPUHP_BP_PREPARE_DYN:
Lai Jiangshan17a2f1c2017-12-01 21:50:05 +08001757 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
Thomas Gleixner4205e472017-01-10 14:01:05 +01001758 end = CPUHP_BP_PREPARE_DYN_END;
1759 break;
1760 default:
1761 return -EINVAL;
1762 }
1763
1764 for (i = state; i <= end; i++, step++) {
1765 if (!step->name)
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001766 return i;
1767 }
1768 WARN(1, "No more dynamic states available for CPU hotplug\n");
1769 return -ENOSPC;
1770}
1771
1772static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1773 int (*startup)(unsigned int cpu),
1774 int (*teardown)(unsigned int cpu),
1775 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001776{
1777 /* (Un)Install the callbacks for further cpu hotplug operations */
1778 struct cpuhp_step *sp;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001779 int ret = 0;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001780
Ethan Barnes0c96b272017-07-19 22:36:00 +00001781 /*
1782 * If name is NULL, then the state gets removed.
1783 *
1784 * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1785 * the first allocation from these dynamic ranges, so the removal
1786 * would trigger a new allocation and clear the wrong (already
1787 * empty) state, leaving the callbacks of the to be cleared state
1788 * dangling, which causes wreckage on the next hotplug operation.
1789 */
1790 if (name && (state == CPUHP_AP_ONLINE_DYN ||
1791 state == CPUHP_BP_PREPARE_DYN)) {
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001792 ret = cpuhp_reserve_state(state);
1793 if (ret < 0)
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001794 return ret;
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001795 state = ret;
1796 }
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001797 sp = cpuhp_get_step(state);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001798 if (name && sp->name)
1799 return -EBUSY;
1800
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001801 sp->startup.single = startup;
1802 sp->teardown.single = teardown;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001803 sp->name = name;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001804 sp->multi_instance = multi_instance;
1805 INIT_HLIST_HEAD(&sp->list);
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001806 return ret;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001807}
1808
1809static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1810{
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001811 return cpuhp_get_step(state)->teardown.single;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001812}
1813
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001814/*
1815 * Call the startup/teardown function for a step either on the AP or
1816 * on the current CPU.
1817 */
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001818static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1819 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001820{
Thomas Gleixnera7246322016-08-12 19:49:38 +02001821 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001822 int ret;
1823
Peter Zijlstra4dddfb52017-09-20 19:00:17 +02001824 /*
1825	 * If there's nothing to do, we're done.
1826 * Relies on the union for multi_instance.
1827 */
Vincent Donnefort453e4102021-02-16 10:35:06 +00001828 if (cpuhp_step_empty(bringup, sp))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001829 return 0;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001830 /*
1831	 * The non-AP-bound callbacks can fail on bringup. On teardown,
1832	 * e.g. module removal, we crash for now.
1833 */
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001834#ifdef CONFIG_SMP
1835 if (cpuhp_is_ap_state(state))
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001836 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001837 else
Peter Zijlstra96abb962017-09-20 19:00:16 +02001838 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001839#else
Peter Zijlstra96abb962017-09-20 19:00:16 +02001840 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
Thomas Gleixner1cf4f622016-02-26 18:43:39 +00001841#endif
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001842 BUG_ON(ret && !bringup);
1843 return ret;
1844}
1845
1846/*
1847 * Called from __cpuhp_setup_state on a recoverable failure.
1848 *
1849 * Note: The teardown callbacks for rollback are not allowed to fail!
1850 */
1851static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001852 struct hlist_node *node)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001853{
1854 int cpu;
1855
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001856 /* Roll back the already executed steps on the other cpus */
1857 for_each_present_cpu(cpu) {
1858 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1859 int cpustate = st->state;
1860
1861 if (cpu >= failedcpu)
1862 break;
1863
1864 /* Did we invoke the startup call on that cpu ? */
1865 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001866 cpuhp_issue_call(cpu, state, false, node);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001867 }
1868}
1869
Thomas Gleixner9805c672017-05-24 10:15:15 +02001870int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1871 struct hlist_node *node,
1872 bool invoke)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001873{
1874 struct cpuhp_step *sp;
1875 int cpu;
1876 int ret;
1877
Thomas Gleixner9805c672017-05-24 10:15:15 +02001878 lockdep_assert_cpus_held();
1879
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001880 sp = cpuhp_get_step(state);
1881 if (sp->multi_instance == false)
1882 return -EINVAL;
1883
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001884 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001885
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001886 if (!invoke || !sp->startup.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001887 goto add_node;
1888
1889 /*
1890 * Try to call the startup callback for each present cpu
1891 * depending on the hotplug state of the cpu.
1892 */
1893 for_each_present_cpu(cpu) {
1894 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1895 int cpustate = st->state;
1896
1897 if (cpustate < state)
1898 continue;
1899
1900 ret = cpuhp_issue_call(cpu, state, true, node);
1901 if (ret) {
Thomas Gleixner3c1627e2016-09-05 15:28:36 +02001902 if (sp->teardown.multi)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001903 cpuhp_rollback_install(cpu, state, node);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001904 goto unlock;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001905 }
1906 }
1907add_node:
1908 ret = 0;
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001909 hlist_add_head(node, &sp->list);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001910unlock:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001911 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixner9805c672017-05-24 10:15:15 +02001912 return ret;
1913}
1914
1915int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1916 bool invoke)
1917{
1918 int ret;
1919
1920 cpus_read_lock();
1921 ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02001922 cpus_read_unlock();
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001923 return ret;
1924}
1925EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
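/*
 * Illustrative sketch (hypothetical driver code): multi-instance states are
 * registered once with cpuhp_setup_state_multi() from <linux/cpuhotplug.h>
 * and individual instances are then added/removed here:
 *
 *	struct my_ctx {
 *		struct hlist_node node;
 *		...
 *	};
 *
 *	static int my_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct my_ctx *ctx = hlist_entry(node, struct my_ctx, node);
 *
 *		... set up ctx for @cpu ...
 *		return 0;
 *	}
 *
 *	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *					my_online, my_offline);
 *	...
 *	cpuhp_state_add_instance(state, &ctx->node);
 *	...
 *	cpuhp_state_remove_instance(state, &ctx->node);
 *
 * Adding an instance runs the startup callback for it on every present CPU
 * whose hotplug state has already reached @state.
 */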
1926
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001927/**
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001928 * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001929 * @state: The state to setup
1930 * @invoke: If true, the startup function is invoked for cpus where
1931 * cpu state >= @state
1932 * @startup: startup callback function
1933 * @teardown: teardown callback function
1934 * @multi_instance: State is set up for multiple instances which get
1935 * added afterwards.
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001936 *
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001937 * The caller needs to hold cpus read locked while calling this function.
Boris Ostrovsky512f0982016-12-15 10:00:57 -05001938 * Returns:
1939 * On success:
1940 * Positive state number if @state is CPUHP_AP_ONLINE_DYN
1941 * 0 for all other states
1942 * On failure: proper (negative) error code
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001943 */
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001944int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1945 const char *name, bool invoke,
1946 int (*startup)(unsigned int cpu),
1947 int (*teardown)(unsigned int cpu),
1948 bool multi_instance)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001949{
1950 int cpu, ret = 0;
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001951 bool dynstate;
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001952
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02001953 lockdep_assert_cpus_held();
1954
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001955 if (cpuhp_cb_check(state) || !name)
1956 return -EINVAL;
1957
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001958 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001959
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001960 ret = cpuhp_store_callbacks(state, name, startup, teardown,
1961 multi_instance);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001962
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001963 dynstate = state == CPUHP_AP_ONLINE_DYN;
1964 if (ret > 0 && dynstate) {
1965 state = ret;
1966 ret = 0;
1967 }
1968
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001969 if (ret || !invoke || !startup)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001970 goto out;
1971
1972 /*
1973 * Try to call the startup callback for each present cpu
1974 * depending on the hotplug state of the cpu.
1975 */
1976 for_each_present_cpu(cpu) {
1977 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1978 int cpustate = st->state;
1979
1980 if (cpustate < state)
1981 continue;
1982
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001983 ret = cpuhp_issue_call(cpu, state, true, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001984 if (ret) {
Thomas Gleixnera7246322016-08-12 19:49:38 +02001985 if (teardown)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02001986 cpuhp_rollback_install(cpu, state, NULL);
1987 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001988 goto out;
1989 }
1990 }
1991out:
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01001992 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixnerdc280d932016-12-21 20:19:49 +01001993 /*
1994 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1995 * dynamically allocated state in case of success.
1996 */
Thomas Gleixnerb9d9d692016-12-26 22:58:19 +01001997 if (!ret && dynstate)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00001998 return state;
1999 return ret;
2000}
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02002001EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
2002
2003int __cpuhp_setup_state(enum cpuhp_state state,
2004 const char *name, bool invoke,
2005 int (*startup)(unsigned int cpu),
2006 int (*teardown)(unsigned int cpu),
2007 bool multi_instance)
2008{
2009 int ret;
2010
2011 cpus_read_lock();
2012 ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
2013 teardown, multi_instance);
2014 cpus_read_unlock();
2015 return ret;
2016}
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00002017EXPORT_SYMBOL(__cpuhp_setup_state);
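/*
 * Illustrative sketch (hypothetical driver code, names are made up): the
 * common way to use this interface is the cpuhp_setup_state() wrapper from
 * <linux/cpuhotplug.h> with a dynamically allocated state:
 *
 *	static int myhp_online(unsigned int cpu)
 *	{
 *		... set up the per-cpu resources for @cpu ...
 *		return 0;
 *	}
 *
 *	static int myhp_offline(unsigned int cpu)
 *	{
 *		... undo myhp_online(), must not fail ...
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *				myhp_online, myhp_offline);
 *	if (ret < 0)
 *		return ret;
 *	myhp_state = ret;	(dynamic requests return the allocated state)
 *
 *	...
 *	cpuhp_remove_state(myhp_state);
 *
 * The startup callback is invoked for every CPU that is already at or past
 * the new state and for every CPU onlined later.
 */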
2018
Thomas Gleixnercf392d12016-08-12 19:49:39 +02002019int __cpuhp_state_remove_instance(enum cpuhp_state state,
2020 struct hlist_node *node, bool invoke)
2021{
2022 struct cpuhp_step *sp = cpuhp_get_step(state);
2023 int cpu;
2024
2025 BUG_ON(cpuhp_cb_check(state));
2026
2027 if (!sp->multi_instance)
2028 return -EINVAL;
2029
Thomas Gleixner8f553c42017-05-24 10:15:12 +02002030 cpus_read_lock();
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01002031 mutex_lock(&cpuhp_state_mutex);
2032
Thomas Gleixnercf392d12016-08-12 19:49:39 +02002033 if (!invoke || !cpuhp_get_teardown_cb(state))
2034 goto remove;
2035 /*
2036 * Call the teardown callback for each present cpu depending
2037 * on the hotplug state of the cpu. This function is not
2038 * allowed to fail currently!
2039 */
2040 for_each_present_cpu(cpu) {
2041 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2042 int cpustate = st->state;
2043
2044 if (cpustate >= state)
2045 cpuhp_issue_call(cpu, state, false, node);
2046 }
2047
2048remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02002049 hlist_del(node);
2050 mutex_unlock(&cpuhp_state_mutex);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02002051 cpus_read_unlock();
Thomas Gleixnercf392d12016-08-12 19:49:39 +02002052
2053 return 0;
2054}
2055EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01002056
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00002057/**
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02002058 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00002059 * @state: The state to remove
2060 * @invoke: If true, the teardown function is invoked for cpus where
2061 * cpu state >= @state
2062 *
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02002063 * The caller needs to hold cpus read locked while calling this function.
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00002064 * The teardown callback is currently not allowed to fail. Think
2065 * about module removal!
2066 */
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02002067void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00002068{
Thomas Gleixnercf392d12016-08-12 19:49:39 +02002069 struct cpuhp_step *sp = cpuhp_get_step(state);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00002070 int cpu;
2071
2072 BUG_ON(cpuhp_cb_check(state));
2073
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02002074 lockdep_assert_cpus_held();
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00002075
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01002076 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixnercf392d12016-08-12 19:49:39 +02002077 if (sp->multi_instance) {
2078 WARN(!hlist_empty(&sp->list),
2079 "Error: Removing state %d which has instances left.\n",
2080 state);
2081 goto remove;
2082 }
2083
Thomas Gleixnera7246322016-08-12 19:49:38 +02002084 if (!invoke || !cpuhp_get_teardown_cb(state))
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00002085 goto remove;
2086
2087 /*
2088 * Call the teardown callback for each present cpu depending
2089 * on the hotplug state of the cpu. This function is not
2090 * allowed to fail currently!
2091 */
2092 for_each_present_cpu(cpu) {
2093 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2094 int cpustate = st->state;
2095
2096 if (cpustate >= state)
Thomas Gleixnercf392d12016-08-12 19:49:39 +02002097 cpuhp_issue_call(cpu, state, false, NULL);
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00002098 }
2099remove:
Thomas Gleixnercf392d12016-08-12 19:49:39 +02002100 cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
Sebastian Andrzej Siewiordc434e052017-03-14 16:06:45 +01002101 mutex_unlock(&cpuhp_state_mutex);
Sebastian Andrzej Siewior71def422017-05-24 10:15:14 +02002102}
2103EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2104
2105void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2106{
2107 cpus_read_lock();
2108 __cpuhp_remove_state_cpuslocked(state, invoke);
Thomas Gleixner8f553c42017-05-24 10:15:12 +02002109 cpus_read_unlock();
Thomas Gleixner5b7aa872016-02-26 18:43:33 +00002110}
2111EXPORT_SYMBOL(__cpuhp_remove_state);
2112
Arnd Bergmanndc8d37e2019-12-10 20:56:04 +01002113#ifdef CONFIG_HOTPLUG_SMT
2114static void cpuhp_offline_cpu_device(unsigned int cpu)
2115{
2116 struct device *dev = get_cpu_device(cpu);
2117
2118 dev->offline = true;
2119 /* Tell user space about the state change */
2120 kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2121}
2122
2123static void cpuhp_online_cpu_device(unsigned int cpu)
2124{
2125 struct device *dev = get_cpu_device(cpu);
2126
2127 dev->offline = false;
2128 /* Tell user space about the state change */
2129 kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2130}
2131
2132int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2133{
2134 int cpu, ret = 0;
2135
2136 cpu_maps_update_begin();
2137 for_each_online_cpu(cpu) {
2138 if (topology_is_primary_thread(cpu))
2139 continue;
2140 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2141 if (ret)
2142 break;
2143 /*
2144 * As this needs to hold the cpu maps lock it's impossible
2145 * to call device_offline() because that ends up calling
2146 * cpu_down() which takes cpu maps lock. cpu maps lock
2147		 * needs to be held as this might race against in-kernel
2148 * abusers of the hotplug machinery (thermal management).
2149 *
2150 * So nothing would update device:offline state. That would
2151 * leave the sysfs entry stale and prevent onlining after
2152 * smt control has been changed to 'off' again. This is
2153 * called under the sysfs hotplug lock, so it is properly
2154 * serialized against the regular offline usage.
2155 */
2156 cpuhp_offline_cpu_device(cpu);
2157 }
2158 if (!ret)
2159 cpu_smt_control = ctrlval;
2160 cpu_maps_update_done();
2161 return ret;
2162}
2163
2164int cpuhp_smt_enable(void)
2165{
2166 int cpu, ret = 0;
2167
2168 cpu_maps_update_begin();
2169 cpu_smt_control = CPU_SMT_ENABLED;
2170 for_each_present_cpu(cpu) {
2171 /* Skip online CPUs and CPUs on offline nodes */
2172 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2173 continue;
2174 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2175 if (ret)
2176 break;
2177 /* See comment in cpuhp_smt_disable() */
2178 cpuhp_online_cpu_device(cpu);
2179 }
2180 cpu_maps_update_done();
2181 return ret;
2182}
2183#endif
2184
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002185#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2186static ssize_t show_cpuhp_state(struct device *dev,
2187 struct device_attribute *attr, char *buf)
2188{
2189 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2190
2191 return sprintf(buf, "%d\n", st->state);
2192}
2193static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
2194
Thomas Gleixner757c9892016-02-26 18:43:32 +00002195static ssize_t write_cpuhp_target(struct device *dev,
2196 struct device_attribute *attr,
2197 const char *buf, size_t count)
2198{
2199 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2200 struct cpuhp_step *sp;
2201 int target, ret;
2202
2203 ret = kstrtoint(buf, 10, &target);
2204 if (ret)
2205 return ret;
2206
2207#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2208 if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2209 return -EINVAL;
2210#else
2211 if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2212 return -EINVAL;
2213#endif
2214
2215 ret = lock_device_hotplug_sysfs();
2216 if (ret)
2217 return ret;
2218
2219 mutex_lock(&cpuhp_state_mutex);
2220 sp = cpuhp_get_step(target);
2221 ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2222 mutex_unlock(&cpuhp_state_mutex);
2223 if (ret)
Sebastian Andrzej Siewior40da1b12017-06-02 16:27:14 +02002224 goto out;
Thomas Gleixner757c9892016-02-26 18:43:32 +00002225
2226 if (st->state < target)
Qais Yousef33c37362020-03-23 13:51:10 +00002227 ret = cpu_up(dev->id, target);
Thomas Gleixner757c9892016-02-26 18:43:32 +00002228 else
Qais Yousef33c37362020-03-23 13:51:10 +00002229 ret = cpu_down(dev->id, target);
Sebastian Andrzej Siewior40da1b12017-06-02 16:27:14 +02002230out:
Thomas Gleixner757c9892016-02-26 18:43:32 +00002231 unlock_device_hotplug();
2232 return ret ? ret : count;
2233}
2234
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002235static ssize_t show_cpuhp_target(struct device *dev,
2236 struct device_attribute *attr, char *buf)
2237{
2238 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2239
2240 return sprintf(buf, "%d\n", st->target);
2241}
Thomas Gleixner757c9892016-02-26 18:43:32 +00002242static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002243
Peter Zijlstra1db49482017-09-20 19:00:21 +02002244
2245static ssize_t write_cpuhp_fail(struct device *dev,
2246 struct device_attribute *attr,
2247 const char *buf, size_t count)
2248{
2249 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2250 struct cpuhp_step *sp;
2251 int fail, ret;
2252
2253 ret = kstrtoint(buf, 10, &fail);
2254 if (ret)
2255 return ret;
2256
Vincent Donnefort3ae70c22021-02-16 10:35:04 +00002257 if (fail == CPUHP_INVALID) {
2258 st->fail = fail;
2259 return count;
2260 }
2261
Eiichi Tsukata33d4a5a2019-06-27 11:47:32 +09002262 if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2263 return -EINVAL;
2264
Peter Zijlstra1db49482017-09-20 19:00:21 +02002265 /*
2266 * Cannot fail STARTING/DYING callbacks.
2267 */
2268 if (cpuhp_is_atomic_state(fail))
2269 return -EINVAL;
2270
2271 /*
Vincent Donnefort62f25062021-02-16 10:35:05 +00002272 * DEAD callbacks cannot fail...
2273	 * ... neither can CPUHP_BRINGUP_CPU during hotunplug. The latter
2274	 * triggers the STARTING callbacks, so a failure in this state would
2275	 * hinder rollback.
2276 */
2277 if (fail <= CPUHP_BRINGUP_CPU && st->state > CPUHP_BRINGUP_CPU)
2278 return -EINVAL;
2279
2280 /*
Peter Zijlstra1db49482017-09-20 19:00:21 +02002281 * Cannot fail anything that doesn't have callbacks.
2282 */
2283 mutex_lock(&cpuhp_state_mutex);
2284 sp = cpuhp_get_step(fail);
2285 if (!sp->startup.single && !sp->teardown.single)
2286 ret = -EINVAL;
2287 mutex_unlock(&cpuhp_state_mutex);
2288 if (ret)
2289 return ret;
2290
2291 st->fail = fail;
2292
2293 return count;
2294}
2295
2296static ssize_t show_cpuhp_fail(struct device *dev,
2297 struct device_attribute *attr, char *buf)
2298{
2299 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2300
2301 return sprintf(buf, "%d\n", st->fail);
2302}
2303
2304static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
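/*
 * Illustrative sketch of how these per-cpu attributes are exercised from
 * user space (paths as created by cpuhp_sysfs_init() below; the numeric
 * values are state indices from /sys/devices/system/cpu/hotplug/states and
 * differ between kernel builds, so the numbers here are placeholders):
 *
 *	cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	echo 140 > /sys/devices/system/cpu/cpu1/hotplug/target
 *	echo 57  > /sys/devices/system/cpu/cpu1/hotplug/fail
 *
 * Without CONFIG_CPU_HOTPLUG_STATE_CONTROL only CPUHP_OFFLINE and
 * CPUHP_ONLINE may be written to "target"; writing CPUHP_INVALID (-1) to
 * "fail" clears a previously injected failure point.
 */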
2305
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002306static struct attribute *cpuhp_cpu_attrs[] = {
2307 &dev_attr_state.attr,
2308 &dev_attr_target.attr,
Peter Zijlstra1db49482017-09-20 19:00:21 +02002309 &dev_attr_fail.attr,
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002310 NULL
2311};
2312
Arvind Yadav993647a2017-06-29 17:40:47 +05302313static const struct attribute_group cpuhp_cpu_attr_group = {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002314 .attrs = cpuhp_cpu_attrs,
2315 .name = "hotplug",
2316 NULL
2317};
2318
2319static ssize_t show_cpuhp_states(struct device *dev,
2320 struct device_attribute *attr, char *buf)
2321{
2322 ssize_t cur, res = 0;
2323 int i;
2324
2325 mutex_lock(&cpuhp_state_mutex);
Thomas Gleixner757c9892016-02-26 18:43:32 +00002326 for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002327 struct cpuhp_step *sp = cpuhp_get_step(i);
2328
2329 if (sp->name) {
2330 cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2331 buf += cur;
2332 res += cur;
2333 }
2334 }
2335 mutex_unlock(&cpuhp_state_mutex);
2336 return res;
2337}
2338static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
2339
2340static struct attribute *cpuhp_cpu_root_attrs[] = {
2341 &dev_attr_states.attr,
2342 NULL
2343};
2344
Arvind Yadav993647a2017-06-29 17:40:47 +05302345static const struct attribute_group cpuhp_cpu_root_attr_group = {
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002346 .attrs = cpuhp_cpu_root_attrs,
2347 .name = "hotplug",
2348 NULL
2349};
2350
Thomas Gleixner05736e42018-05-29 17:48:27 +02002351#ifdef CONFIG_HOTPLUG_SMT
2352
Thomas Gleixner05736e42018-05-29 17:48:27 +02002353static ssize_t
Josh Poimboeufde7b77e2019-03-27 07:00:29 -05002354__store_smt_control(struct device *dev, struct device_attribute *attr,
2355 const char *buf, size_t count)
Thomas Gleixner05736e42018-05-29 17:48:27 +02002356{
2357 int ctrlval, ret;
2358
2359 if (sysfs_streq(buf, "on"))
2360 ctrlval = CPU_SMT_ENABLED;
2361 else if (sysfs_streq(buf, "off"))
2362 ctrlval = CPU_SMT_DISABLED;
2363 else if (sysfs_streq(buf, "forceoff"))
2364 ctrlval = CPU_SMT_FORCE_DISABLED;
2365 else
2366 return -EINVAL;
2367
2368 if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2369 return -EPERM;
2370
2371 if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2372 return -ENODEV;
2373
2374 ret = lock_device_hotplug_sysfs();
2375 if (ret)
2376 return ret;
2377
2378 if (ctrlval != cpu_smt_control) {
2379 switch (ctrlval) {
2380 case CPU_SMT_ENABLED:
Thomas Gleixner215af542018-07-07 11:40:18 +02002381 ret = cpuhp_smt_enable();
Thomas Gleixner05736e42018-05-29 17:48:27 +02002382 break;
2383 case CPU_SMT_DISABLED:
2384 case CPU_SMT_FORCE_DISABLED:
2385 ret = cpuhp_smt_disable(ctrlval);
2386 break;
2387 }
2388 }
2389
2390 unlock_device_hotplug();
2391 return ret ? ret : count;
2392}
Josh Poimboeufde7b77e2019-03-27 07:00:29 -05002393
2394#else /* !CONFIG_HOTPLUG_SMT */
2395static ssize_t
2396__store_smt_control(struct device *dev, struct device_attribute *attr,
2397 const char *buf, size_t count)
2398{
2399 return -ENODEV;
2400}
2401#endif /* CONFIG_HOTPLUG_SMT */
2402
2403static const char *smt_states[] = {
2404 [CPU_SMT_ENABLED] = "on",
2405 [CPU_SMT_DISABLED] = "off",
2406 [CPU_SMT_FORCE_DISABLED] = "forceoff",
2407 [CPU_SMT_NOT_SUPPORTED] = "notsupported",
2408 [CPU_SMT_NOT_IMPLEMENTED] = "notimplemented",
2409};
2410
2411static ssize_t
2412show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2413{
2414 const char *state = smt_states[cpu_smt_control];
2415
2416 return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
2417}
2418
2419static ssize_t
2420store_smt_control(struct device *dev, struct device_attribute *attr,
2421 const char *buf, size_t count)
2422{
2423 return __store_smt_control(dev, attr, buf, count);
2424}
Thomas Gleixner05736e42018-05-29 17:48:27 +02002425static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2426
2427static ssize_t
2428show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2429{
Josh Poimboeufde7b77e2019-03-27 07:00:29 -05002430 return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
Thomas Gleixner05736e42018-05-29 17:48:27 +02002431}
2432static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
2433
2434static struct attribute *cpuhp_smt_attrs[] = {
2435 &dev_attr_control.attr,
2436 &dev_attr_active.attr,
2437 NULL
2438};
2439
2440static const struct attribute_group cpuhp_smt_attr_group = {
2441 .attrs = cpuhp_smt_attrs,
2442 .name = "smt",
2443 NULL
2444};
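/*
 * Illustrative sketch: with CONFIG_HOTPLUG_SMT these attributes show up as
 *
 *	/sys/devices/system/cpu/smt/control	("on", "off", "forceoff", ...)
 *	/sys/devices/system/cpu/smt/active	(1 if SMT siblings are online)
 *
 * e.g. writing "off" to the control file offlines all non-primary sibling
 * threads via cpuhp_smt_disable() above.
 */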
2445
Josh Poimboeufde7b77e2019-03-27 07:00:29 -05002446static int __init cpu_smt_sysfs_init(void)
Thomas Gleixner05736e42018-05-29 17:48:27 +02002447{
Thomas Gleixner05736e42018-05-29 17:48:27 +02002448 return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2449 &cpuhp_smt_attr_group);
2450}
2451
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002452static int __init cpuhp_sysfs_init(void)
2453{
2454 int cpu, ret;
2455
Josh Poimboeufde7b77e2019-03-27 07:00:29 -05002456 ret = cpu_smt_sysfs_init();
Thomas Gleixner05736e42018-05-29 17:48:27 +02002457 if (ret)
2458 return ret;
2459
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002460 ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2461 &cpuhp_cpu_root_attr_group);
2462 if (ret)
2463 return ret;
2464
2465 for_each_possible_cpu(cpu) {
2466 struct device *dev = get_cpu_device(cpu);
2467
2468 if (!dev)
2469 continue;
2470 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2471 if (ret)
2472 return ret;
2473 }
2474 return 0;
2475}
2476device_initcall(cpuhp_sysfs_init);
Josh Poimboeufde7b77e2019-03-27 07:00:29 -05002477#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
Thomas Gleixner98f8cdc2016-02-26 18:43:31 +00002478
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002479/*
2480 * cpu_bit_bitmap[] is a special, "compressed" data structure that
2481 * contains, for every nr < NR_CPUS, the bitmap value with only bit nr set (1<<nr).
2482 *
Rusty Russelle0b582e2009-01-01 10:12:28 +10302483 * It is used by cpumask_of() to get a constant address to a CPU
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002484 * mask value that has a single bit set only.
2485 */
Mike Travisb8d317d2008-07-24 18:21:29 -07002486
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002487/* cpu_bit_bitmap[0] is empty - so we can back into it */
Michael Rodriguez4d519852011-03-22 16:34:07 -07002488#define MASK_DECLARE_1(x) [x+1][0] = (1UL << (x))
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002489#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2490#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2491#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
Mike Travisb8d317d2008-07-24 18:21:29 -07002492
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002493const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
Mike Travisb8d317d2008-07-24 18:21:29 -07002494
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002495 MASK_DECLARE_8(0), MASK_DECLARE_8(8),
2496 MASK_DECLARE_8(16), MASK_DECLARE_8(24),
2497#if BITS_PER_LONG > 32
2498 MASK_DECLARE_8(32), MASK_DECLARE_8(40),
2499 MASK_DECLARE_8(48), MASK_DECLARE_8(56),
Mike Travisb8d317d2008-07-24 18:21:29 -07002500#endif
2501};
Linus Torvaldse56b3bc2008-07-28 11:32:33 -07002502EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
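/*
 * Illustrative sketch: cpumask_of(cpu) resolves to a pointer into the table
 * above, so a constant single-bit mask needs no allocation:
 *
 *	const struct cpumask *mask = cpumask_of(4);
 *
 *	WARN_ON(cpumask_weight(mask) != 1 || !cpumask_test_cpu(4, mask));
 */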
Rusty Russell2d3854a2008-11-05 13:39:10 +11002503
2504const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2505EXPORT_SYMBOL(cpu_all_bits);
Rusty Russellb3199c02008-12-30 09:05:14 +10302506
2507#ifdef CONFIG_INIT_ALL_POSSIBLE
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002508struct cpumask __cpu_possible_mask __read_mostly
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002509 = {CPU_BITS_ALL};
Rusty Russellb3199c02008-12-30 09:05:14 +10302510#else
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002511struct cpumask __cpu_possible_mask __read_mostly;
Rusty Russellb3199c02008-12-30 09:05:14 +10302512#endif
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002513EXPORT_SYMBOL(__cpu_possible_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302514
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002515struct cpumask __cpu_online_mask __read_mostly;
2516EXPORT_SYMBOL(__cpu_online_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302517
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002518struct cpumask __cpu_present_mask __read_mostly;
2519EXPORT_SYMBOL(__cpu_present_mask);
Rusty Russellb3199c02008-12-30 09:05:14 +10302520
Rasmus Villemoes4b804c82016-01-20 15:00:19 -08002521struct cpumask __cpu_active_mask __read_mostly;
2522EXPORT_SYMBOL(__cpu_active_mask);
Rusty Russell3fa41522008-12-30 09:05:16 +10302523
Peter Zijlstrae40f74c2021-01-19 18:43:45 +01002524struct cpumask __cpu_dying_mask __read_mostly;
2525EXPORT_SYMBOL(__cpu_dying_mask);
2526
Thomas Gleixner0c09ab92019-07-09 16:23:40 +02002527atomic_t __num_online_cpus __read_mostly;
2528EXPORT_SYMBOL(__num_online_cpus);
2529
Rusty Russell3fa41522008-12-30 09:05:16 +10302530void init_cpu_present(const struct cpumask *src)
2531{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002532 cpumask_copy(&__cpu_present_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302533}
2534
2535void init_cpu_possible(const struct cpumask *src)
2536{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002537 cpumask_copy(&__cpu_possible_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302538}
2539
2540void init_cpu_online(const struct cpumask *src)
2541{
Rasmus Villemoesc4c54dd2016-01-20 15:00:16 -08002542 cpumask_copy(&__cpu_online_mask, src);
Rusty Russell3fa41522008-12-30 09:05:16 +10302543}
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002544
Thomas Gleixner0c09ab92019-07-09 16:23:40 +02002545void set_cpu_online(unsigned int cpu, bool online)
2546{
2547 /*
2548 * atomic_inc/dec() is required to handle the horrid abuse of this
2549 * function by the reboot and kexec code which invoke it from
2550 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
2551 * regular CPU hotplug is properly serialized.
2552 *
2553 * Note, that the fact that __num_online_cpus is of type atomic_t
2554 * does not protect readers which are not serialized against
2555 * concurrent hotplug operations.
2556 */
2557 if (online) {
2558 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
2559 atomic_inc(&__num_online_cpus);
2560 } else {
2561 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
2562 atomic_dec(&__num_online_cpus);
2563 }
2564}
2565
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002566/*
2567 * Activate the first processor.
2568 */
2569void __init boot_cpu_init(void)
2570{
2571 int cpu = smp_processor_id();
2572
2573 /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2574 set_cpu_online(cpu, true);
2575 set_cpu_active(cpu, true);
2576 set_cpu_present(cpu, true);
2577 set_cpu_possible(cpu, true);
Peter Zijlstra8ce371f2017-03-20 12:26:55 +01002578
2579#ifdef CONFIG_SMP
2580 __boot_cpu_id = cpu;
2581#endif
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002582}
2583
2584/*
2585 * Must be called _AFTER_ setting up the per_cpu areas
2586 */
Linus Torvaldsb5b14042018-08-12 12:19:42 -07002587void __init boot_cpu_hotplug_init(void)
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002588{
Abel Vesa269777a2018-08-15 00:26:00 +03002589#ifdef CONFIG_SMP
Thomas Gleixnere797bda2019-07-22 20:47:16 +02002590 cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
Abel Vesa269777a2018-08-15 00:26:00 +03002591#endif
Thomas Gleixner0cc3cd22018-06-29 16:05:48 +02002592 this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
Thomas Gleixnercff7d372016-02-26 18:43:28 +00002593}
Josh Poimboeuf98af8452019-04-12 15:39:28 -05002594
Tyler Hicks731dc9d2019-11-04 12:22:02 +01002595/*
2596 * These are used for a global "mitigations=" cmdline option for toggling
2597 * optional CPU mitigations.
2598 */
2599enum cpu_mitigations {
2600 CPU_MITIGATIONS_OFF,
2601 CPU_MITIGATIONS_AUTO,
2602 CPU_MITIGATIONS_AUTO_NOSMT,
2603};
2604
2605static enum cpu_mitigations cpu_mitigations __ro_after_init =
2606 CPU_MITIGATIONS_AUTO;
Josh Poimboeuf98af8452019-04-12 15:39:28 -05002607
2608static int __init mitigations_parse_cmdline(char *arg)
2609{
2610 if (!strcmp(arg, "off"))
2611 cpu_mitigations = CPU_MITIGATIONS_OFF;
2612 else if (!strcmp(arg, "auto"))
2613 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2614 else if (!strcmp(arg, "auto,nosmt"))
2615 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
Geert Uytterhoeven1bf72722019-05-16 09:09:35 +02002616 else
2617 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2618 arg);
Josh Poimboeuf98af8452019-04-12 15:39:28 -05002619
2620 return 0;
2621}
2622early_param("mitigations", mitigations_parse_cmdline);
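/*
 * Illustrative sketch: the option is consumed from the kernel command line,
 * e.g. booting with
 *
 *	mitigations=auto,nosmt
 *
 * keeps the default mitigations but additionally disables SMT where a
 * mitigation requires it; architecture code queries the result through
 * cpu_mitigations_off() and cpu_mitigations_auto_nosmt() below.
 */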
Tyler Hicks731dc9d2019-11-04 12:22:02 +01002623
2624/* mitigations=off */
2625bool cpu_mitigations_off(void)
2626{
2627 return cpu_mitigations == CPU_MITIGATIONS_OFF;
2628}
2629EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2630
2631/* mitigations=auto,nosmt */
2632bool cpu_mitigations_auto_nosmt(void)
2633{
2634 return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2635}
2636EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);