/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/sched/mm.h>
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched/signal.h>
#include <linux/sched/hotplug.h>
#include <linux/sched/isolation.h>
#include <linux/sched/task.h>
#include <linux/sched/smt.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/oom.h>
#include <linux/rcupdate.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>

#include <trace/events/power.h>
#define CREATE_TRACE_POINTS
#include <trace/events/cpuhp.h>

#include "smpboot.h"

/**
 * struct cpuhp_cpu_state - Per cpu hotplug state storage
 * @state: The current cpu state
 * @target: The target state
 * @fail: State at which a callback failure is injected (CPUHP_INVALID if none)
 * @thread: Pointer to the hotplug thread
 * @should_run: Thread should execute
 * @rollback: Perform a rollback
 * @single: Single callback invocation
 * @bringup: Single callback bringup or teardown selector
 * @node: For multi-instance, the instance node for a single entry callback
 * @last: For multi-instance rollback, remember how far we got
 * @cb_state: The state for a single callback (install/uninstall)
 * @result: Result of the operation
 * @done_up: Signal completion to the issuer of the task for cpu-up
 * @done_down: Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	struct completion	done_up;
	struct completion	done_down;
#endif
};

static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
	.fail = CPUHP_INVALID,
};

#ifdef CONFIG_SMP
cpumask_t cpus_booted_once_mask;
#endif

#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
static struct lockdep_map cpuhp_state_down_map =
	STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);


static inline void cpuhp_lock_acquire(bool bringup)
{
	lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}

static inline void cpuhp_lock_release(bool bringup)
{
	lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
}
#else

static inline void cpuhp_lock_acquire(bool bringup) { }
static inline void cpuhp_lock_release(bool bringup) { }

#endif

/**
 * struct cpuhp_step - Hotplug state machine step
 * @name: Name of the step
 * @startup: Startup function of the step
 * @teardown: Teardown function of the step
 * @list: List head for the instances of a multi-instance step
 * @cant_stop: Bringup/teardown can't be stopped at this step
 * @multi_instance: State has multiple instances which get added afterwards
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	struct hlist_head	list;
	bool			cant_stop;
	bool			multi_instance;
};

static DEFINE_MUTEX(cpuhp_state_mutex);
static struct cpuhp_step cpuhp_hp_states[];

static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
{
	return cpuhp_hp_states + state;
}

/**
 * cpuhp_invoke_callback - Invoke the callbacks for a given state
 * @cpu: The cpu for which the callback should be invoked
 * @state: The state to do callbacks for
 * @bringup: True if the bringup callback should be invoked
 * @node: For multi-instance, do a single entry callback for install/remove
 * @lastp: For multi-instance rollback, remember how far we got
 *
 * Called from cpu hotplug and from the state register machinery.
 */
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
				 bool bringup, struct hlist_node *node,
				 struct hlist_node **lastp)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	struct cpuhp_step *step = cpuhp_get_step(state);
	int (*cbm)(unsigned int cpu, struct hlist_node *node);
	int (*cb)(unsigned int cpu);
	int ret, cnt;

	if (st->fail == state) {
		st->fail = CPUHP_INVALID;

		if (!(bringup ? step->startup.single : step->teardown.single))
			return 0;

		return -EAGAIN;
	}

	if (!step->multi_instance) {
		WARN_ON_ONCE(lastp && *lastp);
		cb = bringup ? step->startup.single : step->teardown.single;
		if (!cb)
			return 0;
		trace_cpuhp_enter(cpu, st->target, state, cb);
		ret = cb(cpu);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}
	cbm = bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return 0;

	/* Single invocation for instance add/remove */
	if (node) {
		WARN_ON_ONCE(lastp && *lastp);
		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		return ret;
	}

	/* State transition. Invoke on all instances */
	cnt = 0;
	hlist_for_each(node, &step->list) {
		if (lastp && node == *lastp)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		if (ret) {
			if (!lastp)
				goto err;

			*lastp = node;
			return ret;
		}
		cnt++;
	}
	if (lastp)
		*lastp = NULL;
	return 0;
err:
	/* Rollback the instances if one failed */
	cbm = !bringup ? step->startup.multi : step->teardown.multi;
	if (!cbm)
		return ret;

	hlist_for_each(node, &step->list) {
		if (!cnt--)
			break;

		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
		ret = cbm(cpu, node);
		trace_cpuhp_exit(cpu, st->state, state, ret);
		/*
		 * Rollback must not fail!
		 */
		WARN_ON_ONCE(ret);
	}
	return ret;
}
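
/*
 * Example invocation forms (a sketch mirroring the callers in this file,
 * not additional calls):
 *
 *	// Plain state callback, as used by the up/down loops:
 *	ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 *
 *	// Single multi-instance entry for install/remove:
 *	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 *
 *	// AP state transition with rollback bookkeeping in st->last:
 *	st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node,
 *					   &st->last);
 */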

#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
}

static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	wait_for_completion(done);
}

static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
{
	struct completion *done = bringup ? &st->done_up : &st->done_down;
	complete(done);
}
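
/*
 * Pairing sketch: the initiating side blocks in wait_for_ap_thread() for
 * the direction it kicked off, and the AP hotplug thread fires the
 * matching completion when it has nothing left to run:
 *
 *	// control side			// AP hotplug thread
 *	wait_for_ap_thread(st, true);	complete_ap_thread(st, true);
 */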

/*
 * The former STARTING/DYING states run with IRQs disabled and must not fail.
 */
static bool cpuhp_is_atomic_state(enum cpuhp_state state)
{
	return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}

/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);

/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
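
/*
 * Usage sketch (mirroring cpu_hotplug_disable() below): any update of
 * cpu_hotplug_disabled or of the cpu masks must be bracketed by this pair:
 *
 *	cpu_maps_update_begin();
 *	cpu_hotplug_disabled++;
 *	cpu_maps_update_done();
 */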

/*
 * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);

void cpus_read_lock(void)
{
	percpu_down_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_lock);

int cpus_read_trylock(void)
{
	return percpu_down_read_trylock(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_trylock);

void cpus_read_unlock(void)
{
	percpu_up_read(&cpu_hotplug_lock);
}
EXPORT_SYMBOL_GPL(cpus_read_unlock);
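
/*
 * Usage sketch: a hotplug read-side section keeps CPUs from coming or
 * going while it runs. The per-CPU work below is a hypothetical
 * placeholder:
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);	// hypothetical
 *	cpus_read_unlock();
 */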

void cpus_write_lock(void)
{
	percpu_down_write(&cpu_hotplug_lock);
}

void cpus_write_unlock(void)
{
	percpu_up_write(&cpu_hotplug_lock);
}

void lockdep_assert_cpus_held(void)
{
	/*
	 * We can't have hotplug operations before userspace starts running,
	 * and some init codepaths will knowingly not take the hotplug lock.
	 * This is all valid, so mute lockdep until it makes sense to report
	 * unheld locks.
	 */
	if (system_state < SYSTEM_RUNNING)
		return;

	percpu_rwsem_assert_held(&cpu_hotplug_lock);
}

#ifdef CONFIG_LOCKDEP
int lockdep_is_cpus_held(void)
{
	return percpu_rwsem_is_held(&cpu_hotplug_lock);
}
#endif

static void lockdep_acquire_cpus_lock(void)
{
	rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
}

static void lockdep_release_cpus_lock(void)
{
	rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
}

/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);

static void __cpu_hotplug_enable(void)
{
	if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		return;
	cpu_hotplug_disabled--;
}

void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	__cpu_hotplug_enable();
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
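
/*
 * Usage sketch: callers that must keep the CPU topology stable across a
 * longer operation pair the two calls. While disabled, hotplug attempts
 * return -EBUSY (see cpu_hotplug_disabled above):
 *
 *	cpu_hotplug_disable();
 *	// ... cpu_up()/cpu_down() now fail with -EBUSY ...
 *	cpu_hotplug_enable();
 */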

#else

static void lockdep_acquire_cpus_lock(void)
{
}

static void lockdep_release_cpus_lock(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */

/*
 * Architectures that need SMT-specific errata handling during SMT hotplug
 * should override this.
 */
void __weak arch_smt_update(void) { }

#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;

void __init cpu_smt_disable(bool force)
{
	if (!cpu_smt_possible())
		return;

	if (force) {
		pr_info("SMT: Force disabled\n");
		cpu_smt_control = CPU_SMT_FORCE_DISABLED;
	} else {
		pr_info("SMT: disabled\n");
		cpu_smt_control = CPU_SMT_DISABLED;
	}
}

/*
 * The decision whether SMT is supported can only be made after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_check_topology(void)
{
	if (!topology_smt_supported())
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}

static int __init smt_cmdline_disable(char *str)
{
	cpu_smt_disable(str && !strcmp(str, "force"));
	return 0;
}
early_param("nosmt", smt_cmdline_disable);
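
/*
 * Boot command line examples handled by smt_cmdline_disable() above:
 *
 *	nosmt		- sets CPU_SMT_DISABLED
 *	nosmt=force	- sets CPU_SMT_FORCE_DISABLED, which
 *			  cpu_smt_possible() below treats as irreversible
 */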

static inline bool cpu_smt_allowed(unsigned int cpu)
{
	if (cpu_smt_control == CPU_SMT_ENABLED)
		return true;

	if (topology_is_primary_thread(cpu))
		return true;

	/*
	 * On x86 it's required to boot all logical CPUs at least once so
	 * that the init code can get a chance to set CR4.MCE on each
	 * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
	 * core will shut down the machine.
	 */
	return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}

/* Returns true if SMT is not supported or forcefully (irreversibly) disabled */
bool cpu_smt_possible(void)
{
	return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
		cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
#else
static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
#endif

static inline enum cpuhp_state
cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;

	st->rollback = false;
	st->last = NULL;

	st->target = target;
	st->single = false;
	st->bringup = st->state < target;

	return prev_state;
}

static inline void
cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
{
	st->rollback = true;

	/*
	 * If we have st->last we need to undo partial multi_instance of this
	 * state first. Otherwise start undo at the previous state.
	 */
	if (!st->last) {
		if (st->bringup)
			st->state--;
		else
			st->state++;
	}

	st->target = prev_state;
	st->bringup = !st->bringup;
}
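
/*
 * Rollback sketch (mirroring cpuhp_kick_ap() below): a failed bringup
 * towards @target turns into a teardown back to the saved previous state:
 *
 *	prev_state = cpuhp_set_state(st, target);	// bringup == true
 *	// ... a callback fails partway to target ...
 *	cpuhp_reset_state(st, prev_state);		// bringup flipped,
 *							// target = prev_state
 */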

/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() in cpuhp_thread_fun().
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}

static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
{
	enum cpuhp_state prev_state;
	int ret;

	prev_state = cpuhp_set_state(st, target);
	__cpuhp_kick_ap(st);
	if ((ret = st->result)) {
		cpuhp_reset_state(st, prev_state);
		__cpuhp_kick_ap(st);
	}

	return ret;
}

static int bringup_wait_for_ap(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
	wait_for_ap_thread(st, true);
	if (WARN_ON_ONCE((!cpu_online(cpu))))
		return -ECANCELED;

	/* Unpark the hotplug thread of the target cpu */
	kthread_unpark(st->thread);

	/*
	 * SMT soft disabling on X86 requires bringing the CPU out of the
	 * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The
	 * CPU marked itself as booted_once in notify_cpu_starting() so the
	 * cpu_smt_allowed() check will now return false if this is not the
	 * primary sibling.
	 */
	if (!cpu_smt_allowed(cpu))
		return -ECANCELED;

	if (st->target <= CPUHP_AP_ONLINE_IDLE)
		return 0;

	return cpuhp_kick_ap(st, st->target);
}

static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/*
	 * Some architectures have to walk the irq descriptors to
	 * set up the vector space for the cpu which comes online.
	 * Prevent irq alloc/free across the bringup.
	 */
	irq_lock_sparse();

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	irq_unlock_sparse();
	if (ret)
		return ret;
	return bringup_wait_for_ap(cpu);
}

static int finish_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	struct mm_struct *mm = idle->active_mm;

	/*
	 * idle_task_exit() will have switched to &init_mm, now
	 * clean up any remaining active_mm state.
	 */
	if (mm != &init_mm)
		idle->active_mm = &init_mm;
	mmdrop(mm);
	return 0;
}

/*
 * Hotplug state machine related functions
 */

static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
{
	for (st->state--; st->state > st->target; st->state--)
		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
}

static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
{
	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return true;
	/*
	 * When CPU hotplug is disabled, then taking the CPU down is not
	 * possible because takedown_cpu() and the architecture and
	 * subsystem specific mechanisms are not available. So the CPU
	 * which would be completely unplugged again needs to stay around
	 * in the current state.
	 */
	return st->state <= CPUHP_BRINGUP_CPU;
}

static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
			      enum cpuhp_state target)
{
	enum cpuhp_state prev_state = st->state;
	int ret = 0;

	while (st->state < target) {
		st->state++;
		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
		if (ret) {
			if (can_rollback_cpu(st)) {
				st->target = prev_state;
				undo_cpu_up(cpu, st);
			}
			break;
		}
	}
	return ret;
}

/*
 * The cpu hotplug threads manage the bringup and teardown of the cpus
 */
static void cpuhp_create(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

	init_completion(&st->done_up);
	init_completion(&st->done_down);
}

static int cpuhp_should_run(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);

	return st->should_run;
}

/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 *
 * Each invocation of this function by the smpboot thread does a single AP
 * state callback.
 *
 * It has 3 modes of operation:
 *  - single: runs st->cb_state
 *  - up:     runs ++st->state, while st->state < st->target
 *  - down:   runs st->state--, while st->state > st->target
 *
 * When complete or on error, should_run is cleared and the completion is fired.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	bool bringup = st->bringup;
	enum cpuhp_state state;

	if (WARN_ON_ONCE(!st->should_run))
		return;

	/*
	 * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
	 * that if we see ->should_run we also see the rest of the state.
	 */
	smp_mb();

	/*
	 * The BP holds the hotplug lock, but we're now running on the AP,
	 * ensure that anybody asserting the lock is held will actually find
	 * it so.
	 */
	lockdep_acquire_cpus_lock();
	cpuhp_lock_acquire(bringup);

	if (st->single) {
		state = st->cb_state;
		st->should_run = false;
	} else {
		if (bringup) {
			st->state++;
			state = st->state;
			st->should_run = (st->state < st->target);
			WARN_ON_ONCE(st->state > st->target);
		} else {
			state = st->state;
			st->state--;
			st->should_run = (st->state > st->target);
			WARN_ON_ONCE(st->state < st->target);
		}
	}

	WARN_ON_ONCE(!cpuhp_is_ap_state(state));

	if (cpuhp_is_atomic_state(state)) {
		local_irq_disable();
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
		local_irq_enable();

		/*
		 * STARTING/DYING must not fail!
		 */
		WARN_ON_ONCE(st->result);
	} else {
		st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
	}

	if (st->result) {
		/*
		 * If we fail on a rollback, we're up a creek without a
		 * paddle; no way forward, no way back. We lose, thanks for
		 * playing.
		 */
		WARN_ON_ONCE(st->rollback);
		st->should_run = false;
	}

	cpuhp_lock_release(bringup);
	lockdep_release_cpus_lock();

	if (!st->should_run)
		complete_ap_thread(st, bringup);
}

/* Invoke a single callback on a remote cpu */
static int
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
			 struct hlist_node *node)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int ret;

	if (!cpu_online(cpu))
		return 0;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	/*
	 * If we are up and running, use the hotplug thread. For early calls
	 * we invoke the thread function directly.
	 */
	if (!st->thread)
		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);

	st->rollback = false;
	st->last = NULL;

	st->node = node;
	st->bringup = bringup;
	st->cb_state = state;
	st->single = true;

	__cpuhp_kick_ap(st);

	/*
	 * If we failed and did a partial, do a rollback.
	 */
	if ((ret = st->result) && st->last) {
		st->rollback = true;
		st->bringup = !bringup;

		__cpuhp_kick_ap(st);
	}

	/*
	 * Clean up the leftovers so the next hotplug operation won't use stale
	 * data.
	 */
	st->node = st->last = NULL;
	return ret;
}

static int cpuhp_kick_ap_work(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	enum cpuhp_state prev_state = st->state;
	int ret;

	cpuhp_lock_acquire(false);
	cpuhp_lock_release(false);

	cpuhp_lock_acquire(true);
	cpuhp_lock_release(true);

	trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
	ret = cpuhp_kick_ap(st, st->target);
	trace_cpuhp_exit(cpu, st->state, prev_state, ret);

	return ret;
}

static struct smp_hotplug_thread cpuhp_threads = {
	.store			= &cpuhp_state.thread,
	.create			= &cpuhp_create,
	.thread_should_run	= cpuhp_should_run,
	.thread_fn		= cpuhp_thread_fun,
	.thread_comm		= "cpuhp/%u",
	.selfparking		= true,
};

void __init cpuhp_threads_init(void)
{
	BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
	kthread_unpark(this_cpu_read(cpuhp_state.thread));
}

#ifdef CONFIG_HOTPLUG_CPU
#ifndef arch_clear_mm_cpumask_cpu
#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
#endif

/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so it's not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));
	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *t;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		t = find_lock_task_mm(p);
		if (!t)
			continue;
		arch_clear_mm_cpumask_cpu(cpu, t->mm);
		task_unlock(t);
	}
	rcu_read_unlock();
}
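
/*
 * Usage sketch: architecture teardown code is expected to call this once
 * the CPU is already offline. The arch hook below is hypothetical:
 *
 *	if (arch_cpu_kill(cpu))		// hypothetical arch hook
 *		clear_tasks_mm_cpumask(cpu);
 */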

/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();
	int ret;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
	 * do this step again.
	 */
	WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
	st->state--;
	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
		/*
		 * DYING must not fail!
		 */
		WARN_ON_ONCE(ret);
	}

	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Remove CPU from timer broadcasting */
	tick_offline_cpu(cpu);
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}

static int takedown_cpu(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int err;

	/* Park the smpboot threads */
	kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
	if (err) {
		/* CPU refused to die */
		irq_unlock_sparse();
		/* Unpark the hotplug thread so we can rollback there */
		kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
		return err;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
	 * all runnable tasks from the CPU; only the idle task is left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	wait_for_ap_thread(st, false);
	BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);

	/* Interrupts are moved away from the dying cpu, re-enable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	tick_cleanup_dead_cpu(cpu);
	rcutree_migrate_callbacks(cpu);
	return 0;
}
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 955 | |
Thomas Gleixner | 71f87b2 | 2016-03-03 10:52:10 +0100 | [diff] [blame] | 956 | static void cpuhp_complete_idle_dead(void *arg) |
| 957 | { |
| 958 | struct cpuhp_cpu_state *st = arg; |
| 959 | |
Peter Zijlstra | 5ebe774 | 2017-09-20 19:00:19 +0200 | [diff] [blame] | 960 | complete_ap_thread(st, false); |
Thomas Gleixner | 71f87b2 | 2016-03-03 10:52:10 +0100 | [diff] [blame] | 961 | } |
| 962 | |
Thomas Gleixner | e69aab1 | 2016-02-26 18:43:43 +0000 | [diff] [blame] | 963 | void cpuhp_report_idle_dead(void) |
| 964 | { |
| 965 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); |
| 966 | |
| 967 | BUG_ON(st->state != CPUHP_AP_OFFLINE); |
Thomas Gleixner | 27d50c7 | 2016-02-26 18:43:44 +0000 | [diff] [blame] | 968 | rcu_report_dead(smp_processor_id()); |
Thomas Gleixner | 71f87b2 | 2016-03-03 10:52:10 +0100 | [diff] [blame] | 969 | st->state = CPUHP_AP_IDLE_DEAD; |
| 970 | /* |
| 971 | * We cannot call complete() after rcu_report_dead(), so we delegate it |
| 972 | * to an online cpu. |
| 973 | */ |
| 974 | smp_call_function_single(cpumask_first(cpu_online_mask), |
| 975 | cpuhp_complete_idle_dead, st, 0); |
Thomas Gleixner | e69aab1 | 2016-02-26 18:43:43 +0000 | [diff] [blame] | 976 | } |
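
/*
 * For reference, the report above is triggered from the idle loop; a
 * simplified sketch of the relevant branch of do_idle() in
 * kernel/sched/idle.c (reproduced here only for illustration):
 */
static void example_idle_dead_path(void)
{
        unsigned int cpu = smp_processor_id();

        if (cpu_is_offline(cpu)) {
                tick_nohz_idle_stop_tick();
                cpuhp_report_idle_dead();
                arch_cpu_idle_dead();
        }
}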
| 977 | |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 978 | static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st) |
| 979 | { |
Mukesh Ojha | 6fb86d9 | 2018-08-28 12:24:54 +0530 | [diff] [blame] | 980 | for (st->state++; st->state < st->target; st->state++) |
| 981 | cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 982 | } |
| 983 | |
| 984 | static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st, |
| 985 | enum cpuhp_state target) |
| 986 | { |
| 987 | enum cpuhp_state prev_state = st->state; |
| 988 | int ret = 0; |
| 989 | |
| 990 | for (; st->state > target; st->state--) { |
| 991 | ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL); |
| 992 | if (ret) { |
| 993 | st->target = prev_state; |
Thomas Gleixner | 69fa6eb | 2018-09-06 15:21:38 +0200 | [diff] [blame] | 994 | if (st->state < prev_state) |
| 995 | undo_cpu_down(cpu, st); |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 996 | break; |
| 997 | } |
| 998 | } |
| 999 | return ret; |
| 1000 | } |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1001 | |
Thomas Gleixner | 9845817 | 2016-02-26 18:43:25 +0000 | [diff] [blame] | 1002 | /* Requires cpu_add_remove_lock to be held */ |
Thomas Gleixner | af1f404 | 2016-02-26 18:43:30 +0000 | [diff] [blame] | 1003 | static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, |
| 1004 | enum cpuhp_state target) |
Thomas Gleixner | 9845817 | 2016-02-26 18:43:25 +0000 | [diff] [blame] | 1005 | { |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1006 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
| 1007 | int prev_state, ret = 0; |
Thomas Gleixner | 9845817 | 2016-02-26 18:43:25 +0000 | [diff] [blame] | 1008 | |
| 1009 | if (num_online_cpus() == 1) |
| 1010 | return -EBUSY; |
| 1011 | |
Thomas Gleixner | 757c989 | 2016-02-26 18:43:32 +0000 | [diff] [blame] | 1012 | if (!cpu_present(cpu)) |
Thomas Gleixner | 9845817 | 2016-02-26 18:43:25 +0000 | [diff] [blame] | 1013 | return -EINVAL; |
| 1014 | |
Thomas Gleixner | 8f553c4 | 2017-05-24 10:15:12 +0200 | [diff] [blame] | 1015 | cpus_write_lock(); |
Thomas Gleixner | 9845817 | 2016-02-26 18:43:25 +0000 | [diff] [blame] | 1016 | |
| 1017 | cpuhp_tasks_frozen = tasks_frozen; |
| 1018 | |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 1019 | prev_state = cpuhp_set_state(st, target); |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1020 | /* |
| 1021 | * If the current CPU state is in the range of the AP hotplug thread, |
| 1022 | * then we need to kick the thread. |
| 1023 | */ |
Thomas Gleixner | 8df3e07 | 2016-02-26 18:43:41 +0000 | [diff] [blame] | 1024 | if (st->state > CPUHP_TEARDOWN_CPU) { |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 1025 | st->target = max((int)target, CPUHP_TEARDOWN_CPU); |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1026 | ret = cpuhp_kick_ap_work(cpu); |
| 1027 | /* |
| 1028 | * The AP side has done the error rollback already. Just |
| 1029 | * return the error code. |
| 1030 | */ |
| 1031 | if (ret) |
| 1032 | goto out; |
| 1033 | |
| 1034 | /* |
| 1035 | * We might have stopped while still in the range of the AP hotplug |
| 1036 | * thread. Nothing more to do. |
| 1037 | */ |
Thomas Gleixner | 8df3e07 | 2016-02-26 18:43:41 +0000 | [diff] [blame] | 1038 | if (st->state > CPUHP_TEARDOWN_CPU) |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1039 | goto out; |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 1040 | |
| 1041 | st->target = target; |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1042 | } |
| 1043 | /* |
Thomas Gleixner | 8df3e07 | 2016-02-26 18:43:41 +0000 | [diff] [blame] | 1044 | * The AP brought itself down to CPUHP_TEARDOWN_CPU, so we need |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1045 | * to do the remaining cleanups. |
| 1046 | */ |
Thomas Gleixner | a724632 | 2016-08-12 19:49:38 +0200 | [diff] [blame] | 1047 | ret = cpuhp_down_callbacks(cpu, st, target); |
Thomas Gleixner | 69fa6eb | 2018-09-06 15:21:38 +0200 | [diff] [blame] | 1048 | if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) { |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 1049 | cpuhp_reset_state(st, prev_state); |
| 1050 | __cpuhp_kick_ap(st); |
Sebastian Andrzej Siewior | 3b9d6da | 2016-04-08 14:40:15 +0200 | [diff] [blame] | 1051 | } |
Thomas Gleixner | 9845817 | 2016-02-26 18:43:25 +0000 | [diff] [blame] | 1052 | |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1053 | out: |
Thomas Gleixner | 8f553c4 | 2017-05-24 10:15:12 +0200 | [diff] [blame] | 1054 | cpus_write_unlock(); |
Thomas Gleixner | 941154b | 2017-09-12 21:37:04 +0200 | [diff] [blame] | 1055 | /* |
| 1056 | * Do post-unplug cleanup. This is still protected against |
| 1057 | * concurrent CPU hotplug via cpu_add_remove_lock. |
| 1058 | */ |
| 1059 | lockup_detector_cleanup(); |
Thomas Gleixner | a74cfff | 2018-11-25 19:33:39 +0100 | [diff] [blame] | 1060 | arch_smt_update(); |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1061 | return ret; |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1062 | } |
| 1063 | |
Thomas Gleixner | cc1fe21 | 2018-05-29 17:49:05 +0200 | [diff] [blame] | 1064 | static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target) |
| 1065 | { |
| 1066 | if (cpu_hotplug_disabled) |
| 1067 | return -EBUSY; |
| 1068 | return _cpu_down(cpu, 0, target); |
| 1069 | } |
| 1070 | |
Qais Yousef | 33c3736 | 2020-03-23 13:51:10 +0000 | [diff] [blame] | 1071 | static int cpu_down(unsigned int cpu, enum cpuhp_state target) |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1072 | { |
Heiko Carstens | 9ea09af | 2008-12-22 12:36:30 +0100 | [diff] [blame] | 1073 | int err; |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1074 | |
Gautham R Shenoy | d221938 | 2008-01-25 21:08:01 +0100 | [diff] [blame] | 1075 | cpu_maps_update_begin(); |
Thomas Gleixner | cc1fe21 | 2018-05-29 17:49:05 +0200 | [diff] [blame] | 1076 | err = cpu_down_maps_locked(cpu, target); |
Gautham R Shenoy | d221938 | 2008-01-25 21:08:01 +0100 | [diff] [blame] | 1077 | cpu_maps_update_done(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | return err; |
| 1079 | } |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 1080 | |
Qais Yousef | 33c3736 | 2020-03-23 13:51:10 +0000 | [diff] [blame] | 1081 | /** |
| 1082 | * cpu_device_down - Bring down a cpu device |
| 1083 | * @dev: Pointer to the cpu device to offline |
| 1084 | * |
| 1085 | * This function is meant to be used by the device core CPU subsystem only. |
| 1086 | * |
| 1087 | * Other subsystems should use remove_cpu() instead. |
| 1088 | */ |
| 1089 | int cpu_device_down(struct device *dev) |
Thomas Gleixner | af1f404 | 2016-02-26 18:43:30 +0000 | [diff] [blame] | 1090 | { |
Qais Yousef | 33c3736 | 2020-03-23 13:51:10 +0000 | [diff] [blame] | 1091 | return cpu_down(dev->id, CPUHP_OFFLINE); |
Thomas Gleixner | af1f404 | 2016-02-26 18:43:30 +0000 | [diff] [blame] | 1092 | } |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 1093 | |
Qais Yousef | 93ef142 | 2020-03-23 13:50:54 +0000 | [diff] [blame] | 1094 | int remove_cpu(unsigned int cpu) |
| 1095 | { |
| 1096 | int ret; |
| 1097 | |
| 1098 | lock_device_hotplug(); |
| 1099 | ret = device_offline(get_cpu_device(cpu)); |
| 1100 | unlock_device_hotplug(); |
| 1101 | |
| 1102 | return ret; |
| 1103 | } |
| 1104 | EXPORT_SYMBOL_GPL(remove_cpu); |
| 1105 | |
Qais Yousef | 0441a55 | 2020-03-23 13:50:55 +0000 | [diff] [blame] | 1106 | void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) |
| 1107 | { |
| 1108 | unsigned int cpu; |
| 1109 | int error; |
| 1110 | |
| 1111 | cpu_maps_update_begin(); |
| 1112 | |
| 1113 | /* |
| 1114 | * Make certain the CPU we're about to reboot on is online. |
| 1115 | * |
| 1116 | * This is in line with what migrate_to_reboot_cpu() already does. |
| 1117 | */ |
| 1118 | if (!cpu_online(primary_cpu)) |
| 1119 | primary_cpu = cpumask_first(cpu_online_mask); |
| 1120 | |
| 1121 | for_each_online_cpu(cpu) { |
| 1122 | if (cpu == primary_cpu) |
| 1123 | continue; |
| 1124 | |
| 1125 | error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); |
| 1126 | if (error) { |
| 1127 | pr_err("Failed to offline CPU%d - error=%d\n", |
| 1128 | cpu, error); |
| 1129 | break; |
| 1130 | } |
| 1131 | } |
| 1132 | |
| 1133 | /* |
| 1134 | * Ensure all but the reboot CPU are offline. |
| 1135 | */ |
| 1136 | BUG_ON(num_online_cpus() > 1); |
| 1137 | |
| 1138 | /* |
| 1139 | * Make sure the CPUs won't be enabled by someone else after this |
| 1140 | * point. Kexec will reboot to a new kernel shortly, resetting |
| 1141 | * everything along the way. |
| 1142 | */ |
| 1143 | cpu_hotplug_disabled++; |
| 1144 | |
| 1145 | cpu_maps_update_done(); |
| 1146 | } |
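
/*
 * Illustrative caller (a sketch, not part of this file): architectures
 * invoke this from their reboot/kexec shutdown path, passing in the CPU
 * the system will reboot on, e.g.:
 */
void example_machine_shutdown(void)
{
        /* reboot_cpu is the kernel's global reboot-CPU selection */
        smp_shutdown_nonboot_cpus(reboot_cpu);
}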
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 1147 | |
| 1148 | #else |
| 1149 | #define takedown_cpu NULL |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1150 | #endif /*CONFIG_HOTPLUG_CPU*/ |
| 1151 | |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1152 | /** |
Thomas Gleixner | ee1e714 | 2016-08-18 14:57:16 +0200 | [diff] [blame] | 1153 | * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1154 | * @cpu: cpu that just started |
| 1155 | * |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1156 | * It must be called by the arch code on the new cpu, before the new cpu |
| 1157 | * enables interrupts and before the "boot" cpu returns from __cpu_up(). |
| 1158 | */ |
| 1159 | void notify_cpu_starting(unsigned int cpu) |
| 1160 | { |
| 1161 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
| 1162 | enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE); |
Peter Zijlstra | 724a868 | 2017-09-20 19:00:18 +0200 | [diff] [blame] | 1163 | int ret; |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1164 | |
Sebastian Andrzej Siewior | 0c6d457 | 2016-08-17 14:21:04 +0200 | [diff] [blame] | 1165 | rcu_cpu_starting(cpu); /* Enables RCU usage on this CPU. */ |
Thomas Gleixner | e797bda | 2019-07-22 20:47:16 +0200 | [diff] [blame] | 1166 | cpumask_set_cpu(cpu, &cpus_booted_once_mask); |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1167 | while (st->state < target) { |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1168 | st->state++; |
Peter Zijlstra | 724a868 | 2017-09-20 19:00:18 +0200 | [diff] [blame] | 1169 | ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL); |
| 1170 | /* |
| 1171 | * STARTING must not fail! |
| 1172 | */ |
| 1173 | WARN_ON_ONCE(ret); |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1174 | } |
| 1175 | } |
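
/*
 * Illustrative arch bringup sketch (hypothetical architecture; the names
 * are made up, the ordering mirrors real secondary-start code): the
 * STARTING callbacks run with interrupts disabled, before the CPU is
 * marked online and before it enters the idle loop.
 */
void example_secondary_start_kernel(void)
{
        unsigned int cpu = smp_processor_id();

        notify_cpu_starting(cpu);       /* run the STARTING section */
        set_cpu_online(cpu, true);      /* lets __cpu_up() on the BP proceed */
        local_irq_enable();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}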
| 1176 | |
Thomas Gleixner | 949338e | 2016-02-26 18:43:35 +0000 | [diff] [blame] | 1177 | /* |
Thomas Gleixner | 9cd4f1a | 2017-07-04 22:20:23 +0200 | [diff] [blame] | 1178 | * Called from the idle task. Wakes up the controlling task, which brings the |
Peter Zijlstra | 45178ac | 2019-12-10 09:34:54 +0100 | [diff] [blame] | 1179 | * hotplug thread of the upcoming CPU up and then delegates the rest of the |
| 1180 | * online bringup to the hotplug thread. |
Thomas Gleixner | 949338e | 2016-02-26 18:43:35 +0000 | [diff] [blame] | 1181 | */ |
Thomas Gleixner | 8df3e07 | 2016-02-26 18:43:41 +0000 | [diff] [blame] | 1182 | void cpuhp_online_idle(enum cpuhp_state state) |
Thomas Gleixner | 949338e | 2016-02-26 18:43:35 +0000 | [diff] [blame] | 1183 | { |
Thomas Gleixner | 8df3e07 | 2016-02-26 18:43:41 +0000 | [diff] [blame] | 1184 | struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); |
Thomas Gleixner | 8df3e07 | 2016-02-26 18:43:41 +0000 | [diff] [blame] | 1185 | |
| 1186 | /* Happens for the boot cpu */ |
| 1187 | if (state != CPUHP_AP_ONLINE_IDLE) |
| 1188 | return; |
| 1189 | |
Peter Zijlstra | 45178ac | 2019-12-10 09:34:54 +0100 | [diff] [blame] | 1190 | /* |
| 1191 | * Unpark the stopper thread before we start the idle loop (and start |
| 1192 | * scheduling); this ensures the stopper task is always available. |
| 1193 | */ |
| 1194 | stop_machine_unpark(smp_processor_id()); |
| 1195 | |
Thomas Gleixner | 8df3e07 | 2016-02-26 18:43:41 +0000 | [diff] [blame] | 1196 | st->state = CPUHP_AP_ONLINE_IDLE; |
Peter Zijlstra | 5ebe774 | 2017-09-20 19:00:19 +0200 | [diff] [blame] | 1197 | complete_ap_thread(st, true); |
Thomas Gleixner | 949338e | 2016-02-26 18:43:35 +0000 | [diff] [blame] | 1198 | } |
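
/*
 * A sketch of the caller for context: cpu_startup_entry() in
 * kernel/sched/idle.c is roughly the following in this era (do_idle() is
 * internal to that file; shown only for illustration):
 */
void example_cpu_startup_entry(enum cpuhp_state state)
{
        arch_cpu_idle_prepare();
        cpuhp_online_idle(state);
        while (1)
                do_idle();
}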
| 1199 | |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1200 | /* Requires cpu_add_remove_lock to be held */ |
Thomas Gleixner | af1f404 | 2016-02-26 18:43:30 +0000 | [diff] [blame] | 1201 | static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | { |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1203 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
Suresh Siddha | 3bb5d2e | 2012-04-20 17:08:50 -0700 | [diff] [blame] | 1204 | struct task_struct *idle; |
Thomas Gleixner | 2e1a348 | 2016-02-26 18:43:37 +0000 | [diff] [blame] | 1205 | int ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | |
Thomas Gleixner | 8f553c4 | 2017-05-24 10:15:12 +0200 | [diff] [blame] | 1207 | cpus_write_lock(); |
Thomas Gleixner | 38498a6 | 2012-04-20 13:05:44 +0000 | [diff] [blame] | 1208 | |
Thomas Gleixner | 757c989 | 2016-02-26 18:43:32 +0000 | [diff] [blame] | 1209 | if (!cpu_present(cpu)) { |
Yasuaki Ishimatsu | 5e5041f | 2012-10-23 01:30:54 +0200 | [diff] [blame] | 1210 | ret = -EINVAL; |
| 1211 | goto out; |
| 1212 | } |
| 1213 | |
Thomas Gleixner | 757c989 | 2016-02-26 18:43:32 +0000 | [diff] [blame] | 1214 | /* |
Qais Yousef | 33c3736 | 2020-03-23 13:51:10 +0000 | [diff] [blame] | 1215 | * The caller of cpu_up() might have raced with another |
| 1216 | * caller. Nothing to do. |
Thomas Gleixner | 757c989 | 2016-02-26 18:43:32 +0000 | [diff] [blame] | 1217 | */ |
| 1218 | if (st->state >= target) |
Thomas Gleixner | 38498a6 | 2012-04-20 13:05:44 +0000 | [diff] [blame] | 1219 | goto out; |
Thomas Gleixner | 757c989 | 2016-02-26 18:43:32 +0000 | [diff] [blame] | 1220 | |
| 1221 | if (st->state == CPUHP_OFFLINE) { |
| 1222 | /* Let it fail before we try to bring the cpu up */ |
| 1223 | idle = idle_thread_get(cpu); |
| 1224 | if (IS_ERR(idle)) { |
| 1225 | ret = PTR_ERR(idle); |
| 1226 | goto out; |
| 1227 | } |
Suresh Siddha | 3bb5d2e | 2012-04-20 17:08:50 -0700 | [diff] [blame] | 1228 | } |
Thomas Gleixner | 38498a6 | 2012-04-20 13:05:44 +0000 | [diff] [blame] | 1229 | |
Thomas Gleixner | ba99746 | 2016-02-26 18:43:24 +0000 | [diff] [blame] | 1230 | cpuhp_tasks_frozen = tasks_frozen; |
| 1231 | |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 1232 | cpuhp_set_state(st, target); |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1233 | /* |
| 1234 | * If the current CPU state is in the range of the AP hotplug thread, |
| 1235 | * then we need to kick the thread once more. |
| 1236 | */ |
Thomas Gleixner | 8df3e07 | 2016-02-26 18:43:41 +0000 | [diff] [blame] | 1237 | if (st->state > CPUHP_BRINGUP_CPU) { |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1238 | ret = cpuhp_kick_ap_work(cpu); |
| 1239 | /* |
| 1240 | * The AP side has done the error rollback already. Just |
| 1241 | * return the error code. |
| 1242 | */ |
| 1243 | if (ret) |
| 1244 | goto out; |
| 1245 | } |
| 1246 | |
| 1247 | /* |
| 1248 | * Try to reach the target state. We max out on the BP at |
Thomas Gleixner | 8df3e07 | 2016-02-26 18:43:41 +0000 | [diff] [blame] | 1249 | * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1250 | * responsible for bringing it up to the target state. |
| 1251 | */ |
Thomas Gleixner | 8df3e07 | 2016-02-26 18:43:41 +0000 | [diff] [blame] | 1252 | target = min((int)target, CPUHP_BRINGUP_CPU); |
Thomas Gleixner | a724632 | 2016-08-12 19:49:38 +0200 | [diff] [blame] | 1253 | ret = cpuhp_up_callbacks(cpu, st, target); |
Thomas Gleixner | 38498a6 | 2012-04-20 13:05:44 +0000 | [diff] [blame] | 1254 | out: |
Thomas Gleixner | 8f553c4 | 2017-05-24 10:15:12 +0200 | [diff] [blame] | 1255 | cpus_write_unlock(); |
Thomas Gleixner | a74cfff | 2018-11-25 19:33:39 +0100 | [diff] [blame] | 1256 | arch_smt_update(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1257 | return ret; |
| 1258 | } |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1259 | |
Qais Yousef | 33c3736 | 2020-03-23 13:51:10 +0000 | [diff] [blame] | 1260 | static int cpu_up(unsigned int cpu, enum cpuhp_state target) |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1261 | { |
| 1262 | int err = 0; |
minskey guo | cf23422 | 2010-05-24 14:32:41 -0700 | [diff] [blame] | 1263 | |
Rusty Russell | e0b582e | 2009-01-01 10:12:28 +1030 | [diff] [blame] | 1264 | if (!cpu_possible(cpu)) { |
Fabian Frederick | 84117da | 2014-06-04 16:11:17 -0700 | [diff] [blame] | 1265 | pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n", |
| 1266 | cpu); |
Chen Gong | 87d5e023 | 2010-03-05 13:42:38 -0800 | [diff] [blame] | 1267 | #if defined(CONFIG_IA64) |
Fabian Frederick | 84117da | 2014-06-04 16:11:17 -0700 | [diff] [blame] | 1268 | pr_err("please check additional_cpus= boot parameter\n"); |
KAMEZAWA Hiroyuki | 73e753a | 2007-10-18 23:40:47 -0700 | [diff] [blame] | 1269 | #endif |
| 1270 | return -EINVAL; |
| 1271 | } |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1272 | |
Toshi Kani | 01b0f19 | 2013-11-12 15:07:25 -0800 | [diff] [blame] | 1273 | err = try_online_node(cpu_to_node(cpu)); |
| 1274 | if (err) |
| 1275 | return err; |
minskey guo | cf23422 | 2010-05-24 14:32:41 -0700 | [diff] [blame] | 1276 | |
Gautham R Shenoy | d221938 | 2008-01-25 21:08:01 +0100 | [diff] [blame] | 1277 | cpu_maps_update_begin(); |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1278 | |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 1279 | if (cpu_hotplug_disabled) { |
| 1280 | err = -EBUSY; |
| 1281 | goto out; |
| 1282 | } |
Thomas Gleixner | 05736e4 | 2018-05-29 17:48:27 +0200 | [diff] [blame] | 1283 | if (!cpu_smt_allowed(cpu)) { |
| 1284 | err = -EPERM; |
| 1285 | goto out; |
| 1286 | } |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 1287 | |
Thomas Gleixner | af1f404 | 2016-02-26 18:43:30 +0000 | [diff] [blame] | 1288 | err = _cpu_up(cpu, 0, target); |
Max Krasnyansky | e761b77 | 2008-07-15 04:43:49 -0700 | [diff] [blame] | 1289 | out: |
Gautham R Shenoy | d221938 | 2008-01-25 21:08:01 +0100 | [diff] [blame] | 1290 | cpu_maps_update_done(); |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1291 | return err; |
| 1292 | } |
Thomas Gleixner | af1f404 | 2016-02-26 18:43:30 +0000 | [diff] [blame] | 1293 | |
Qais Yousef | 33c3736 | 2020-03-23 13:51:10 +0000 | [diff] [blame] | 1294 | /** |
| 1295 | * cpu_device_up - Bring up a cpu device |
| 1296 | * @dev: Pointer to the cpu device to online |
| 1297 | * |
| 1298 | * This function is meant to be used by the device core CPU subsystem only. |
| 1299 | * |
| 1300 | * Other subsystems should use add_cpu() instead. |
| 1301 | */ |
| 1302 | int cpu_device_up(struct device *dev) |
Thomas Gleixner | af1f404 | 2016-02-26 18:43:30 +0000 | [diff] [blame] | 1303 | { |
Qais Yousef | 33c3736 | 2020-03-23 13:51:10 +0000 | [diff] [blame] | 1304 | return cpu_up(dev->id, CPUHP_ONLINE); |
Thomas Gleixner | af1f404 | 2016-02-26 18:43:30 +0000 | [diff] [blame] | 1305 | } |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1306 | |
Qais Yousef | 93ef142 | 2020-03-23 13:50:54 +0000 | [diff] [blame] | 1307 | int add_cpu(unsigned int cpu) |
| 1308 | { |
| 1309 | int ret; |
| 1310 | |
| 1311 | lock_device_hotplug(); |
| 1312 | ret = device_online(get_cpu_device(cpu)); |
| 1313 | unlock_device_hotplug(); |
| 1314 | |
| 1315 | return ret; |
| 1316 | } |
| 1317 | EXPORT_SYMBOL_GPL(add_cpu); |
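
/*
 * Usage example (hypothetical caller): cycle a CPU offline and back online
 * through the exported helpers. Both go via the device core, so the sysfs
 * online attribute stays consistent with the actual CPU state.
 */
static int example_cycle_cpu(unsigned int cpu)
{
        int ret;

        ret = remove_cpu(cpu);
        if (ret)
                return ret;
        return add_cpu(cpu);
}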
| 1318 | |
Qais Yousef | d720f98 | 2020-03-23 13:51:01 +0000 | [diff] [blame] | 1319 | /** |
| 1320 | * bringup_hibernate_cpu - Bring up the CPU that we hibernated on |
| 1321 | * @sleep_cpu: The cpu we hibernated on and should be brought up. |
| 1322 | * |
| 1323 | * On some architectures like arm64, we can hibernate on any CPU, but on |
| 1324 | * wakeup, the CPU we hibernated on might be offline as a side effect of |
| 1325 | * using maxcpus= for example. |
| 1326 | */ |
| 1327 | int bringup_hibernate_cpu(unsigned int sleep_cpu) |
| 1328 | { |
| 1329 | int ret; |
| 1330 | |
| 1331 | if (!cpu_online(sleep_cpu)) { |
| 1332 | pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n"); |
Qais Yousef | 33c3736 | 2020-03-23 13:51:10 +0000 | [diff] [blame] | 1333 | ret = cpu_up(sleep_cpu, CPUHP_ONLINE); |
Qais Yousef | d720f98 | 2020-03-23 13:51:01 +0000 | [diff] [blame] | 1334 | if (ret) { |
| 1335 | pr_err("Failed to bring hibernate-CPU up!\n"); |
| 1336 | return ret; |
| 1337 | } |
| 1338 | } |
| 1339 | return 0; |
| 1340 | } |
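
/*
 * Illustrative caller sketch (simplified; the real call sits in the
 * hibernation resume path in kernel/power/hibernate.c): the CPU recorded
 * in the image must be online before the image is restored.
 */
static int example_resume_prepare(unsigned int sleep_cpu)
{
        int error = bringup_hibernate_cpu(sleep_cpu);

        if (error)
                return error;
        /* continue with loading and restoring the hibernation image */
        return 0;
}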
| 1341 | |
Qais Yousef | b99a265 | 2020-03-23 13:51:09 +0000 | [diff] [blame] | 1342 | void bringup_nonboot_cpus(unsigned int setup_max_cpus) |
| 1343 | { |
| 1344 | unsigned int cpu; |
| 1345 | |
| 1346 | for_each_present_cpu(cpu) { |
| 1347 | if (num_online_cpus() >= setup_max_cpus) |
| 1348 | break; |
| 1349 | if (!cpu_online(cpu)) |
Qais Yousef | 33c3736 | 2020-03-23 13:51:10 +0000 | [diff] [blame] | 1350 | cpu_up(cpu, CPUHP_ONLINE); |
Qais Yousef | b99a265 | 2020-03-23 13:51:09 +0000 | [diff] [blame] | 1351 | } |
| 1352 | } |
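
/*
 * Illustrative caller sketch (modeled on smp_init() in kernel/smp.c, which
 * hands in the boot-time limit derived from the "maxcpus=" parameter):
 */
void __init example_smp_init(void)
{
        pr_info("Bringing up secondary CPUs ...\n");
        bringup_nonboot_cpus(setup_max_cpus);
}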
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1353 | |
Rafael J. Wysocki | f3de4be | 2007-08-30 23:56:29 -0700 | [diff] [blame] | 1354 | #ifdef CONFIG_PM_SLEEP_SMP |
Rusty Russell | e0b582e | 2009-01-01 10:12:28 +1030 | [diff] [blame] | 1355 | static cpumask_var_t frozen_cpus; |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1356 | |
Qais Yousef | fb7fb84 | 2020-04-30 12:40:04 +0100 | [diff] [blame] | 1357 | int freeze_secondary_cpus(int primary) |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1358 | { |
James Morse | d391e55 | 2016-08-17 13:50:25 +0100 | [diff] [blame] | 1359 | int cpu, error = 0; |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1360 | |
Gautham R Shenoy | d221938 | 2008-01-25 21:08:01 +0100 | [diff] [blame] | 1361 | cpu_maps_update_begin(); |
Nicholas Piggin | 9ca12ac | 2019-04-11 13:34:46 +1000 | [diff] [blame] | 1362 | if (primary == -1) { |
James Morse | d391e55 | 2016-08-17 13:50:25 +0100 | [diff] [blame] | 1363 | primary = cpumask_first(cpu_online_mask); |
Nicholas Piggin | 9ca12ac | 2019-04-11 13:34:46 +1000 | [diff] [blame] | 1364 | if (!housekeeping_cpu(primary, HK_FLAG_TIMER)) |
| 1365 | primary = housekeeping_any_cpu(HK_FLAG_TIMER); |
| 1366 | } else { |
| 1367 | if (!cpu_online(primary)) |
| 1368 | primary = cpumask_first(cpu_online_mask); |
| 1369 | } |
| 1370 | |
Xiaotian Feng | 9ee349a | 2009-12-16 18:04:32 +0100 | [diff] [blame] | 1371 | /* |
| 1372 | * We take down all of the non-boot CPUs in one shot to avoid races |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1373 | * with userspace trying to use CPU hotplug at the same time. |
| 1374 | */ |
Rusty Russell | e0b582e | 2009-01-01 10:12:28 +1030 | [diff] [blame] | 1375 | cpumask_clear(frozen_cpus); |
Peter Zijlstra | 6ad4c18 | 2009-11-25 13:31:39 +0100 | [diff] [blame] | 1376 | |
Fabian Frederick | 84117da | 2014-06-04 16:11:17 -0700 | [diff] [blame] | 1377 | pr_info("Disabling non-boot CPUs ...\n"); |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1378 | for_each_online_cpu(cpu) { |
James Morse | d391e55 | 2016-08-17 13:50:25 +0100 | [diff] [blame] | 1379 | if (cpu == primary) |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1380 | continue; |
Pavankumar Kondeti | a66d955 | 2019-06-03 10:01:03 +0530 | [diff] [blame] | 1381 | |
Qais Yousef | fb7fb84 | 2020-04-30 12:40:04 +0100 | [diff] [blame] | 1382 | if (pm_wakeup_pending()) { |
Pavankumar Kondeti | a66d955 | 2019-06-03 10:01:03 +0530 | [diff] [blame] | 1383 | pr_info("Wakeup pending. Abort CPU freeze\n"); |
| 1384 | error = -EBUSY; |
| 1385 | break; |
| 1386 | } |
| 1387 | |
Todd E Brandt | bb3632c | 2014-06-06 05:40:17 -0700 | [diff] [blame] | 1388 | trace_suspend_resume(TPS("CPU_OFF"), cpu, true); |
Thomas Gleixner | af1f404 | 2016-02-26 18:43:30 +0000 | [diff] [blame] | 1389 | error = _cpu_down(cpu, 1, CPUHP_OFFLINE); |
Todd E Brandt | bb3632c | 2014-06-06 05:40:17 -0700 | [diff] [blame] | 1390 | trace_suspend_resume(TPS("CPU_OFF"), cpu, false); |
Mike Travis | feae320 | 2009-11-17 18:22:13 -0600 | [diff] [blame] | 1391 | if (!error) |
Rusty Russell | e0b582e | 2009-01-01 10:12:28 +1030 | [diff] [blame] | 1392 | cpumask_set_cpu(cpu, frozen_cpus); |
Mike Travis | feae320 | 2009-11-17 18:22:13 -0600 | [diff] [blame] | 1393 | else { |
Fabian Frederick | 84117da | 2014-06-04 16:11:17 -0700 | [diff] [blame] | 1394 | pr_err("Error taking CPU%d down: %d\n", cpu, error); |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1395 | break; |
| 1396 | } |
| 1397 | } |
Joseph Cihula | 86886e5 | 2009-06-30 19:31:07 -0700 | [diff] [blame] | 1398 | |
Vitaly Kuznetsov | 89af7ba | 2015-08-05 00:52:46 -0700 | [diff] [blame] | 1399 | if (!error) |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1400 | BUG_ON(num_online_cpus() > 1); |
Vitaly Kuznetsov | 89af7ba | 2015-08-05 00:52:46 -0700 | [diff] [blame] | 1401 | else |
Fabian Frederick | 84117da | 2014-06-04 16:11:17 -0700 | [diff] [blame] | 1402 | pr_err("Non-boot CPUs are not disabled\n"); |
Vitaly Kuznetsov | 89af7ba | 2015-08-05 00:52:46 -0700 | [diff] [blame] | 1403 | |
| 1404 | /* |
| 1405 | * Make sure the CPUs won't be enabled by someone else. We need to do |
Qais Yousef | 5655585 | 2020-04-30 12:40:03 +0100 | [diff] [blame] | 1406 | * this even in case of failure as all freeze_secondary_cpus() users are |
| 1407 | * supposed to do thaw_secondary_cpus() on the failure path. |
Vitaly Kuznetsov | 89af7ba | 2015-08-05 00:52:46 -0700 | [diff] [blame] | 1408 | */ |
| 1409 | cpu_hotplug_disabled++; |
| 1410 | |
Gautham R Shenoy | d221938 | 2008-01-25 21:08:01 +0100 | [diff] [blame] | 1411 | cpu_maps_update_done(); |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1412 | return error; |
| 1413 | } |
| 1414 | |
Qais Yousef | 5655585 | 2020-04-30 12:40:03 +0100 | [diff] [blame] | 1415 | void __weak arch_thaw_secondary_cpus_begin(void) |
Suresh Siddha | d0af9ee | 2009-08-19 18:05:36 -0700 | [diff] [blame] | 1416 | { |
| 1417 | } |
| 1418 | |
Qais Yousef | 5655585 | 2020-04-30 12:40:03 +0100 | [diff] [blame] | 1419 | void __weak arch_thaw_secondary_cpus_end(void) |
Suresh Siddha | d0af9ee | 2009-08-19 18:05:36 -0700 | [diff] [blame] | 1420 | { |
| 1421 | } |
| 1422 | |
Qais Yousef | 5655585 | 2020-04-30 12:40:03 +0100 | [diff] [blame] | 1423 | void thaw_secondary_cpus(void) |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1424 | { |
| 1425 | int cpu, error; |
| 1426 | |
| 1427 | /* Allow everyone to use the CPU hotplug again */ |
Gautham R Shenoy | d221938 | 2008-01-25 21:08:01 +0100 | [diff] [blame] | 1428 | cpu_maps_update_begin(); |
Lianwei Wang | 01b4115 | 2016-06-09 23:43:28 -0700 | [diff] [blame] | 1429 | __cpu_hotplug_enable(); |
Rusty Russell | e0b582e | 2009-01-01 10:12:28 +1030 | [diff] [blame] | 1430 | if (cpumask_empty(frozen_cpus)) |
Rafael J. Wysocki | 1d64b9c | 2007-04-01 23:49:49 -0700 | [diff] [blame] | 1431 | goto out; |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1432 | |
Fabian Frederick | 84117da | 2014-06-04 16:11:17 -0700 | [diff] [blame] | 1433 | pr_info("Enabling non-boot CPUs ...\n"); |
Suresh Siddha | d0af9ee | 2009-08-19 18:05:36 -0700 | [diff] [blame] | 1434 | |
Qais Yousef | 5655585 | 2020-04-30 12:40:03 +0100 | [diff] [blame] | 1435 | arch_thaw_secondary_cpus_begin(); |
Suresh Siddha | d0af9ee | 2009-08-19 18:05:36 -0700 | [diff] [blame] | 1436 | |
Rusty Russell | e0b582e | 2009-01-01 10:12:28 +1030 | [diff] [blame] | 1437 | for_each_cpu(cpu, frozen_cpus) { |
Todd E Brandt | bb3632c | 2014-06-06 05:40:17 -0700 | [diff] [blame] | 1438 | trace_suspend_resume(TPS("CPU_ON"), cpu, true); |
Thomas Gleixner | af1f404 | 2016-02-26 18:43:30 +0000 | [diff] [blame] | 1439 | error = _cpu_up(cpu, 1, CPUHP_ONLINE); |
Todd E Brandt | bb3632c | 2014-06-06 05:40:17 -0700 | [diff] [blame] | 1440 | trace_suspend_resume(TPS("CPU_ON"), cpu, false); |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1441 | if (!error) { |
Fabian Frederick | 84117da | 2014-06-04 16:11:17 -0700 | [diff] [blame] | 1442 | pr_info("CPU%d is up\n", cpu); |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1443 | continue; |
| 1444 | } |
Fabian Frederick | 84117da | 2014-06-04 16:11:17 -0700 | [diff] [blame] | 1445 | pr_warn("Error taking CPU%d up: %d\n", cpu, error); |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1446 | } |
Suresh Siddha | d0af9ee | 2009-08-19 18:05:36 -0700 | [diff] [blame] | 1447 | |
Qais Yousef | 5655585 | 2020-04-30 12:40:03 +0100 | [diff] [blame] | 1448 | arch_thaw_secondary_cpus_end(); |
Suresh Siddha | d0af9ee | 2009-08-19 18:05:36 -0700 | [diff] [blame] | 1449 | |
Rusty Russell | e0b582e | 2009-01-01 10:12:28 +1030 | [diff] [blame] | 1450 | cpumask_clear(frozen_cpus); |
Rafael J. Wysocki | 1d64b9c | 2007-04-01 23:49:49 -0700 | [diff] [blame] | 1451 | out: |
Gautham R Shenoy | d221938 | 2008-01-25 21:08:01 +0100 | [diff] [blame] | 1452 | cpu_maps_update_done(); |
Rafael J. Wysocki | e3920fb | 2006-09-25 23:32:48 -0700 | [diff] [blame] | 1453 | } |
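
/*
 * Usage sketch (hypothetical suspend-side caller): since
 * freeze_secondary_cpus() increments cpu_hotplug_disabled even when it
 * fails, every caller must run thaw_secondary_cpus() on the error path too.
 */
static int example_suspend_secondaries(void)
{
        int error;

        error = freeze_secondary_cpus(-1);      /* -1: pick a housekeeping CPU */
        if (error)
                goto thaw;

        /* the system would sleep here */

thaw:
        thaw_secondary_cpus();
        return error;
}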
Rusty Russell | e0b582e | 2009-01-01 10:12:28 +1030 | [diff] [blame] | 1454 | |
Fenghua Yu | d7268a3 | 2011-11-15 21:59:31 +0100 | [diff] [blame] | 1455 | static int __init alloc_frozen_cpus(void) |
Rusty Russell | e0b582e | 2009-01-01 10:12:28 +1030 | [diff] [blame] | 1456 | { |
| 1457 | if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO)) |
| 1458 | return -ENOMEM; |
| 1459 | return 0; |
| 1460 | } |
| 1461 | core_initcall(alloc_frozen_cpus); |
Srivatsa S. Bhat | 79cfbdf | 2011-11-03 00:59:25 +0100 | [diff] [blame] | 1462 | |
| 1463 | /* |
Srivatsa S. Bhat | 79cfbdf | 2011-11-03 00:59:25 +0100 | [diff] [blame] | 1464 | * When callbacks for CPU hotplug notifications are being executed, we must |
| 1465 | * ensure that the state of the system with respect to the tasks being frozen |
| 1466 | * or not, as reported by the notification, remains unchanged *throughout the |
| 1467 | * duration* of the execution of the callbacks. |
| 1468 | * Hence we need to prevent the freezer from racing with regular CPU hotplug. |
| 1469 | * |
| 1470 | * This synchronization is implemented by mutually excluding regular CPU |
| 1471 | * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/ |
| 1472 | * Hibernate notifications. |
| 1473 | */ |
| 1474 | static int |
| 1475 | cpu_hotplug_pm_callback(struct notifier_block *nb, |
| 1476 | unsigned long action, void *ptr) |
| 1477 | { |
| 1478 | switch (action) { |
| 1479 | |
| 1480 | case PM_SUSPEND_PREPARE: |
| 1481 | case PM_HIBERNATION_PREPARE: |
Srivatsa S. Bhat | 16e53db | 2013-06-12 14:04:36 -0700 | [diff] [blame] | 1482 | cpu_hotplug_disable(); |
Srivatsa S. Bhat | 79cfbdf | 2011-11-03 00:59:25 +0100 | [diff] [blame] | 1483 | break; |
| 1484 | |
| 1485 | case PM_POST_SUSPEND: |
| 1486 | case PM_POST_HIBERNATION: |
Srivatsa S. Bhat | 16e53db | 2013-06-12 14:04:36 -0700 | [diff] [blame] | 1487 | cpu_hotplug_enable(); |
Srivatsa S. Bhat | 79cfbdf | 2011-11-03 00:59:25 +0100 | [diff] [blame] | 1488 | break; |
| 1489 | |
| 1490 | default: |
| 1491 | return NOTIFY_DONE; |
| 1492 | } |
| 1493 | |
| 1494 | return NOTIFY_OK; |
| 1495 | } |
| 1496 | |
| 1497 | |
Fenghua Yu | d7268a3 | 2011-11-15 21:59:31 +0100 | [diff] [blame] | 1498 | static int __init cpu_hotplug_pm_sync_init(void) |
Srivatsa S. Bhat | 79cfbdf | 2011-11-03 00:59:25 +0100 | [diff] [blame] | 1499 | { |
Fenghua Yu | 6e32d47 | 2012-11-13 11:32:43 -0800 | [diff] [blame] | 1500 | /* |
| 1501 | * cpu_hotplug_pm_callback has higher priority than x86's |
| 1502 | * bsp_pm_callback, which depends on cpu_hotplug_pm_callback |
| 1503 | * having disabled CPU hotplug first, to avoid hotplug races. |
| 1504 | */ |
Srivatsa S. Bhat | 79cfbdf | 2011-11-03 00:59:25 +0100 | [diff] [blame] | 1505 | pm_notifier(cpu_hotplug_pm_callback, 0); |
| 1506 | return 0; |
| 1507 | } |
| 1508 | core_initcall(cpu_hotplug_pm_sync_init); |
| 1509 | |
Rafael J. Wysocki | f3de4be | 2007-08-30 23:56:29 -0700 | [diff] [blame] | 1510 | #endif /* CONFIG_PM_SLEEP_SMP */ |
Max Krasnyansky | 68f4f1e | 2008-05-29 11:17:02 -0700 | [diff] [blame] | 1511 | |
Peter Zijlstra | 8ce371f | 2017-03-20 12:26:55 +0100 | [diff] [blame] | 1512 | int __boot_cpu_id; |
| 1513 | |
Max Krasnyansky | 68f4f1e | 2008-05-29 11:17:02 -0700 | [diff] [blame] | 1514 | #endif /* CONFIG_SMP */ |
Mike Travis | b8d317d | 2008-07-24 18:21:29 -0700 | [diff] [blame] | 1515 | |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1516 | /* Boot processor state steps */ |
Lai Jiangshan | 17a2f1c | 2017-12-01 21:50:05 +0800 | [diff] [blame] | 1517 | static struct cpuhp_step cpuhp_hp_states[] = { |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1518 | [CPUHP_OFFLINE] = { |
| 1519 | .name = "offline", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1520 | .startup.single = NULL, |
| 1521 | .teardown.single = NULL, |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1522 | }, |
| 1523 | #ifdef CONFIG_SMP |
| 1524 | [CPUHP_CREATE_THREADS]= { |
Thomas Gleixner | 677f664 | 2016-09-06 16:13:48 +0200 | [diff] [blame] | 1525 | .name = "threads:prepare", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1526 | .startup.single = smpboot_create_threads, |
| 1527 | .teardown.single = NULL, |
Thomas Gleixner | 757c989 | 2016-02-26 18:43:32 +0000 | [diff] [blame] | 1528 | .cant_stop = true, |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1529 | }, |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 1530 | [CPUHP_PERF_PREPARE] = { |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1531 | .name = "perf:prepare", |
| 1532 | .startup.single = perf_event_init_cpu, |
| 1533 | .teardown.single = perf_event_exit_cpu, |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 1534 | }, |
Thomas Gleixner | 7ee681b | 2016-07-13 17:16:29 +0000 | [diff] [blame] | 1535 | [CPUHP_WORKQUEUE_PREP] = { |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1536 | .name = "workqueue:prepare", |
| 1537 | .startup.single = workqueue_prepare_cpu, |
| 1538 | .teardown.single = NULL, |
Thomas Gleixner | 7ee681b | 2016-07-13 17:16:29 +0000 | [diff] [blame] | 1539 | }, |
Thomas Gleixner | 27590dc | 2016-07-15 10:41:04 +0200 | [diff] [blame] | 1540 | [CPUHP_HRTIMERS_PREPARE] = { |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1541 | .name = "hrtimers:prepare", |
| 1542 | .startup.single = hrtimers_prepare_cpu, |
| 1543 | .teardown.single = hrtimers_dead_cpu, |
Thomas Gleixner | 27590dc | 2016-07-15 10:41:04 +0200 | [diff] [blame] | 1544 | }, |
Richard Weinberger | 31487f8 | 2016-07-13 17:17:01 +0000 | [diff] [blame] | 1545 | [CPUHP_SMPCFD_PREPARE] = { |
Thomas Gleixner | 677f664 | 2016-09-06 16:13:48 +0200 | [diff] [blame] | 1546 | .name = "smpcfd:prepare", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1547 | .startup.single = smpcfd_prepare_cpu, |
| 1548 | .teardown.single = smpcfd_dead_cpu, |
Richard Weinberger | 31487f8 | 2016-07-13 17:17:01 +0000 | [diff] [blame] | 1549 | }, |
Richard Weinberger | e6d4989 | 2016-08-18 14:57:17 +0200 | [diff] [blame] | 1550 | [CPUHP_RELAY_PREPARE] = { |
| 1551 | .name = "relay:prepare", |
| 1552 | .startup.single = relay_prepare_cpu, |
| 1553 | .teardown.single = NULL, |
| 1554 | }, |
Sebastian Andrzej Siewior | 6731d4f | 2016-08-23 14:53:19 +0200 | [diff] [blame] | 1555 | [CPUHP_SLAB_PREPARE] = { |
| 1556 | .name = "slab:prepare", |
| 1557 | .startup.single = slab_prepare_cpu, |
| 1558 | .teardown.single = slab_dead_cpu, |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1559 | }, |
Thomas Gleixner | 4df8374 | 2016-07-13 17:17:03 +0000 | [diff] [blame] | 1560 | [CPUHP_RCUTREE_PREP] = { |
Thomas Gleixner | 677f664 | 2016-09-06 16:13:48 +0200 | [diff] [blame] | 1561 | .name = "RCU/tree:prepare", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1562 | .startup.single = rcutree_prepare_cpu, |
| 1563 | .teardown.single = rcutree_dead_cpu, |
Thomas Gleixner | 4df8374 | 2016-07-13 17:17:03 +0000 | [diff] [blame] | 1564 | }, |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1565 | /* |
Richard Cochran | 4fae16d | 2016-07-27 11:08:18 +0200 | [diff] [blame] | 1566 | * On the tear-down path, timers_dead_cpu() must be invoked |
| 1567 | * before blk_mq_queue_reinit_notify() from notify_dead(), |
| 1568 | * otherwise an RCU stall occurs. |
| 1569 | */ |
Thomas Gleixner | 26456f8 | 2017-12-27 21:37:25 +0100 | [diff] [blame] | 1570 | [CPUHP_TIMERS_PREPARE] = { |
Mukesh Ojha | d018031 | 2018-07-24 20:17:48 +0530 | [diff] [blame] | 1571 | .name = "timers:prepare", |
Thomas Gleixner | 26456f8 | 2017-12-27 21:37:25 +0100 | [diff] [blame] | 1572 | .startup.single = timers_prepare_cpu, |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1573 | .teardown.single = timers_dead_cpu, |
Richard Cochran | 4fae16d | 2016-07-27 11:08:18 +0200 | [diff] [blame] | 1574 | }, |
Thomas Gleixner | d10ef6f | 2016-03-08 10:36:13 +0100 | [diff] [blame] | 1575 | /* Kicks the plugged cpu into life */ |
Thomas Gleixner | cff7d37 | 2016-02-26 18:43:28 +0000 | [diff] [blame] | 1576 | [CPUHP_BRINGUP_CPU] = { |
| 1577 | .name = "cpu:bringup", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1578 | .startup.single = bringup_cpu, |
Peter Zijlstra | bf2c59f | 2020-04-01 17:40:33 -0400 | [diff] [blame] | 1579 | .teardown.single = finish_cpu, |
Thomas Gleixner | 757c989 | 2016-02-26 18:43:32 +0000 | [diff] [blame] | 1580 | .cant_stop = true, |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1581 | }, |
Thomas Gleixner | d10ef6f | 2016-03-08 10:36:13 +0100 | [diff] [blame] | 1582 | /* Final state before CPU kills itself */ |
| 1583 | [CPUHP_AP_IDLE_DEAD] = { |
| 1584 | .name = "idle:dead", |
| 1585 | }, |
| 1586 | /* |
| 1587 | * Last state before CPU enters the idle loop to die. Transient state |
| 1588 | * for synchronization. |
| 1589 | */ |
| 1590 | [CPUHP_AP_OFFLINE] = { |
| 1591 | .name = "ap:offline", |
| 1592 | .cant_stop = true, |
| 1593 | }, |
Thomas Gleixner | 9cf7243 | 2016-03-10 12:54:09 +0100 | [diff] [blame] | 1594 | /* First state is scheduler control. Interrupts are disabled */ |
| 1595 | [CPUHP_AP_SCHED_STARTING] = { |
| 1596 | .name = "sched:starting", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1597 | .startup.single = sched_cpu_starting, |
| 1598 | .teardown.single = sched_cpu_dying, |
Thomas Gleixner | 9cf7243 | 2016-03-10 12:54:09 +0100 | [diff] [blame] | 1599 | }, |
Thomas Gleixner | 4df8374 | 2016-07-13 17:17:03 +0000 | [diff] [blame] | 1600 | [CPUHP_AP_RCUTREE_DYING] = { |
Thomas Gleixner | 677f664 | 2016-09-06 16:13:48 +0200 | [diff] [blame] | 1601 | .name = "RCU/tree:dying", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1602 | .startup.single = NULL, |
| 1603 | .teardown.single = rcutree_dying_cpu, |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1604 | }, |
Lai Jiangshan | 46febd3 | 2017-11-28 21:19:53 +0800 | [diff] [blame] | 1605 | [CPUHP_AP_SMPCFD_DYING] = { |
| 1606 | .name = "smpcfd:dying", |
| 1607 | .startup.single = NULL, |
| 1608 | .teardown.single = smpcfd_dying_cpu, |
| 1609 | }, |
Thomas Gleixner | d10ef6f | 2016-03-08 10:36:13 +0100 | [diff] [blame] | 1610 | /* Entry state on starting. Interrupts enabled from here on. Transient |
| 1611 | * state for synchronization. */ |
| 1612 | [CPUHP_AP_ONLINE] = { |
| 1613 | .name = "ap:online", |
| 1614 | }, |
Lai Jiangshan | 17a2f1c | 2017-12-01 21:50:05 +0800 | [diff] [blame] | 1615 | /* |
Thomas Gleixner | 1cf12e0 | 2020-09-16 09:27:18 +0200 | [diff] [blame] | 1616 | * Handled on the control processor until the plugged processor manages |
Lai Jiangshan | 17a2f1c | 2017-12-01 21:50:05 +0800 | [diff] [blame] | 1617 | * this itself. |
| 1618 | */ |
| 1619 | [CPUHP_TEARDOWN_CPU] = { |
| 1620 | .name = "cpu:teardown", |
| 1621 | .startup.single = NULL, |
| 1622 | .teardown.single = takedown_cpu, |
| 1623 | .cant_stop = true, |
| 1624 | }, |
Thomas Gleixner | 1cf12e0 | 2020-09-16 09:27:18 +0200 | [diff] [blame] | 1625 | |
| 1626 | [CPUHP_AP_SCHED_WAIT_EMPTY] = { |
| 1627 | .name = "sched:waitempty", |
| 1628 | .startup.single = NULL, |
| 1629 | .teardown.single = sched_cpu_wait_empty, |
| 1630 | }, |
| 1631 | |
Thomas Gleixner | d10ef6f | 2016-03-08 10:36:13 +0100 | [diff] [blame] | 1632 | /* Handle smpboot threads park/unpark */ |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1633 | [CPUHP_AP_SMPBOOT_THREADS] = { |
Thomas Gleixner | 677f664 | 2016-09-06 16:13:48 +0200 | [diff] [blame] | 1634 | .name = "smpboot/threads:online", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1635 | .startup.single = smpboot_unpark_threads, |
Thomas Gleixner | c4de656 | 2018-05-29 19:05:25 +0200 | [diff] [blame] | 1636 | .teardown.single = smpboot_park_threads, |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1637 | }, |
Thomas Gleixner | c5cb83b | 2017-06-20 01:37:51 +0200 | [diff] [blame] | 1638 | [CPUHP_AP_IRQ_AFFINITY_ONLINE] = { |
| 1639 | .name = "irq/affinity:online", |
| 1640 | .startup.single = irq_affinity_online_cpu, |
| 1641 | .teardown.single = NULL, |
| 1642 | }, |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 1643 | [CPUHP_AP_PERF_ONLINE] = { |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1644 | .name = "perf:online", |
| 1645 | .startup.single = perf_event_init_cpu, |
| 1646 | .teardown.single = perf_event_exit_cpu, |
Thomas Gleixner | 00e16c3 | 2016-07-13 17:16:09 +0000 | [diff] [blame] | 1647 | }, |
Peter Zijlstra | 9cf5773 | 2018-06-07 10:52:03 +0200 | [diff] [blame] | 1648 | [CPUHP_AP_WATCHDOG_ONLINE] = { |
| 1649 | .name = "lockup_detector:online", |
| 1650 | .startup.single = lockup_detector_online_cpu, |
| 1651 | .teardown.single = lockup_detector_offline_cpu, |
| 1652 | }, |
Thomas Gleixner | 7ee681b | 2016-07-13 17:16:29 +0000 | [diff] [blame] | 1653 | [CPUHP_AP_WORKQUEUE_ONLINE] = { |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1654 | .name = "workqueue:online", |
| 1655 | .startup.single = workqueue_online_cpu, |
| 1656 | .teardown.single = workqueue_offline_cpu, |
Thomas Gleixner | 7ee681b | 2016-07-13 17:16:29 +0000 | [diff] [blame] | 1657 | }, |
Thomas Gleixner | 4df8374 | 2016-07-13 17:17:03 +0000 | [diff] [blame] | 1658 | [CPUHP_AP_RCUTREE_ONLINE] = { |
Thomas Gleixner | 677f664 | 2016-09-06 16:13:48 +0200 | [diff] [blame] | 1659 | .name = "RCU/tree:online", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1660 | .startup.single = rcutree_online_cpu, |
| 1661 | .teardown.single = rcutree_offline_cpu, |
Thomas Gleixner | 4df8374 | 2016-07-13 17:17:03 +0000 | [diff] [blame] | 1662 | }, |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1663 | #endif |
Thomas Gleixner | d10ef6f | 2016-03-08 10:36:13 +0100 | [diff] [blame] | 1664 | /* |
| 1665 | * The dynamically registered state space is here |
| 1666 | */ |
| 1667 | |
Thomas Gleixner | aaddd7d | 2016-03-10 12:54:19 +0100 | [diff] [blame] | 1668 | #ifdef CONFIG_SMP |
| 1669 | /* Last state is scheduler control setting the cpu active */ |
| 1670 | [CPUHP_AP_ACTIVE] = { |
| 1671 | .name = "sched:active", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1672 | .startup.single = sched_cpu_activate, |
| 1673 | .teardown.single = sched_cpu_deactivate, |
Thomas Gleixner | aaddd7d | 2016-03-10 12:54:19 +0100 | [diff] [blame] | 1674 | }, |
| 1675 | #endif |
| 1676 | |
Thomas Gleixner | d10ef6f | 2016-03-08 10:36:13 +0100 | [diff] [blame] | 1677 | /* CPU is fully up and running. */ |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1678 | [CPUHP_ONLINE] = { |
| 1679 | .name = "online", |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1680 | .startup.single = NULL, |
| 1681 | .teardown.single = NULL, |
Thomas Gleixner | 4baa0af | 2016-02-26 18:43:29 +0000 | [diff] [blame] | 1682 | }, |
| 1683 | }; |
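
/*
 * Example of the dynamic state space in use (hypothetical subsystem; the
 * names are made up): requesting CPUHP_AP_ONLINE_DYN lets
 * cpuhp_reserve_state() below pick a free slot, and cpuhp_setup_state()
 * returns that slot number for a later cpuhp_remove_state().
 */
static int example_cpu_online(unsigned int cpu)
{
        /* set up per-CPU resources for this subsystem */
        return 0;
}

static int example_cpu_offline(unsigned int cpu)
{
        /* release the per-CPU resources again */
        return 0;
}

static int __init example_register_dyn_state(void)
{
        int state;

        state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                  example_cpu_online, example_cpu_offline);
        if (state < 0)
                return state;
        /* keep 'state' around for cpuhp_remove_state(state) on exit */
        return 0;
}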
| 1684 | |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1685 | /* Sanity check for callbacks */ |
| 1686 | static int cpuhp_cb_check(enum cpuhp_state state) |
| 1687 | { |
| 1688 | if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE) |
| 1689 | return -EINVAL; |
| 1690 | return 0; |
| 1691 | } |
| 1692 | |
Thomas Gleixner | dc280d93 | 2016-12-21 20:19:49 +0100 | [diff] [blame] | 1693 | /* |
| 1694 | * Returns a free slot for dynamic state assignment. The slots are |
| 1695 | * protected by the cpuhp_state_mutex and an empty slot is identified |
| 1696 | * by having no name assigned. |
| 1697 | */ |
| 1698 | static int cpuhp_reserve_state(enum cpuhp_state state) |
| 1699 | { |
Thomas Gleixner | 4205e47 | 2017-01-10 14:01:05 +0100 | [diff] [blame] | 1700 | enum cpuhp_state i, end; |
| 1701 | struct cpuhp_step *step; |
Thomas Gleixner | dc280d93 | 2016-12-21 20:19:49 +0100 | [diff] [blame] | 1702 | |
Thomas Gleixner | 4205e47 | 2017-01-10 14:01:05 +0100 | [diff] [blame] | 1703 | switch (state) { |
| 1704 | case CPUHP_AP_ONLINE_DYN: |
Lai Jiangshan | 17a2f1c | 2017-12-01 21:50:05 +0800 | [diff] [blame] | 1705 | step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN; |
Thomas Gleixner | 4205e47 | 2017-01-10 14:01:05 +0100 | [diff] [blame] | 1706 | end = CPUHP_AP_ONLINE_DYN_END; |
| 1707 | break; |
| 1708 | case CPUHP_BP_PREPARE_DYN: |
Lai Jiangshan | 17a2f1c | 2017-12-01 21:50:05 +0800 | [diff] [blame] | 1709 | step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN; |
Thomas Gleixner | 4205e47 | 2017-01-10 14:01:05 +0100 | [diff] [blame] | 1710 | end = CPUHP_BP_PREPARE_DYN_END; |
| 1711 | break; |
| 1712 | default: |
| 1713 | return -EINVAL; |
| 1714 | } |
| 1715 | |
| 1716 | for (i = state; i <= end; i++, step++) { |
| 1717 | if (!step->name) |
Thomas Gleixner | dc280d93 | 2016-12-21 20:19:49 +0100 | [diff] [blame] | 1718 | return i; |
| 1719 | } |
| 1720 | WARN(1, "No more dynamic states available for CPU hotplug\n"); |
| 1721 | return -ENOSPC; |
| 1722 | } |
| 1723 | |
| 1724 | static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, |
| 1725 | int (*startup)(unsigned int cpu), |
| 1726 | int (*teardown)(unsigned int cpu), |
| 1727 | bool multi_instance) |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1728 | { |
| 1729 | /* (Un)Install the callbacks for further cpu hotplug operations */ |
| 1730 | struct cpuhp_step *sp; |
Thomas Gleixner | dc280d93 | 2016-12-21 20:19:49 +0100 | [diff] [blame] | 1731 | int ret = 0; |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1732 | |
Ethan Barnes | 0c96b27 | 2017-07-19 22:36:00 +0000 | [diff] [blame] | 1733 | /* |
| 1734 | * If name is NULL, then the state gets removed. |
| 1735 | * |
| 1736 | * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on |
| 1737 | * the first allocation from these dynamic ranges, so the removal |
| 1738 | * would trigger a new allocation and clear the wrong (already |
| 1739 | * empty) state, leaving the callbacks of the to be cleared state |
| 1740 | * dangling, which causes wreckage on the next hotplug operation. |
| 1741 | */ |
| 1742 | if (name && (state == CPUHP_AP_ONLINE_DYN || |
| 1743 | state == CPUHP_BP_PREPARE_DYN)) { |
Thomas Gleixner | dc280d93 | 2016-12-21 20:19:49 +0100 | [diff] [blame] | 1744 | ret = cpuhp_reserve_state(state); |
| 1745 | if (ret < 0) |
Sebastian Andrzej Siewior | dc434e05 | 2017-03-14 16:06:45 +0100 | [diff] [blame] | 1746 | return ret; |
Thomas Gleixner | dc280d93 | 2016-12-21 20:19:49 +0100 | [diff] [blame] | 1747 | state = ret; |
| 1748 | } |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1749 | sp = cpuhp_get_step(state); |
Sebastian Andrzej Siewior | dc434e05 | 2017-03-14 16:06:45 +0100 | [diff] [blame] | 1750 | if (name && sp->name) |
| 1751 | return -EBUSY; |
| 1752 | |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1753 | sp->startup.single = startup; |
| 1754 | sp->teardown.single = teardown; |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1755 | sp->name = name; |
Thomas Gleixner | cf392d1 | 2016-08-12 19:49:39 +0200 | [diff] [blame] | 1756 | sp->multi_instance = multi_instance; |
| 1757 | INIT_HLIST_HEAD(&sp->list); |
Thomas Gleixner | dc280d93 | 2016-12-21 20:19:49 +0100 | [diff] [blame] | 1758 | return ret; |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1759 | } |
| 1760 | |
| 1761 | static void *cpuhp_get_teardown_cb(enum cpuhp_state state) |
| 1762 | { |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1763 | return cpuhp_get_step(state)->teardown.single; |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1764 | } |
| 1765 | |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1766 | /* |
| 1767 | * Call the startup/teardown function for a step either on the AP or |
| 1768 | * on the current CPU. |
| 1769 | */ |
Thomas Gleixner | cf392d1 | 2016-08-12 19:49:39 +0200 | [diff] [blame] | 1770 | static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, |
| 1771 | struct hlist_node *node) |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1772 | { |
Thomas Gleixner | a724632 | 2016-08-12 19:49:38 +0200 | [diff] [blame] | 1773 | struct cpuhp_step *sp = cpuhp_get_step(state); |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1774 | int ret; |
| 1775 | |
Peter Zijlstra | 4dddfb5 | 2017-09-20 19:00:17 +0200 | [diff] [blame] | 1776 | /* |
| 1777 | * If there's nothing to do, we're done. |
| 1778 | * Relies on the union for multi_instance. |
| 1779 | */ |
Thomas Gleixner | 3c1627e | 2016-09-05 15:28:36 +0200 | [diff] [blame] | 1780 | if ((bringup && !sp->startup.single) || |
| 1781 | (!bringup && !sp->teardown.single)) |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1782 | return 0; |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1783 | /* |
| 1784 | * The non-AP-bound callbacks can fail on bringup. On teardown, |
| 1785 | * e.g. module removal, we crash for now. |
| 1786 | */ |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1787 | #ifdef CONFIG_SMP |
| 1788 | if (cpuhp_is_ap_state(state)) |
Thomas Gleixner | cf392d1 | 2016-08-12 19:49:39 +0200 | [diff] [blame] | 1789 | ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node); |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1790 | else |
Peter Zijlstra | 96abb96 | 2017-09-20 19:00:16 +0200 | [diff] [blame] | 1791 | ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1792 | #else |
Peter Zijlstra | 96abb96 | 2017-09-20 19:00:16 +0200 | [diff] [blame] | 1793 | ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); |
Thomas Gleixner | 1cf4f62 | 2016-02-26 18:43:39 +0000 | [diff] [blame] | 1794 | #endif |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1795 | BUG_ON(ret && !bringup); |
| 1796 | return ret; |
| 1797 | } |
| 1798 | |
| 1799 | /* |
| 1800 | * Called from __cpuhp_setup_state on a recoverable failure. |
| 1801 | * |
| 1802 | * Note: The teardown callbacks for rollback are not allowed to fail! |
| 1803 | */ |
| 1804 | static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state, |
Thomas Gleixner | cf392d1 | 2016-08-12 19:49:39 +0200 | [diff] [blame] | 1805 | struct hlist_node *node) |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1806 | { |
| 1807 | int cpu; |
| 1808 | |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1809 | /* Roll back the already executed steps on the other cpus */ |
| 1810 | for_each_present_cpu(cpu) { |
| 1811 | struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); |
| 1812 | int cpustate = st->state; |
| 1813 | |
| 1814 | if (cpu >= failedcpu) |
| 1815 | break; |
| 1816 | |
| 1817 | /* Did we invoke the startup call on that cpu ? */ |
| 1818 | if (cpustate >= state) |
Thomas Gleixner | cf392d1 | 2016-08-12 19:49:39 +0200 | [diff] [blame] | 1819 | cpuhp_issue_call(cpu, state, false, node); |
Thomas Gleixner | 5b7aa87 | 2016-02-26 18:43:33 +0000 | [diff] [blame] | 1820 | } |
| 1821 | } |
| 1822 | |
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp;
	int cpu;
	int ret;

	lockdep_assert_cpus_held();

	sp = cpuhp_get_step(state);
	if (!sp->multi_instance)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !sp->startup.multi)
		goto add_node;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, node);
		if (ret) {
			if (sp->teardown.multi)
				cpuhp_rollback_install(cpu, state, node);
			goto unlock;
		}
	}
add_node:
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}

int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
			       bool invoke)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
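
/*
 * Usage sketch for the multi-instance flavour (all names hypothetical;
 * the cpuhp_setup_state_multi()/cpuhp_state_add_instance() wrappers come
 * from <linux/cpuhotplug.h>). A driver embeds a hlist_node in each
 * instance and registers the instance against the state:
 *
 *	struct my_dev {
 *		struct hlist_node node;
 *	};
 *
 *	static int my_dev_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct my_dev *d = hlist_entry(node, struct my_dev, node);
 *
 *		return my_dev_setup_cpu(d, cpu);
 *	}
 *
 *	state = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *					my_dev_online, my_dev_offline);
 *	...
 *	ret = cpuhp_state_add_instance(state, &d->node);
 *	...
 *	cpuhp_state_remove_instance(state, &d->node);
 */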

/**
 * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
 * @state: The state to setup
 * @name: Name of the step
 * @invoke: If true, the startup function is invoked for cpus where
 *          cpu state >= @state
 * @startup: startup callback function
 * @teardown: teardown callback function
 * @multi_instance: State is set up for multiple instances which get
 *                  added afterwards.
 *
 * The caller needs to hold cpus read locked while calling this function.
 * Returns:
 *   On success:
 *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
 *      0 for all other states
 *   On failure: proper (negative) error code
 */
int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
				   const char *name, bool invoke,
				   int (*startup)(unsigned int cpu),
				   int (*teardown)(unsigned int cpu),
				   bool multi_instance)
{
	int cpu, ret = 0;
	bool dynstate;

	lockdep_assert_cpus_held();

	if (cpuhp_cb_check(state) || !name)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	ret = cpuhp_store_callbacks(state, name, startup, teardown,
				    multi_instance);

	dynstate = state == CPUHP_AP_ONLINE_DYN;
	if (ret > 0 && dynstate) {
		state = ret;
		ret = 0;
	}

	if (ret || !invoke || !startup)
		goto out;

	/*
	 * Try to call the startup callback for each present cpu
	 * depending on the hotplug state of the cpu.
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate < state)
			continue;

		ret = cpuhp_issue_call(cpu, state, true, NULL);
		if (ret) {
			if (teardown)
				cpuhp_rollback_install(cpu, state, NULL);
			cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
			goto out;
		}
	}
out:
	mutex_unlock(&cpuhp_state_mutex);
	/*
	 * If the requested state is CPUHP_AP_ONLINE_DYN, return the
	 * dynamically allocated state in case of success.
	 */
	if (!ret && dynstate)
		return state;
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);

int __cpuhp_setup_state(enum cpuhp_state state,
			const char *name, bool invoke,
			int (*startup)(unsigned int cpu),
			int (*teardown)(unsigned int cpu),
			bool multi_instance)
{
	int ret;

	cpus_read_lock();
	ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
					     teardown, multi_instance);
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state);
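
/*
 * Usage sketch (hypothetical names): most callers use the
 * cpuhp_setup_state() wrapper from <linux/cpuhotplug.h>, which takes the
 * cpus read lock itself. Registering with CPUHP_AP_ONLINE_DYN reserves a
 * dynamic state and returns its number, which is later handed to
 * cpuhp_remove_state():
 *
 *	static enum cpuhp_state my_state;
 *
 *	static int my_cpu_online(unsigned int cpu)
 *	{
 *		return my_setup_percpu_resources(cpu);
 *	}
 *
 *	static int my_cpu_offline(unsigned int cpu)
 *	{
 *		my_free_percpu_resources(cpu);
 *		return 0;
 *	}
 *
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
 *				my_cpu_online, my_cpu_offline);
 *	if (ret < 0)
 *		return ret;
 *	my_state = ret;
 *	...
 *	cpuhp_remove_state(my_state);
 */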

int __cpuhp_state_remove_instance(enum cpuhp_state state,
				  struct hlist_node *node, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	if (!sp->multi_instance)
		return -EINVAL;

	cpus_read_lock();
	mutex_lock(&cpuhp_state_mutex);

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;
	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}

remove:
	hlist_del(node);
	mutex_unlock(&cpuhp_state_mutex);
	cpus_read_unlock();

	return 0;
}
EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);

/**
 * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
 * @state: The state to remove
 * @invoke: If true, the teardown function is invoked for cpus where
 *          cpu state >= @state
 *
 * The caller needs to hold cpus read locked while calling this function.
 * The teardown callback is currently not allowed to fail. Think
 * about module removal!
 */
void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu;

	BUG_ON(cpuhp_cb_check(state));

	lockdep_assert_cpus_held();

	mutex_lock(&cpuhp_state_mutex);
	if (sp->multi_instance) {
		WARN(!hlist_empty(&sp->list),
		     "Error: Removing state %d which has instances left.\n",
		     state);
		goto remove;
	}

	if (!invoke || !cpuhp_get_teardown_cb(state))
		goto remove;

	/*
	 * Call the teardown callback for each present cpu depending
	 * on the hotplug state of the cpu. This function is not
	 * allowed to fail currently!
	 */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
		int cpustate = st->state;

		if (cpustate >= state)
			cpuhp_issue_call(cpu, state, false, NULL);
	}
remove:
	cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
	mutex_unlock(&cpuhp_state_mutex);
}
EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);

void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
	cpus_read_lock();
	__cpuhp_remove_state_cpuslocked(state, invoke);
	cpus_read_unlock();
}
EXPORT_SYMBOL(__cpuhp_remove_state);

#ifdef CONFIG_HOTPLUG_SMT
static void cpuhp_offline_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = true;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
}

static void cpuhp_online_cpu_device(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	dev->offline = false;
	/* Tell user space about the state change */
	kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}

int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	for_each_online_cpu(cpu) {
		if (topology_is_primary_thread(cpu))
			continue;
		ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
		if (ret)
			break;
		/*
		 * As this needs to hold the cpu maps lock it's impossible
		 * to call device_offline() because that ends up calling
		 * cpu_down() which takes the cpu maps lock. The cpu maps
		 * lock needs to be held as this might race against
		 * in-kernel abusers of the hotplug machinery (thermal
		 * management).
		 *
		 * So nothing would update device:offline state. That would
		 * leave the sysfs entry stale and prevent onlining after
		 * smt control has been changed to 'off' again. This is
		 * called under the sysfs hotplug lock, so it is properly
		 * serialized against the regular offline usage.
		 */
		cpuhp_offline_cpu_device(cpu);
	}
	if (!ret)
		cpu_smt_control = ctrlval;
	cpu_maps_update_done();
	return ret;
}

int cpuhp_smt_enable(void)
{
	int cpu, ret = 0;

	cpu_maps_update_begin();
	cpu_smt_control = CPU_SMT_ENABLED;
	for_each_present_cpu(cpu) {
		/* Skip online CPUs and CPUs on offline nodes */
		if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
			continue;
		ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
		if (ret)
			break;
		/* See comment in cpuhp_smt_disable() */
		cpuhp_online_cpu_device(cpu);
	}
	cpu_maps_update_done();
	return ret;
}
#endif

#if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
static ssize_t show_cpuhp_state(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->state);
}
static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);

static ssize_t write_cpuhp_target(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int target, ret;

	ret = kstrtoint(buf, 10, &target);
	if (ret)
		return ret;

#ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
	if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
		return -EINVAL;
#else
	if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
		return -EINVAL;
#endif

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(target);
	ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		goto out;

	if (st->state < target)
		ret = cpu_up(dev->id, target);
	else
		ret = cpu_down(dev->id, target);
out:
	unlock_device_hotplug();
	return ret ? ret : count;
}

static ssize_t show_cpuhp_target(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->target);
}
static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
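
/*
 * Together with the "state" attribute above this yields, e.g. (paths as
 * created by cpuhp_sysfs_init() below; the state numbers are illustrative
 * since they depend on the kernel configuration):
 *
 *	# cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	233
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target	  (offline cpu1)
 *	# echo 233 > /sys/devices/system/cpu/cpu1/hotplug/target  (online cpu1)
 *
 * Targets other than CPUHP_OFFLINE and CPUHP_ONLINE are only accepted
 * when CONFIG_CPU_HOTPLUG_STATE_CONTROL is enabled.
 */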

static ssize_t write_cpuhp_fail(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
	struct cpuhp_step *sp;
	int fail, ret;

	ret = kstrtoint(buf, 10, &fail);
	if (ret)
		return ret;

	if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
		return -EINVAL;

	/*
	 * Cannot fail STARTING/DYING callbacks.
	 */
	if (cpuhp_is_atomic_state(fail))
		return -EINVAL;

	/*
	 * Cannot fail anything that doesn't have callbacks.
	 */
	mutex_lock(&cpuhp_state_mutex);
	sp = cpuhp_get_step(fail);
	if (!sp->startup.single && !sp->teardown.single)
		ret = -EINVAL;
	mutex_unlock(&cpuhp_state_mutex);
	if (ret)
		return ret;

	st->fail = fail;

	return count;
}

static ssize_t show_cpuhp_fail(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);

	return sprintf(buf, "%d\n", st->fail);
}

static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
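
/*
 * Error injection sketch: writing a state number to the "fail" file arms
 * st->fail, so a subsequent hotplug operation on that CPU treats the
 * callback invocation for that state as failed and exercises the rollback
 * paths (state number illustrative):
 *
 *	# echo 84 > /sys/devices/system/cpu/cpu1/hotplug/fail
 *	# echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 */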

static struct attribute *cpuhp_cpu_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_target.attr,
	&dev_attr_fail.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_attr_group = {
	.attrs = cpuhp_cpu_attrs,
	.name = "hotplug",
};

static ssize_t show_cpuhp_states(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	ssize_t cur, res = 0;
	int i;

	mutex_lock(&cpuhp_state_mutex);
	for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
		struct cpuhp_step *sp = cpuhp_get_step(i);

		if (sp->name) {
			cur = sprintf(buf, "%3d: %s\n", i, sp->name);
			buf += cur;
			res += cur;
		}
	}
	mutex_unlock(&cpuhp_state_mutex);
	return res;
}
static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);

static struct attribute *cpuhp_cpu_root_attrs[] = {
	&dev_attr_states.attr,
	NULL
};

static const struct attribute_group cpuhp_cpu_root_attr_group = {
	.attrs = cpuhp_cpu_root_attrs,
	.name = "hotplug",
};

#ifdef CONFIG_HOTPLUG_SMT

static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	int ctrlval, ret;

	if (sysfs_streq(buf, "on"))
		ctrlval = CPU_SMT_ENABLED;
	else if (sysfs_streq(buf, "off"))
		ctrlval = CPU_SMT_DISABLED;
	else if (sysfs_streq(buf, "forceoff"))
		ctrlval = CPU_SMT_FORCE_DISABLED;
	else
		return -EINVAL;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
		return -EPERM;

	if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		return -ENODEV;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	if (ctrlval != cpu_smt_control) {
		switch (ctrlval) {
		case CPU_SMT_ENABLED:
			ret = cpuhp_smt_enable();
			break;
		case CPU_SMT_DISABLED:
		case CPU_SMT_FORCE_DISABLED:
			ret = cpuhp_smt_disable(ctrlval);
			break;
		}
	}

	unlock_device_hotplug();
	return ret ? ret : count;
}

#else /* !CONFIG_HOTPLUG_SMT */
static ssize_t
__store_smt_control(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	return -ENODEV;
}
#endif /* CONFIG_HOTPLUG_SMT */

static const char *smt_states[] = {
	[CPU_SMT_ENABLED]		= "on",
	[CPU_SMT_DISABLED]		= "off",
	[CPU_SMT_FORCE_DISABLED]	= "forceoff",
	[CPU_SMT_NOT_SUPPORTED]		= "notsupported",
	[CPU_SMT_NOT_IMPLEMENTED]	= "notimplemented",
};

static ssize_t
show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
{
	const char *state = smt_states[cpu_smt_control];

	return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
}

static ssize_t
store_smt_control(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	return __store_smt_control(dev, attr, buf, count);
}
static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
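
/*
 * Example interaction with the resulting control file (created under
 * /sys/devices/system/cpu/smt/ by cpu_smt_sysfs_init() below):
 *
 *	# echo off > /sys/devices/system/cpu/smt/control
 *	# cat /sys/devices/system/cpu/smt/control
 *	off
 *
 * "forceoff" is one-way: once set, further writes fail with -EPERM, see
 * __store_smt_control() above.
 */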

static ssize_t
show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
}
static DEVICE_ATTR(active, 0444, show_smt_active, NULL);

static struct attribute *cpuhp_smt_attrs[] = {
	&dev_attr_control.attr,
	&dev_attr_active.attr,
	NULL
};

static const struct attribute_group cpuhp_smt_attr_group = {
	.attrs = cpuhp_smt_attrs,
	.name = "smt",
};

static int __init cpu_smt_sysfs_init(void)
{
	return sysfs_create_group(&cpu_subsys.dev_root->kobj,
				  &cpuhp_smt_attr_group);
}

static int __init cpuhp_sysfs_init(void)
{
	int cpu, ret;

	ret = cpu_smt_sysfs_init();
	if (ret)
		return ret;

	ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
				 &cpuhp_cpu_root_attr_group);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct device *dev = get_cpu_device(cpu);

		if (!dev)
			continue;
		ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
		if (ret)
			return ret;
	}
	return 0;
}
device_initcall(cpuhp_sysfs_init);
#endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each bit number nr, the NR_CPUS-bit value 1 << nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
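
/*
 * Worked example, following get_cpu_mask() in <linux/cpumask.h>:
 * cpumask_of(cpu) takes row 1 + cpu % BITS_PER_LONG, whose word 0 has
 * bit cpu % BITS_PER_LONG set, and steps the pointer back by
 * cpu / BITS_PER_LONG words. On a 64-bit kernel cpumask_of(70) thus
 * points one word before cpu_bit_bitmap[7], so word 1 of the returned
 * mask is cpu_bit_bitmap[7][0] == 1UL << 6, i.e. exactly bit 70. The
 * empty row 0 is what keeps the backed-up pointer inside the array.
 */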

const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
struct cpumask __cpu_possible_mask __read_mostly
	= {CPU_BITS_ALL};
#else
struct cpumask __cpu_possible_mask __read_mostly;
#endif
EXPORT_SYMBOL(__cpu_possible_mask);

struct cpumask __cpu_online_mask __read_mostly;
EXPORT_SYMBOL(__cpu_online_mask);

struct cpumask __cpu_present_mask __read_mostly;
EXPORT_SYMBOL(__cpu_present_mask);

struct cpumask __cpu_active_mask __read_mostly;
EXPORT_SYMBOL(__cpu_active_mask);

atomic_t __num_online_cpus __read_mostly;
EXPORT_SYMBOL(__num_online_cpus);

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(&__cpu_present_mask, src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(&__cpu_possible_mask, src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(&__cpu_online_mask, src);
}

void set_cpu_online(unsigned int cpu, bool online)
{
	/*
	 * atomic_inc/dec() is required to handle the horrid abuse of this
	 * function by the reboot and kexec code which invoke it from
	 * IPI/NMI broadcasts when shutting down CPUs. Invocation from
	 * regular CPU hotplug is properly serialized.
	 *
	 * Note that making __num_online_cpus an atomic_t does not protect
	 * readers which are not serialized against concurrent hotplug
	 * operations.
	 */
	if (online) {
		if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
			atomic_inc(&__num_online_cpus);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
			atomic_dec(&__num_online_cpus);
	}
}
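
/*
 * Readers normally use num_online_cpus() from <linux/cpumask.h>, which is
 * an atomic_read() of __num_online_cpus. Per the comment above, the value
 * is only stable against hotplug if the caller serializes, e.g.:
 *
 *	cpus_read_lock();
 *	n = num_online_cpus();	(stable while the lock is held)
 *	cpus_read_unlock();
 */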

/*
 * Activate the first processor.
 */
void __init boot_cpu_init(void)
{
	int cpu = smp_processor_id();

	/* Mark the boot cpu "present", "online" etc for SMP and UP case */
	set_cpu_online(cpu, true);
	set_cpu_active(cpu, true);
	set_cpu_present(cpu, true);
	set_cpu_possible(cpu, true);

#ifdef CONFIG_SMP
	__boot_cpu_id = cpu;
#endif
}

/*
 * Must be called _AFTER_ setting up the per_cpu areas
 */
void __init boot_cpu_hotplug_init(void)
{
#ifdef CONFIG_SMP
	cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
#endif
	this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
}

/*
 * These are used for a global "mitigations=" cmdline option for toggling
 * optional CPU mitigations.
 */
enum cpu_mitigations {
	CPU_MITIGATIONS_OFF,
	CPU_MITIGATIONS_AUTO,
	CPU_MITIGATIONS_AUTO_NOSMT,
};

static enum cpu_mitigations cpu_mitigations __ro_after_init =
	CPU_MITIGATIONS_AUTO;

static int __init mitigations_parse_cmdline(char *arg)
{
	if (!strcmp(arg, "off"))
		cpu_mitigations = CPU_MITIGATIONS_OFF;
	else if (!strcmp(arg, "auto"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO;
	else if (!strcmp(arg, "auto,nosmt"))
		cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
	else
		pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
			arg);

	return 0;
}
early_param("mitigations", mitigations_parse_cmdline);
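
/*
 * Example: booting with "mitigations=auto,nosmt" keeps the default
 * mitigation selection but additionally disables SMT. Architecture code
 * consults the helpers below, roughly:
 *
 *	if (cpu_mitigations_off())
 *		return;			(skip selecting a mitigation)
 *	if (cpu_mitigations_auto_nosmt())
 *		(also disable SMT)
 */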

/* mitigations=off */
bool cpu_mitigations_off(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_OFF;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_off);

/* mitigations=auto,nosmt */
bool cpu_mitigations_auto_nosmt(void)
{
	return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
}
EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);