/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * RCU expedited grace periods
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/lockdep.h>

static void rcu_exp_handler(void *unused);
static int rcu_print_task_exp_stall(struct rcu_node *rnp);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter, which is the
 * earliest value that will indicate that a full grace period has
 * elapsed since the current time.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		WRITE_ONCE(rnp->expmask, rnp->expmaskinit);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

/*
 * Return non-zero if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.
 */
static bool sync_rcu_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);
	return READ_ONCE(rnp->exp_tasks) == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_exp_done(), but where the caller does not hold the
 * rcu_node's ->lock.
 */
static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}


/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	raw_lockdep_assert_held_rcu_node(rnp);
	for (;;) {
		if (!sync_rcu_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WRITE_ONCE(rnp->expmask, rnp->expmask & ~mask);
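	/*
	 * Any nohz_full CPUs in this mask have now been reported as having
	 * passed through a quiescent state, so stop forcing the scheduler
	 * tick on them.
	 */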
	for_each_leaf_node_cpu_mask(rnp, cpu, mask) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		if (!IS_ENABLED(CONFIG_NO_HZ_FULL) || !rdp->rcu_forced_tick_exp)
			continue;
		rdp->rcu_forced_tick_exp = false;
		tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
	}
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->cpu_no_qs.b.exp, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		smp_mb(); /* Ensure test happens before caller kfree(). */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
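			/*
			 * Sleep on one of the four per-node wait queues,
			 * selected by the low-order bits of the sequence
			 * counter so that consecutive expedited grace
			 * periods use distinct queues.
			 */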
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		WRITE_ONCE(rnp->exp_seq_rq, s); /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		WRITE_ONCE(rnp->exp_tasks, rnp->blkd_tasks.next);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, mask_ofl_ipi) {
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		unsigned long mask = rdp->grpmask;

retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		if (get_cpu() == cpu) {
			mask_ofl_test |= mask;
			put_cpu();
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		put_cpu();
		/* The CPU will report the QS in response to the IPI. */
		if (!ret)
			continue;

		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_idle(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we must report its QS. */
		if (rnp->expmask & mask)
			mask_ofl_test |= mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No workqueues yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
		/* If all offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi - rnp->grplo))
			cpu = WORK_CPU_UNBOUND;
		else
			cpu += rnp->grplo;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}

/*
 * Wait for the expedited grace period to elapse, within time limit.
 * If the time limit is exceeded without the grace period elapsing,
 * return false, otherwise return true.
 */
static bool synchronize_rcu_expedited_wait_once(long tlimit)
{
	int t;
	struct rcu_node *rnp_root = rcu_get_root();

	t = swait_event_timeout_exclusive(rcu_state.expedited_wq,
					  sync_rcu_exp_done_unlocked(rnp_root),
					  tlimit);
	// Workqueues should not be signaled.
	if (t > 0 || sync_rcu_exp_done_unlocked(rnp_root))
		return true;
	WARN_ON(t < 0); /* workqueues should not be signaled. */
	return false;
}

/*
 * Wait for the expedited grace period to elapse, issuing any needed
 * RCU CPU stall warnings along the way.
 */
static void synchronize_rcu_expedited_wait(void)
{
	int cpu;
	unsigned long j;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;
	if (tick_nohz_full_enabled() && rcu_inkernel_boot_has_ended()) {
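		/*
		 * On nohz_full systems, wait briefly, then force the
		 * scheduler tick on any CPUs still blocking this expedited
		 * grace period so that they report their quiescent states
		 * promptly.
		 */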
		if (synchronize_rcu_expedited_wait_once(1))
			return;
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
				rdp = per_cpu_ptr(&rcu_data, cpu);
				if (rdp->rcu_forced_tick_exp)
					continue;
				rdp->rcu_forced_tick_exp = true;
				preempt_disable();
				if (cpu_online(cpu))
					tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU_EXP);
				preempt_enable();
			}
		}
		j = READ_ONCE(jiffies_till_first_fqs);
		if (synchronize_rcu_expedited_wait_once(j + HZ))
			return;
	}

	for (;;) {
		if (synchronize_rcu_expedited_wait_once(jiffies_stall))
			return;
		if (rcu_stall_is_suppressed())
			continue;
		panic_on_rcu_stall();
		trace_rcu_stall_warning(rcu_state.name, TPS("ExpeditedStall"));
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			data_race(rnp_root->expmask),
			".T"[!!data_race(rnp_root->exp_tasks)]);
		if (ndetected) {
			pr_err("blocking rcu_node structures (internal RCU debug):");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					data_race(rnp->expmask),
					".T"[!!data_race(rnp->exp_tasks)]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(READ_ONCE(rnp->expmask) & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_rcu_expedited_wait();

	// Switch over to wakeup mode, allowing the next GP to proceed.
	// End the previous grace period only after acquiring the mutex
	// to ensure that only one GP runs concurrently with wakeups.
	mutex_lock(&rcu_state.exp_wake_mutex);
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				WRITE_ONCE(rnp->exp_seq_rq, s);
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
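		/* Wake the wait queue selected by this GP's sequence number. */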
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(s) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	int depth = rcu_preempt_depth();
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If also enabled or idle, immediately
	 * report the quiescent state, otherwise defer.
	 */
	if (!depth) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->cpu_no_qs.b.exp
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (depth > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			WRITE_ONCE(rdp->cpu_no_qs.b.exp, true);
			t->rcu_read_unlock_special.b.exp_hint = true;
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	// Finally, negative nesting depth should not happen.
	WARN_ON_ONCE(1);
}

/* PREEMPTION=y, so no PREEMPTION=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each that is blocking the current
 * expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	unsigned long flags;
	int ndetected = 0;
	struct task_struct *t;

	if (!READ_ONCE(rnp->exp_tasks))
		return 0;
	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	t = list_entry(rnp->exp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		pr_cont(" P%d", t->pid);
		ndetected++;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return ndetected;
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Request an expedited quiescent state. */
static void rcu_exp_need_qs(void)
{
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;

	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	rcu_exp_need_qs();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	unsigned long flags;
	int my_cpu;
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	my_cpu = get_cpu();
	/* Quiescent state either not needed or already requested, leave. */
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    READ_ONCE(rdp->cpu_no_qs.b.exp)) {
		put_cpu();
		return;
	}
	/* Quiescent state needed on current CPU, so set it up locally. */
	if (my_cpu == cpu) {
		local_irq_save(flags);
		rcu_exp_need_qs();
		local_irq_restore(flags);
		put_cpu();
		return;
	}
	/* Quiescent state needed on some other CPU, send IPI. */
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	put_cpu();
	WARN_ON_ONCE(ret);
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections that are
 * blocking the current expedited grace period.
 */
static int rcu_print_task_exp_stall(struct rcu_node *rnp)
{
	return 0;
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, so is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT);
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is a grace period? */
	if (rcu_blocking_is_gp())
		return;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(boottime)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);

	if (likely(!boottime))
		destroy_work_on_stack(&rew.rew_work);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
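
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * an updater unlinks an element and waits for all pre-existing readers
 * before freeing it.  Common-case code should prefer synchronize_rcu().
 *
 *	list_del_rcu(&p->list);
 *	synchronize_rcu_expedited();
 *	kfree(p);
 */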