/*
 * RCU expedited grace periods
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2016
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/lockdep.h>

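/*
 * rcu_exp_handler() is defined later in this file, with separate
 * variants for CONFIG_PREEMPT_RCU=y and CONFIG_PREEMPT_RCU=n builds.
 */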
static void rcu_exp_handler(void *unused);

/*
 * Record the start of an expedited grace period.
 */
static void rcu_exp_gp_seq_start(void)
{
	rcu_seq_start(&rcu_state.expedited_sequence);
}

/*
 * Return the value that the expedited-grace-period counter will have
 * at the end of the current grace period.
 */
static __maybe_unused unsigned long rcu_exp_gp_seq_endval(void)
{
	return rcu_seq_endval(&rcu_state.expedited_sequence);
}

/*
 * Record the end of an expedited grace period.
 */
static void rcu_exp_gp_seq_end(void)
{
	rcu_seq_end(&rcu_state.expedited_sequence);
	smp_mb(); /* Ensure that consecutive grace periods serialize. */
}

/*
 * Take a snapshot of the expedited-grace-period counter.
 */
static unsigned long rcu_exp_gp_seq_snap(void)
{
	unsigned long s;

	smp_mb(); /* Caller's modifications seen first by other CPUs. */
	s = rcu_seq_snap(&rcu_state.expedited_sequence);
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("snap"));
	return s;
}

/*
 * Given a counter snapshot from rcu_exp_gp_seq_snap(), return true
 * if a full expedited grace period has elapsed since that snapshot
 * was taken.
 */
static bool rcu_exp_gp_seq_done(unsigned long s)
{
	return rcu_seq_done(&rcu_state.expedited_sequence, s);
}
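
/*
 * Typical usage of the above counter operations, as in exp_funnel_lock()
 * and synchronize_rcu_expedited() below: take a snapshot with
 * rcu_exp_gp_seq_snap(), start the grace period with
 * rcu_exp_gp_seq_start(), and poll rcu_exp_gp_seq_done() to learn when
 * a full expedited grace period has elapsed since that snapshot.
 */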

/*
 * Reset the ->expmaskinit values in the rcu_node tree to reflect any
 * recent CPU-online activity.  Note that these masks are not cleared
 * when CPUs go offline, so they reflect the union of all CPUs that have
 * ever been online.  This means that this function normally takes its
 * no-work-to-do fastpath.
 */
static void sync_exp_reset_tree_hotplug(void)
{
	bool done;
	unsigned long flags;
	unsigned long mask;
	unsigned long oldmask;
	int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
	struct rcu_node *rnp;
	struct rcu_node *rnp_up;

	/* If no new CPUs onlined since last time, nothing to do. */
	if (likely(ncpus == rcu_state.ncpus_snap))
		return;
	rcu_state.ncpus_snap = ncpus;

	/*
	 * Each pass through the following loop propagates newly onlined
	 * CPUs for the current rcu_node structure up the rcu_node tree.
	 */
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmaskinit == rnp->expmaskinitnext) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;  /* No new CPUs, nothing to do. */
		}

		/* Update this node's mask, track old value for propagation. */
		oldmask = rnp->expmaskinit;
		rnp->expmaskinit = rnp->expmaskinitnext;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

		/* If it was already nonzero, nothing to propagate. */
		if (oldmask)
			continue;

		/* Propagate the new CPU up the tree. */
		mask = rnp->grpmask;
		rnp_up = rnp->parent;
		done = false;
		while (rnp_up) {
			raw_spin_lock_irqsave_rcu_node(rnp_up, flags);
			if (rnp_up->expmaskinit)
				done = true;
			rnp_up->expmaskinit |= mask;
			raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
			if (done)
				break;
			mask = rnp_up->grpmask;
			rnp_up = rnp_up->parent;
		}
	}
}

/*
 * Reset the ->expmask values in the rcu_node tree in preparation for
 * a new expedited grace period.
 */
static void __maybe_unused sync_exp_reset_tree(void)
{
	unsigned long flags;
	struct rcu_node *rnp;

	sync_exp_reset_tree_hotplug();
	rcu_for_each_node_breadth_first(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		WARN_ON_ONCE(rnp->expmask);
		rnp->expmask = rnp->expmaskinit;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}
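
/*
 * The ->expmask bits set by sync_exp_reset_tree() are cleared, CPU by
 * CPU, by rcu_report_exp_cpu_mult() and __rcu_report_exp_rnp() below
 * as quiescent states are reported.
 */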

/*
 * Return true if there is no RCU expedited grace period in progress
 * for the specified rcu_node structure, in other words, if all CPUs and
 * tasks covered by the specified rcu_node structure have done their bit
 * for the current expedited grace period.  Works only for preemptible
 * RCU -- other RCU implementations use other means.
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static bool sync_rcu_preempt_exp_done(struct rcu_node *rnp)
{
	raw_lockdep_assert_held_rcu_node(rnp);

	return rnp->exp_tasks == NULL &&
	       READ_ONCE(rnp->expmask) == 0;
}

/*
 * Like sync_rcu_preempt_exp_done(), but this function assumes the caller
 * doesn't hold the rcu_node's ->lock, and will acquire and release the lock
 * itself.
 */
static bool sync_rcu_preempt_exp_done_unlocked(struct rcu_node *rnp)
{
	unsigned long flags;
	bool ret;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	ret = sync_rcu_preempt_exp_done(rnp);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	return ret;
}


/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited preemptible-RCU
 * grace period.  This event is reported either to the rcu_node structure on
 * which the task was queued or to one of that rcu_node structure's ancestors,
 * recursively up the tree.  (Calm down, calm down, we do the recursion
 * iteratively!)
 *
 * Caller must hold the specified rcu_node structure's ->lock.
 */
static void __rcu_report_exp_rnp(struct rcu_node *rnp,
				 bool wake, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long mask;

	for (;;) {
		if (!sync_rcu_preempt_exp_done(rnp)) {
			if (!rnp->expmask)
				rcu_initiate_boost(rnp, flags);
			else
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			break;
		}
		if (rnp->parent == NULL) {
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			if (wake) {
				smp_mb(); /* EGP done before wake_up(). */
				swake_up_one(&rcu_state.expedited_wq);
			}
			break;
		}
		mask = rnp->grpmask;
		raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
		rnp = rnp->parent;
		raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
		WARN_ON_ONCE(!(rnp->expmask & mask));
		rnp->expmask &= ~mask;
	}
}

/*
 * Report expedited quiescent state for specified node.  This is a
 * lock-acquisition wrapper function for __rcu_report_exp_rnp().
 */
static void __maybe_unused rcu_report_exp_rnp(struct rcu_node *rnp, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	__rcu_report_exp_rnp(rnp, wake, flags);
}

/*
 * Report expedited quiescent state for multiple CPUs, all covered by the
 * specified leaf rcu_node structure.
 */
static void rcu_report_exp_cpu_mult(struct rcu_node *rnp,
				    unsigned long mask, bool wake)
{
	unsigned long flags;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!(rnp->expmask & mask)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	rnp->expmask &= ~mask;
	__rcu_report_exp_rnp(rnp, wake, flags); /* Releases rnp->lock. */
}

/*
 * Report expedited quiescent state for specified rcu_data (CPU).
 */
static void rcu_report_exp_rdp(struct rcu_data *rdp)
{
	WRITE_ONCE(rdp->deferred_qs, false);
	rcu_report_exp_cpu_mult(rdp->mynode, rdp->grpmask, true);
}

/* Common code for work-done checking. */
static bool sync_exp_work_done(unsigned long s)
{
	if (rcu_exp_gp_seq_done(s)) {
		trace_rcu_exp_grace_period(rcu_state.name, s, TPS("done"));
		/* Ensure test happens before caller kfree(). */
		smp_mb__before_atomic(); /* ^^^ */
		return true;
	}
	return false;
}

/*
 * Funnel-lock acquisition for expedited grace periods.  Returns true
 * if some other task completed an expedited grace period that this task
 * can piggy-back on, and with no mutex held.  Otherwise, returns false
 * with the mutex held, indicating that the caller must actually do the
 * expedited grace period.
 */
static bool exp_funnel_lock(unsigned long s)
{
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_node *rnp_root = rcu_get_root();

	/* Low-contention fastpath. */
	if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
	    (rnp == rnp_root ||
	     ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
	    mutex_trylock(&rcu_state.exp_mutex))
		goto fastpath;

	/*
	 * Each pass through the following loop works its way up
	 * the rcu_node tree, returning if others have done the work or
	 * otherwise falls through to acquire ->exp_mutex.  The mapping
	 * from CPU to rcu_node structure can be inexact, as it is just
	 * promoting locality and is not strictly needed for correctness.
	 */
	for (; rnp != NULL; rnp = rnp->parent) {
		if (sync_exp_work_done(s))
			return true;

		/* Work not done, either wait here or go up. */
		spin_lock(&rnp->exp_lock);
		if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {

			/* Someone else doing GP, so wait for them. */
			spin_unlock(&rnp->exp_lock);
			trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
						  rnp->grplo, rnp->grphi,
						  TPS("wait"));
			wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
				   sync_exp_work_done(s));
			return true;
		}
		rnp->exp_seq_rq = s; /* Followers can wait on us. */
		spin_unlock(&rnp->exp_lock);
		trace_rcu_exp_funnel_lock(rcu_state.name, rnp->level,
					  rnp->grplo, rnp->grphi, TPS("nxtlvl"));
	}
	mutex_lock(&rcu_state.exp_mutex);
fastpath:
	if (sync_exp_work_done(s)) {
		mutex_unlock(&rcu_state.exp_mutex);
		return true;
	}
	rcu_exp_gp_seq_start();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("start"));
	return false;
}
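
/*
 * Note on the wait queues used above: each rcu_node structure has four
 * ->exp_wq[] wait queues, indexed by the low-order bits of the sequence
 * counter (rcu_seq_ctr(s) & 0x3).  Waiters in exp_funnel_lock() and in
 * synchronize_rcu_expedited() sleep on the queue matching their snapshot,
 * and rcu_exp_wait_wake() wakes that same queue once the corresponding
 * expedited grace period has ended.
 */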

/*
 * Select the CPUs within the specified rcu_node that the upcoming
 * expedited grace period needs to wait for.
 */
static void sync_rcu_exp_select_node_cpus(struct work_struct *wp)
{
	int cpu;
	unsigned long flags;
	unsigned long mask_ofl_test;
	unsigned long mask_ofl_ipi;
	int ret;
	struct rcu_exp_work *rewp =
		container_of(wp, struct rcu_exp_work, rew_work);
	struct rcu_node *rnp = container_of(rewp, struct rcu_node, rew);

	raw_spin_lock_irqsave_rcu_node(rnp, flags);

	/* Each pass checks a CPU for identity, offline, and idle. */
	mask_ofl_test = 0;
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
		int snap;

		if (raw_smp_processor_id() == cpu ||
		    !(rnp->qsmaskinitnext & mask)) {
			mask_ofl_test |= mask;
		} else {
			snap = rcu_dynticks_snap(rdp);
			if (rcu_dynticks_in_eqs(snap))
				mask_ofl_test |= mask;
			else
				rdp->exp_dynticks_snap = snap;
		}
	}
	mask_ofl_ipi = rnp->expmask & ~mask_ofl_test;

	/*
	 * Need to wait for any blocked tasks as well.  Note that
	 * additional blocking tasks will also block the expedited GP
	 * until such time as the ->expmask bits are cleared.
	 */
	if (rcu_preempt_has_tasks(rnp))
		rnp->exp_tasks = rnp->blkd_tasks.next;
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	/* IPI the remaining CPUs for expedited quiescent state. */
	for_each_leaf_node_cpu_mask(rnp, cpu, rnp->expmask) {
		unsigned long mask = leaf_node_cpu_bit(rnp, cpu);
		struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);

		if (!(mask_ofl_ipi & mask))
			continue;
retry_ipi:
		if (rcu_dynticks_in_eqs_since(rdp, rdp->exp_dynticks_snap)) {
			mask_ofl_test |= mask;
			continue;
		}
		ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
		if (!ret) {
			mask_ofl_ipi &= ~mask;
			continue;
		}
		/* Failed, raced with CPU hotplug operation. */
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if ((rnp->qsmaskinitnext & mask) &&
		    (rnp->expmask & mask)) {
			/* Online, so delay for a bit and try again. */
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("selectofl"));
			schedule_timeout_uninterruptible(1);
			goto retry_ipi;
		}
		/* CPU really is offline, so we can ignore it. */
		if (!(rnp->expmask & mask))
			mask_ofl_ipi &= ~mask;
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	/* Report quiescent states for those that went offline. */
	mask_ofl_test |= mask_ofl_ipi;
	if (mask_ofl_test)
		rcu_report_exp_cpu_mult(rnp, mask_ofl_test, false);
}

/*
 * Select the nodes that the upcoming expedited grace period needs
 * to wait for.
 */
static void sync_rcu_exp_select_cpus(void)
{
	int cpu;
	struct rcu_node *rnp;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("reset"));
	sync_exp_reset_tree();
	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("select"));

	/* Schedule work for each leaf rcu_node structure. */
	rcu_for_each_leaf_node(rnp) {
		rnp->exp_need_flush = false;
		if (!READ_ONCE(rnp->expmask))
			continue; /* Avoid early boot non-existent wq. */
		if (!READ_ONCE(rcu_par_gp_wq) ||
		    rcu_scheduler_active != RCU_SCHEDULER_RUNNING ||
		    rcu_is_last_leaf_node(rnp)) {
			/* No workqueues yet or last leaf, do direct call. */
			sync_rcu_exp_select_node_cpus(&rnp->rew.rew_work);
			continue;
		}
		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
		preempt_disable();
		cpu = find_next_bit(&rnp->ffmask, BITS_PER_LONG, -1);
		/* If all offline, queue the work on an unbound CPU. */
		if (unlikely(cpu > rnp->grphi - rnp->grplo))
			cpu = WORK_CPU_UNBOUND;
		else
			cpu += rnp->grplo;
		queue_work_on(cpu, rcu_par_gp_wq, &rnp->rew.rew_work);
		preempt_enable();
		rnp->exp_need_flush = true;
	}

	/* Wait for workqueue jobs (if any) to complete. */
	rcu_for_each_leaf_node(rnp)
		if (rnp->exp_need_flush)
			flush_work(&rnp->rew.rew_work);
}
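
/*
 * Most of the per-leaf scans above are farmed out to the rcu_par_gp_wq
 * workqueue (early boot and the last leaf instead use direct calls), so
 * that the leaf rcu_node structures can be processed in parallel before
 * the flush_work() loop waits for them all to complete.
 */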

static void synchronize_sched_expedited_wait(void)
{
	int cpu;
	unsigned long jiffies_stall;
	unsigned long jiffies_start;
	unsigned long mask;
	int ndetected;
	struct rcu_node *rnp;
	struct rcu_node *rnp_root = rcu_get_root();
	int ret;

	trace_rcu_exp_grace_period(rcu_state.name, rcu_exp_gp_seq_endval(), TPS("startwait"));
	jiffies_stall = rcu_jiffies_till_stall_check();
	jiffies_start = jiffies;

	for (;;) {
		ret = swait_event_timeout_exclusive(
				rcu_state.expedited_wq,
				sync_rcu_preempt_exp_done_unlocked(rnp_root),
				jiffies_stall);
		if (ret > 0 || sync_rcu_preempt_exp_done_unlocked(rnp_root))
			return;
		WARN_ON(ret < 0); /* workqueues should not be signaled. */
		if (rcu_cpu_stall_suppress)
			continue;
		panic_on_rcu_stall();
		pr_err("INFO: %s detected expedited stalls on CPUs/tasks: {",
		       rcu_state.name);
		ndetected = 0;
		rcu_for_each_leaf_node(rnp) {
			ndetected += rcu_print_task_exp_stall(rnp);
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				struct rcu_data *rdp;

				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				ndetected++;
				rdp = per_cpu_ptr(&rcu_data, cpu);
				pr_cont(" %d-%c%c%c", cpu,
					"O."[!!cpu_online(cpu)],
					"o."[!!(rdp->grpmask & rnp->expmaskinit)],
					"N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
			}
		}
		pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
			jiffies - jiffies_start, rcu_state.expedited_sequence,
			rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
		if (ndetected) {
			pr_err("blocking rcu_node structures:");
			rcu_for_each_node_breadth_first(rnp) {
				if (rnp == rnp_root)
					continue; /* printed unconditionally */
				if (sync_rcu_preempt_exp_done_unlocked(rnp))
					continue;
				pr_cont(" l=%u:%d-%d:%#lx/%c",
					rnp->level, rnp->grplo, rnp->grphi,
					rnp->expmask,
					".T"[!!rnp->exp_tasks]);
			}
			pr_cont("\n");
		}
		rcu_for_each_leaf_node(rnp) {
			for_each_leaf_node_possible_cpu(rnp, cpu) {
				mask = leaf_node_cpu_bit(rnp, cpu);
				if (!(rnp->expmask & mask))
					continue;
				dump_cpu_task(cpu);
			}
		}
		jiffies_stall = 3 * rcu_jiffies_till_stall_check() + 3;
	}
}

/*
 * Wait for the current expedited grace period to complete, and then
 * wake up everyone who piggybacked on the just-completed expedited
 * grace period.  Also update all the ->exp_seq_rq counters as needed
 * in order to avoid counter-wrap problems.
 */
static void rcu_exp_wait_wake(unsigned long s)
{
	struct rcu_node *rnp;

	synchronize_sched_expedited_wait();
	rcu_exp_gp_seq_end();
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));

	/*
	 * Switch over to wakeup mode, allowing the next GP, but -only- the
	 * next GP, to proceed.
	 */
	mutex_lock(&rcu_state.exp_wake_mutex);

	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
			spin_lock(&rnp->exp_lock);
			/* Recheck, avoid hang in case someone just arrived. */
			if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
				rnp->exp_seq_rq = s;
			spin_unlock(&rnp->exp_lock);
		}
		smp_mb(); /* All above changes before wakeup. */
		wake_up_all(&rnp->exp_wq[rcu_seq_ctr(rcu_state.expedited_sequence) & 0x3]);
	}
	trace_rcu_exp_grace_period(rcu_state.name, s, TPS("endwake"));
	mutex_unlock(&rcu_state.exp_wake_mutex);
}

/*
 * Common code to drive an expedited grace period forward, used by
 * workqueues and mid-boot-time tasks.
 */
static void rcu_exp_sel_wait_wake(unsigned long s)
{
	/* Initialize the rcu_node tree in preparation for the wait. */
	sync_rcu_exp_select_cpus();

	/* Wait and clean up, including waking everyone. */
	rcu_exp_wait_wake(s);
}

/*
 * Work-queue handler to drive an expedited grace period forward.
 */
static void wait_rcu_exp_gp(struct work_struct *wp)
{
	struct rcu_exp_work *rewp;

	rewp = container_of(wp, struct rcu_exp_work, rew_work);
	rcu_exp_sel_wait_wake(rewp->rew_s);
}

#ifdef CONFIG_PREEMPT_RCU

/*
 * Remote handler for smp_call_function_single().  If there is an
 * RCU read-side critical section in effect, request that the
 * next rcu_read_unlock() record the quiescent state up the
 * ->expmask fields in the rcu_node tree.  Otherwise, immediately
 * report the quiescent state.
 */
static void rcu_exp_handler(void *unused)
{
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rdp->mynode;
	struct task_struct *t = current;

	/*
	 * First, the common case of not being in an RCU read-side
	 * critical section.  If preemption and softirqs are also enabled,
	 * or if the CPU is idle, immediately report the quiescent state;
	 * otherwise defer.
	 */
	if (!t->rcu_read_lock_nesting) {
		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
		    rcu_dynticks_curr_cpu_in_eqs()) {
			rcu_report_exp_rdp(rdp);
		} else {
			rdp->deferred_qs = true;
			set_tsk_need_resched(t);
			set_preempt_need_resched();
		}
		return;
	}

	/*
	 * Second, the less-common case of being in an RCU read-side
	 * critical section.  In this case we can count on a future
	 * rcu_read_unlock().  However, this rcu_read_unlock() might
	 * execute on some other CPU, but in that case there will be
	 * a future context switch.  Either way, if the expedited
	 * grace period is still waiting on this CPU, set ->deferred_qs
	 * so that the eventual quiescent state will be reported.
	 * Note that there is a large group of race conditions that
	 * can have caused this quiescent state to already have been
	 * reported, so we really do need to check ->expmask.
	 */
	if (t->rcu_read_lock_nesting > 0) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->expmask & rdp->grpmask) {
			rdp->deferred_qs = true;
			WRITE_ONCE(t->rcu_read_unlock_special.b.exp_hint, true);
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}

	/*
	 * The final and least likely case is where the interrupted
	 * code was just about to or just finished exiting the RCU-preempt
	 * read-side critical section, and no, we can't tell which.
	 * So either way, set ->deferred_qs to flag later code that
	 * a quiescent state is required.
	 *
	 * If the CPU is fully enabled (or if some buggy RCU-preempt
	 * read-side critical section is being used from idle), just
	 * invoke rcu_preempt_deferred_qs() to immediately report the
	 * quiescent state.  We cannot use rcu_read_unlock_special()
	 * because we are in an interrupt handler, which will cause that
	 * function to take an early exit without doing anything.
	 *
	 * Otherwise, force a context switch after the CPU enables everything.
	 */
	rdp->deferred_qs = true;
	if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
	    WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs())) {
		rcu_preempt_deferred_qs(t);
	} else {
		set_tsk_need_resched(t);
		set_preempt_need_resched();
	}
}

/* PREEMPT=y, so no PREEMPT=n expedited grace period to clean up after. */
static void sync_sched_exp_online_cleanup(int cpu)
{
}

#else /* #ifdef CONFIG_PREEMPT_RCU */

/* Invoked on each online non-idle CPU for expedited quiescent state. */
static void rcu_exp_handler(void *unused)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = this_cpu_ptr(&rcu_data);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
		return;
	if (rcu_is_cpu_rrupt_from_idle()) {
		rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
		return;
	}
	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
	/* Store .exp before .rcu_urgent_qs. */
	smp_store_release(this_cpu_ptr(&rcu_data.rcu_urgent_qs), true);
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
static void sync_sched_exp_online_cleanup(int cpu)
{
	struct rcu_data *rdp;
	int ret;
	struct rcu_node *rnp;

	rdp = per_cpu_ptr(&rcu_data, cpu);
	rnp = rdp->mynode;
	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
		return;
	ret = smp_call_function_single(cpu, rcu_exp_handler, NULL, 0);
	WARN_ON_ONCE(ret);
}

#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/**
 * synchronize_rcu_expedited - Brute-force RCU grace period
 *
 * Wait for an RCU grace period, but expedite it.  The basic idea is to
 * IPI all non-idle non-nohz online CPUs.  The IPI handler checks whether
 * the CPU is in an RCU critical section, and if so, it sets a flag that
 * causes the outermost rcu_read_unlock() to report the quiescent state
 * for RCU-preempt or asks the scheduler for help for RCU-sched.  On the
 * other hand, if the CPU is not in an RCU read-side critical section,
 * the IPI handler reports the quiescent state immediately.
 *
 * Although this is a great improvement over previous expedited
 * implementations, it is still unfriendly to real-time workloads, and is
 * thus not recommended for any sort of common-case code.  In fact, if
 * you are using synchronize_rcu_expedited() in a loop, please restructure
 * your code to batch your updates, and then use a single synchronize_rcu()
 * instead.
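 *
 * For example (a sketch using hypothetical helpers), rather than doing
 * the following once per removed element:
 *
 *	remove_element(p);
 *	synchronize_rcu_expedited();
 *	kfree(p);
 *
 * remove all of the elements first, invoke a single synchronize_rcu()
 * (or synchronize_rcu_expedited()), and only then free them all.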
 *
 * This has the same semantics as (but is more brutal than) synchronize_rcu().
 */
void synchronize_rcu_expedited(void)
{
	struct rcu_data *rdp;
	struct rcu_exp_work rew;
	struct rcu_node *rnp;
	unsigned long s;

	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu_expedited() in RCU read-side critical section");

	/* Is the state such that the call is already a grace period? */
	if (rcu_blocking_is_gp())
		return;

	/* If expedited grace periods are prohibited, fall back to normal. */
	if (rcu_gp_is_normal()) {
		wait_rcu_gp(call_rcu);
		return;
	}

	/* Take a snapshot of the sequence number. */
	s = rcu_exp_gp_seq_snap();
	if (exp_funnel_lock(s))
		return;  /* Someone else did our work for us. */

	/* Ensure that load happens before action based on it. */
	if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
		/* Direct call during scheduler init and early_initcalls(). */
		rcu_exp_sel_wait_wake(s);
	} else {
		/* Marshall arguments & schedule the expedited grace period. */
		rew.rew_s = s;
		INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
		queue_work(rcu_gp_wq, &rew.rew_work);
	}

	/* Wait for expedited grace period to complete. */
	rdp = per_cpu_ptr(&rcu_data, raw_smp_processor_id());
	rnp = rcu_get_root();
	wait_event(rnp->exp_wq[rcu_seq_ctr(s) & 0x3],
		   sync_exp_work_done(s));
	smp_mb(); /* Workqueue actions happen before return. */

	/* Let the next expedited grace period start. */
	mutex_unlock(&rcu_state.exp_mutex);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);