// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func)	.held = func,
#else
#define __INIT_HELD(func)
#endif

static const struct {
	void (*sync)(void);
	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
	void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
	int  (*held)(void);
#endif
} gp_ops[] = {
	[RCU_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_held)
	},
	[RCU_SCHED_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_sched_held)
	},
	[RCU_BH_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_bh_held)
	},
};

enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
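
/*
 * Overview of the two state machines above:
 *
 * gp_state tracks reader access:  GP_IDLE means readers may use their
 * fastpaths, GP_PENDING means rcu_sync_enter() is waiting out a grace
 * period, and GP_PASSED means readers are being held on their slowpaths.
 *
 * cb_state tracks the callback queued by rcu_sync_exit():  CB_IDLE means
 * no callback is queued, CB_PENDING means one is in flight and will
 * re-enable reader fastpaths when it runs, and CB_REPLAY means another
 * rcu_sync_exit() arrived while a callback was pending, so the callback
 * must requeue itself to wait for one more grace period.
 */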

#define	rss_lock	gp_wait.lock

#ifdef CONFIG_PROVE_RCU
void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
			 "suspicious rcu_sync_is_idle() usage");
}
EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
#endif

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 * @type: Flavor of RCU with which to synchronize rcu_sync structure
 */
void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
	rsp->gp_type = type;
}
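
/*
 * Illustrative sketch (not part of this file): embedding an rcu_sync
 * in a hypothetical structure and initializing it for the normal RCU
 * flavor.  The type and function names are made up for the example.
 *
 *	struct my_gate {
 *		struct rcu_sync rss;
 *		// ... protected state ...
 *	};
 *
 *	static void my_gate_init(struct my_gate *g)
 *	{
 *		rcu_sync_init(&g->rss, RCU_SYNC);
 *	}
 */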

/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
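
/*
 * Illustrative sketch (not part of this file): a caller that wants
 * readers held on their slowpaths from initialization onward, without
 * paying for a grace period up front, can pair rcu_sync_init() with
 * rcu_sync_enter_start().  'g' is the hypothetical structure from the
 * sketch above.
 *
 *	rcu_sync_init(&g->rss, RCU_SYNC);
 *	rcu_sync_enter_start(&g->rss);	// readers start on the slowpath
 *	// ... any number of updates ...
 *	rcu_sync_exit(&g->rss);		// eventually re-enable fastpaths
 */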

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period.  However, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	bool need_wait, need_sync;

	spin_lock_irq(&rsp->rss_lock);
	need_wait = rsp->gp_count++;
	need_sync = rsp->gp_state == GP_IDLE;
	if (need_sync)
		rsp->gp_state = GP_PENDING;
	spin_unlock_irq(&rsp->rss_lock);

	WARN_ON_ONCE(need_wait && need_sync);
	if (need_sync) {
		gp_ops[rsp->gp_type].sync();
		rsp->gp_state = GP_PASSED;
		wake_up_all(&rsp->gp_wait);
	} else if (need_wait) {
		wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
	} else {
		/*
		 * Possible when there's a pending CB from an earlier
		 * rcu_sync_exit().  Nobody has yet been allowed the
		 * 'fast' path and thus we can avoid doing any sync().
		 * The callback will get 'dropped'.
		 */
		WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
	}
}
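
/*
 * Illustrative reader-side sketch (not part of this file): readers take
 * the fastpath only while no updater is active, checking under the RCU
 * read-side critical section that matches the rcu_sync flavor.  The
 * my_fast_op()/my_slow_op() helpers are hypothetical.
 *
 *	rcu_read_lock();
 *	if (rcu_sync_is_idle(&g->rss))
 *		my_fast_op(g);		// no updater: lock-free fastpath
 *	else
 *		my_slow_op(g);		// updater active: take the slowpath
 *	rcu_read_unlock();
 */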

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to one of the call_rcu() functions by
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of rcu_sync_exit().  It takes action based on events
 * that have taken place in the meantime, so that closely spaced
 * rcu_sync_enter() and rcu_sync_exit() pairs need not wait for a grace
 * period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ends, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ends, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(rsp->gp_state != GP_PASSED);
	WARN_ON_ONCE(rsp->cb_state == CB_IDLE);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * A new rcu_sync_enter() has happened; drop the callback.
		 */
		rsp->cb_state = CB_IDLE;
	} else if (rsp->cb_state == CB_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback
		 * to catch a later GP.
		 */
		rsp->cb_state = CB_PENDING;
		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
	} else {
		/*
		 * We're at least a GP after rcu_sync_exit(); everybody
		 * will now have observed the write side critical section.
		 * Let 'em rip!
		 */
		rsp->cb_state = CB_IDLE;
		rsp->gp_state = GP_IDLE;
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->cb_state == CB_IDLE) {
			rsp->cb_state = CB_PENDING;
			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
		} else if (rsp->cb_state == CB_PENDING) {
			rsp->cb_state = CB_REPLAY;
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
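
/*
 * Illustrative writer-side sketch (not part of this file): each update
 * brackets its critical section with rcu_sync_enter()/rcu_sync_exit(),
 * forcing readers onto their slowpaths for the duration.  my_update()
 * and 'g' are hypothetical.
 *
 *	rcu_sync_enter(&g->rss);	// force readers onto the slowpath
 *	my_update(g);			// readers now see the update in flight
 *	rcu_sync_exit(&g->rss);		// fastpaths return after a GP
 */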

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int cb_state;

	WARN_ON_ONCE(rsp->gp_count);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->cb_state == CB_REPLAY)
		rsp->cb_state = CB_PENDING;
	cb_state = rsp->cb_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (cb_state != CB_IDLE) {
		gp_ops[rsp->gp_type].wait();
		WARN_ON_ONCE(rsp->cb_state != CB_IDLE);
	}
}
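
/*
 * Illustrative teardown sketch (not part of this file): rcu_sync_dtor()
 * may be called only after the last rcu_sync_exit(), and it flushes any
 * still-pending callback before the embedding object is freed.
 *
 *	rcu_sync_exit(&g->rss);		// last updater finishes
 *	rcu_sync_dtor(&g->rss);		// wait out any pending callback
 *	kfree(g);			// now safe to free the object
 */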