/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */
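/*
 * Quick reference (illustrative sketch only, not part of this file's
 * implementation; "my_srcu", "my_data", "old_p", and "new_p" are names
 * invented for the example).  A reader/updater pair using the public
 * SRCU API looks roughly like:
 *
 *	DEFINE_SRCU(my_srcu);
 *
 *	// Reader
 *	int idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_data, &my_srcu);
 *	// ... use p, may sleep ...
 *	srcu_read_unlock(&my_srcu, idx);
 *
 *	// Updater
 *	rcu_assign_pointer(my_data, new_p);
 *	synchronize_srcu(&my_srcu);	// Wait for pre-existing readers.
 *	kfree(old_p);
 */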

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);
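/*
 * Tuning note (sketch, not from the original source): this parameter is
 * read-only at runtime (0444), so it is normally set on the kernel
 * command line, e.g. "srcutree.exp_holdoff=0" to disable auto-expediting
 * entirely.  The "srcutree." prefix assumes the usual KBUILD_MODNAME for
 * this translation unit.
 */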
/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
static void process_srcu(struct work_struct *work);

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}
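/*
 * Worked example (illustrative only; the numbers are chosen for the
 * example, not taken from any particular config): with a two-level tree
 * and levelspread[] for the leaf level equal to 8, CPU 11 maps to leaf
 * snp_first[11 / 8], that is, the second leaf, and its ->grpmask bit
 * within that leaf is 1 << (11 - grplo).  The ->grplo/->grphi fields of
 * each node end up bracketing the range of CPUs funneling into it.
 */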

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
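/*
 * Allocation-lifecycle sketch (illustrative only; "my_dev" and its srcu
 * field are invented for the example).  A statically allocated domain is
 * declared with DEFINE_SRCU() or DEFINE_STATIC_SRCU() and needs no
 * explicit setup or teardown.  A dynamically allocated one pairs the
 * function above with cleanup_srcu_struct() below:
 *
 *	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *	if (!dev)
 *		return -ENOMEM;
 *	if (init_srcu_struct(&dev->srcu)) {	// fails only if alloc_percpu() does
 *		kfree(dev);
 *		return -ENOMEM;
 *	}
 *	...
 *	cleanup_srcu_struct(&dev->srcu);	// only after readers and callbacks are done
 *	kfree(dev);
 */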

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use sp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		raw_spin_unlock_irqrestore_rcu_node(sp, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between. This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}
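/*
 * Store-buffering sketch of the case the comment above describes
 * (illustrative only; X stands for some variable the updater writes
 * before synchronize_srcu() and the reader reads inside its critical
 * section):
 *
 *	Reader				Updater
 *	------				-------
 *	increment lock counter		store to X
 *	smp_mb();  // B			full barrier within the GP machinery
 *	read X				read the lock/unlock counters
 *
 * As in the classic SB pattern, the two sides cannot both miss the
 * other's store: a reader whose increment the scan does not see is
 * guaranteed to see the updater's pre-grace-period store to X.
 */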

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL	1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Leakage unless caller handles error. */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	flush_delayed_work(&sp->work);
	for_each_possible_cpu(cpu)
		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY	5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	lockdep_assert_held(&sp->lock);
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

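/*
 * Aside on the sequence encoding used above (see rcu.h for the
 * authoritative helpers): ->srcu_gp_seq is treated as a grace-period
 * counter in its upper bits plus a small state field in its low-order
 * bits, so rcu_seq_start() moves the state from SRCU_STATE_IDLE to
 * SRCU_STATE_SCAN1 without touching the grace-period number, and
 * rcu_seq_end() advances the number and returns the state to idle.
 */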
/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
				   &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	int idxnext;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	raw_spin_lock_irq_rcu_node(sp);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	raw_spin_unlock_irq_rcu_node(sp);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_irq_rcu_node(snp);
		cbs = false;
		if (snp >= sp->level[rcu_num_lvls - 1])
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		raw_spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check))
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(sp->sda, cpu);
				raw_spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	raw_spin_lock_irq_rcu_node(sp);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		raw_spin_unlock_irq_rcu_node(sp);
		/* Throttle expedited grace periods: Should be rare! */
		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
				    ? 0 : SRCU_INTERVAL);
	} else {
		raw_spin_unlock_irq_rcu_node(sp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		raw_spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (!ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		raw_spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
						      ? SRCU_INTERVAL
						      : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(system_power_efficient_wq, &sp->work,
				   srcu_get_delay(sp));
	}
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

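/*
 * Funnel-locking walk-through (illustrative): suppose two CPUs sharing
 * the same leaf both need grace period s.  The first to acquire the
 * leaf's lock records s in ->srcu_have_cbs[idx] and continues up toward
 * the root, eventually starting the grace period.  The second finds s
 * already recorded at the leaf, merely ORs its ->grpmask into
 * ->srcu_data_have_cbs[idx], and, unless it also needs to request
 * expediting, returns without touching the upper levels or the root
 * srcu_struct lock.
 */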
/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

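/*
 * Numerical example (derived from the defaults above, not additional
 * policy): with exp_holdoff at its default of 25 * 1000 ns, a
 * synchronize_srcu() arriving within roughly 25 microseconds of the
 * previous grace period's end is treated as part of a busy period and
 * is not auto-expedited, while one arriving later on an otherwise quiet
 * srcu_struct may be.
 */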
/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	raw_spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct in which to queue the callback
 * @head: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

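/*
 * Typical call_srcu() usage (sketch only; "struct foo", "foo_srcu", and
 * foo_reclaim() are invented for the example).  The rcu_head is embedded
 * in the protected object and recovered with container_of() once the
 * grace period has elapsed:
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	list_del_rcu(&p->list);
 *	call_srcu(&foo_srcu, &p->rh, foo_reclaim);
 */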
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 878 | /* |
| 879 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). |
| 880 | */ |
Paul E. McKenney | 1e9a038 | 2017-04-24 16:02:09 -0700 | [diff] [blame] | 881 | static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 882 | { |
| 883 | struct rcu_synchronize rcu; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 884 | |
| 885 | RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) || |
| 886 | lock_is_held(&rcu_bh_lock_map) || |
| 887 | lock_is_held(&rcu_lock_map) || |
| 888 | lock_is_held(&rcu_sched_lock_map), |
| 889 | "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); |
| 890 | |
| 891 | if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) |
| 892 | return; |
| 893 | might_sleep(); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 894 | check_init_srcu_struct(sp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 895 | init_completion(&rcu.completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 896 | init_rcu_head_on_stack(&rcu.head); |
Paul E. McKenney | 1e9a038 | 2017-04-24 16:02:09 -0700 | [diff] [blame] | 897 | __call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 898 | wait_for_completion(&rcu.completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 899 | destroy_rcu_head_on_stack(&rcu.head); |
Paul E. McKenney | 35732cf | 2017-07-05 13:30:21 -0700 | [diff] [blame] | 900 | |
| 901 | /* |
| 902 | * Make sure that later code is ordered after the SRCU grace |
| 903 | * period. This pairs with the raw_spin_lock_irq_rcu_node() |
| 904 | * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed |
| 905 | * because the current CPU might have been totally uninvolved with |
| 906 | * (and thus unordered against) that grace period. |
| 907 | */ |
| 908 | smp_mb(); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 909 | } |
| 910 | |
| 911 | /** |
| 912 | * synchronize_srcu_expedited - Brute-force SRCU grace period |
| 913 | * @sp: srcu_struct with which to synchronize. |
| 914 | * |
| 915 | * Wait for an SRCU grace period to elapse, but be more aggressive about |
| 916 | * spinning rather than blocking when waiting. |
| 917 | * |
| 918 | * Note that synchronize_srcu_expedited() has the same deadlock and |
| 919 | * memory-ordering properties as does synchronize_srcu(). |
| 920 | */ |
| 921 | void synchronize_srcu_expedited(struct srcu_struct *sp) |
| 922 | { |
Paul E. McKenney | 1e9a038 | 2017-04-24 16:02:09 -0700 | [diff] [blame] | 923 | __synchronize_srcu(sp, rcu_gp_is_normal()); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 924 | } |
| 925 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); |
| 926 | |
| 927 | /** |
| 928 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion |
| 929 | * @sp: srcu_struct with which to synchronize. |
| 930 | * |
| 931 | * Wait for the count to drain to zero of both indexes. To avoid the |
| 932 | * possible starvation of synchronize_srcu(), it waits for the count of |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 933 | * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first, |
| 934 | * and then flip the srcu_idx and wait for the count of the other index. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 935 | * |
| 936 | * Can block; must be called from process context. |
| 937 | * |
| 938 | * Note that it is illegal to call synchronize_srcu() from the corresponding |
| 939 | * SRCU read-side critical section; doing so will result in deadlock. |
| 940 | * However, it is perfectly legal to call synchronize_srcu() on one |
| 941 | * srcu_struct from some other srcu_struct's read-side critical section, |
| 942 | * as long as the resulting graph of srcu_structs is acyclic. |
| 943 | * |
| 944 | * There are memory-ordering constraints implied by synchronize_srcu(). |
| 945 | * On systems with more than one CPU, when synchronize_srcu() returns, |
| 946 | * each CPU is guaranteed to have executed a full memory barrier since |
| 947 | * the end of its last corresponding SRCU-sched read-side critical section |
| 948 | * whose beginning preceded the call to synchronize_srcu(). In addition, |
| 949 | * each CPU having an SRCU read-side critical section that extends beyond |
| 950 | * the return from synchronize_srcu() is guaranteed to have executed a |
| 951 | * full memory barrier after the beginning of synchronize_srcu() and before |
| 952 | * the beginning of that SRCU read-side critical section. Note that these |
| 953 | * guarantees include CPUs that are offline, idle, or executing in user mode, |
| 954 | * as well as CPUs that are executing in the kernel. |
| 955 | * |
| 956 | * Furthermore, if CPU A invoked synchronize_srcu(), which returned |
| 957 | * to its caller on CPU B, then both CPU A and CPU B are guaranteed |
| 958 | * to have executed a full memory barrier during the execution of |
| 959 | * synchronize_srcu(). This guarantee applies even if CPU A and CPU B |
| 960 | * are the same CPU, but again only if the system has more than one CPU. |
| 961 | * |
| 962 | * Of course, these memory-ordering guarantees apply only when |
| 963 | * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are |
| 964 | * passed the same srcu_struct structure. |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 965 | * |
| 966 | * If SRCU is likely idle, expedite the first request. This semantic |
| 967 | * was provided by Classic SRCU, and is relied upon by its users, so TREE |
| 968 | * SRCU must also provide it. Note that detecting idleness is heuristic |
| 969 | * and subject to both false positives and negatives. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 970 | */ |
| 971 | void synchronize_srcu(struct srcu_struct *sp) |
| 972 | { |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 973 | if (srcu_might_be_idle(sp) || rcu_gp_is_expedited()) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 974 | synchronize_srcu_expedited(sp); |
| 975 | else |
Paul E. McKenney | 1e9a038 | 2017-04-24 16:02:09 -0700 | [diff] [blame] | 976 | __synchronize_srcu(sp, true); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 977 | } |
| 978 | EXPORT_SYMBOL_GPL(synchronize_srcu); |
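/*
 * Illustrative sketch (not part of this file): a hypothetical updater
 * for a pointer "gp" protected by a hypothetical srcu_struct "my_srcu".
 * Readers use srcu_read_lock(), srcu_dereference(), and
 * srcu_read_unlock(); the updater publishes the new version and then
 * waits for pre-existing readers before freeing the old one:
 *
 *	oldp = gp;
 *	rcu_assign_pointer(gp, newp);
 *	synchronize_srcu(&my_srcu);
 *	kfree(oldp);
 *
 * Because synchronize_srcu() can block, this sequence must run in
 * process context and, per the deadlock rules above, must not itself
 * execute within a my_srcu read-side critical section.
 */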
| 979 | |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 980 | /* |
| 981 | * Callback function for srcu_barrier() use. |
| 982 | */ |
| 983 | static void srcu_barrier_cb(struct rcu_head *rhp) |
| 984 | { |
| 985 | struct srcu_data *sdp; |
| 986 | struct srcu_struct *sp; |
| 987 | |
| 988 | sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); |
| 989 | sp = sdp->sp; |
| 990 | if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) |
| 991 | complete(&sp->srcu_barrier_completion); |
| 992 | } |
| 993 | |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 994 | /** |
| 995 | * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. |
| 996 | * @sp: srcu_struct on which to wait for in-flight callbacks. |
| 997 | */ |
| 998 | void srcu_barrier(struct srcu_struct *sp) |
| 999 | { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1000 | int cpu; |
| 1001 | struct srcu_data *sdp; |
| 1002 | unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq); |
| 1003 | |
| 1004 | check_init_srcu_struct(sp); |
| 1005 | mutex_lock(&sp->srcu_barrier_mutex); |
| 1006 | if (rcu_seq_done(&sp->srcu_barrier_seq, s)) { |
| 1007 | smp_mb(); /* Force ordering following return. */ |
| 1008 | mutex_unlock(&sp->srcu_barrier_mutex); |
| 1009 | return; /* Someone else did our work for us. */ |
| 1010 | } |
| 1011 | rcu_seq_start(&sp->srcu_barrier_seq); |
| 1012 | init_completion(&sp->srcu_barrier_completion); |
| 1013 | |
| 1014 | /* Initial count prevents reaching zero until all CBs are posted. */ |
| 1015 | atomic_set(&sp->srcu_barrier_cpu_cnt, 1); |
| 1016 | |
| 1017 | /* |
| 1018 | * Each pass through this loop enqueues a callback, but only |
| 1019 | * on CPUs already having callbacks enqueued. Note that if |
| 1020 | * a CPU already has callbacks enqueued, it must have already |
| 1021 | * registered the need for a future grace period, so all we |
| 1022 | * need do is enqueue a callback that will use the same |
| 1023 | * grace period as the last callback already in the queue. |
| 1024 | */ |
| 1025 | for_each_possible_cpu(cpu) { |
| 1026 | sdp = per_cpu_ptr(sp->sda, cpu); |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1027 | raw_spin_lock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1028 | atomic_inc(&sp->srcu_barrier_cpu_cnt); |
| 1029 | sdp->srcu_barrier_head.func = srcu_barrier_cb; |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1030 | debug_rcu_head_queue(&sdp->srcu_barrier_head); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1031 | if (!rcu_segcblist_entrain(&sdp->srcu_cblist, |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1032 | &sdp->srcu_barrier_head, 0)) { |
| 1033 | debug_rcu_head_unqueue(&sdp->srcu_barrier_head); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1034 | atomic_dec(&sp->srcu_barrier_cpu_cnt); |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1035 | } |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1036 | raw_spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1037 | } |
| 1038 | |
| 1039 | /* Remove the initial count, at which point reaching zero can happen. */ |
| 1040 | if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt)) |
| 1041 | complete(&sp->srcu_barrier_completion); |
| 1042 | wait_for_completion(&sp->srcu_barrier_completion); |
| 1043 | |
| 1044 | rcu_seq_end(&sp->srcu_barrier_seq); |
| 1045 | mutex_unlock(&sp->srcu_barrier_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1046 | } |
| 1047 | EXPORT_SYMBOL_GPL(srcu_barrier); |
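/*
 * Illustrative sketch (not part of this file): a hypothetical teardown
 * path that has posted callbacks with call_srcu() and must ensure that
 * all of them have been invoked before tearing down the hypothetical
 * srcu_struct "my_srcu" itself:
 *
 *	call_srcu(&my_srcu, &p->rh, free_my_object);
 *	...
 *	srcu_barrier(&my_srcu);
 *	cleanup_srcu_struct(&my_srcu);
 *
 * As with rcu_barrier(), srcu_barrier() is guaranteed to wait only for
 * callbacks posted before it was invoked, so no new call_srcu() calls
 * on my_srcu may race with this sequence.
 */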
| 1048 | |
| 1049 | /** |
| 1050 | * srcu_batches_completed - return batches completed. |
| 1051 | * @sp: srcu_struct on which to report batch completion. |
| 1052 | * |
| 1053 | * Report the number of batches, correlated with, but not necessarily |
| 1054 | * precisely the same as, the number of grace periods that have elapsed. |
| 1055 | */ |
| 1056 | unsigned long srcu_batches_completed(struct srcu_struct *sp) |
| 1057 | { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1058 | return sp->srcu_idx; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1059 | } |
| 1060 | EXPORT_SYMBOL_GPL(srcu_batches_completed); |
| 1061 | |
| 1062 | /* |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1063 | * Core SRCU state machine. Push state bits of ->srcu_gp_seq |
| 1064 | * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when the scan has |
| 1065 | * completed in that state. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1066 | */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1067 | static void srcu_advance_state(struct srcu_struct *sp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1068 | { |
| 1069 | int idx; |
| 1070 | |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1071 | mutex_lock(&sp->srcu_gp_mutex); |
| 1072 | |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1073 | /* |
| 1074 | * Because readers might be delayed for an extended period after |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1075 | * fetching ->srcu_idx for their index, at any point in time there |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1076 | * might well be readers using both idx=0 and idx=1. We therefore |
| 1077 | * need to wait for readers to clear from both index values before |
| 1078 | * invoking a callback. |
| 1079 | * |
| 1080 | * The load-acquire ensures that we see the accesses performed |
| 1081 | * by the prior grace period. |
| 1082 | */ |
| 1083 | idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */ |
| 1084 | if (idx == SRCU_STATE_IDLE) { |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1085 | raw_spin_lock_irq_rcu_node(sp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1086 | if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { |
| 1087 | WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq)); |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1088 | raw_spin_unlock_irq_rcu_node(sp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1089 | mutex_unlock(&sp->srcu_gp_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1090 | return; |
| 1091 | } |
| 1092 | idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)); |
| 1093 | if (idx == SRCU_STATE_IDLE) |
| 1094 | srcu_gp_start(sp); |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1095 | raw_spin_unlock_irq_rcu_node(sp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1096 | if (idx != SRCU_STATE_IDLE) { |
| 1097 | mutex_unlock(&sp->srcu_gp_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1098 | return; /* Someone else started the grace period. */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1099 | } |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1100 | } |
| 1101 | |
| 1102 | if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1103 | idx = 1 ^ (sp->srcu_idx & 1); |
| 1104 | if (!try_check_zero(sp, idx, 1)) { |
| 1105 | mutex_unlock(&sp->srcu_gp_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1106 | return; /* readers present, retry later. */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1107 | } |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1108 | srcu_flip(sp); |
| 1109 | rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2); |
| 1110 | } |
| 1111 | |
| 1112 | if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { |
| 1113 | |
| 1114 | /* |
| 1115 | * SRCU read-side critical sections are normally short, |
| 1116 | * so check at least twice in quick succession after a flip. |
| 1117 | */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1118 | idx = 1 ^ (sp->srcu_idx & 1); |
| 1119 | if (!try_check_zero(sp, idx, 2)) { |
| 1120 | mutex_unlock(&sp->srcu_gp_mutex); |
| 1121 | return; /* readers present, retry later. */ |
| 1122 | } |
| 1123 | srcu_gp_end(sp); /* Releases ->srcu_gp_mutex. */ |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1124 | } |
| 1125 | } |
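/*
 * To summarize, a single grace period driven by srcu_advance_state()
 * proceeds roughly as follows (a sketch that elides the request/funnel
 * handling done elsewhere in this file):
 *
 *	SRCU_STATE_IDLE:  srcu_gp_start() starts a new grace period.
 *	SRCU_STATE_SCAN1: try_check_zero() waits for readers on the
 *			  inactive index to drain, then srcu_flip()
 *			  exchanges the active and inactive indexes.
 *	SRCU_STATE_SCAN2: try_check_zero() waits for readers on the
 *			  newly inactive index, then srcu_gp_end()
 *			  completes the grace period and schedules
 *			  callback invocation.
 */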
| 1126 | |
| 1127 | /* |
| 1128 | * Invoke a limited number of SRCU callbacks that have passed through |
| 1129 | * their grace period. If there are more to do, SRCU will reschedule |
| 1130 | * the workqueue. Note that needed memory barriers have been executed |
| 1131 | * in this task's context by srcu_readers_active_idx_check(). |
| 1132 | */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1133 | static void srcu_invoke_callbacks(struct work_struct *work) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1134 | { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1135 | bool more; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1136 | struct rcu_cblist ready_cbs; |
| 1137 | struct rcu_head *rhp; |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1138 | struct srcu_data *sdp; |
| 1139 | struct srcu_struct *sp; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1140 | |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1141 | sdp = container_of(work, struct srcu_data, work.work); |
| 1142 | sp = sdp->sp; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1143 | rcu_cblist_init(&ready_cbs); |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1144 | raw_spin_lock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1145 | rcu_segcblist_advance(&sdp->srcu_cblist, |
| 1146 | rcu_seq_current(&sp->srcu_gp_seq)); |
| 1147 | if (sdp->srcu_cblist_invoking || |
| 1148 | !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1149 | raw_spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1150 | return; /* Someone else on the job or nothing to do. */ |
| 1151 | } |
| 1152 | |
| 1153 | /* We are on the job! Extract and invoke ready callbacks. */ |
| 1154 | sdp->srcu_cblist_invoking = true; |
| 1155 | rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1156 | raw_spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1157 | rhp = rcu_cblist_dequeue(&ready_cbs); |
| 1158 | for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1159 | debug_rcu_head_unqueue(rhp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1160 | local_bh_disable(); |
| 1161 | rhp->func(rhp); |
| 1162 | local_bh_enable(); |
| 1163 | } |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1164 | |
| 1165 | /* |
| 1166 | * Update counts, accelerate new callbacks, and if needed, |
| 1167 | * schedule another round of callback invocation. |
| 1168 | */ |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1169 | raw_spin_lock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1170 | rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs); |
| 1171 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
| 1172 | rcu_seq_snap(&sp->srcu_gp_seq)); |
| 1173 | sdp->srcu_cblist_invoking = false; |
| 1174 | more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1175 | raw_spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1176 | if (more) |
| 1177 | srcu_schedule_cbs_sdp(sdp, 0); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1178 | } |
| 1179 | |
| 1180 | /* |
| 1181 | * Finished one round of SRCU grace period. Start another if there are |
| 1182 | * more SRCU callbacks queued, otherwise put SRCU into not-running state. |
| 1183 | */ |
| 1184 | static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay) |
| 1185 | { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1186 | bool pushgp = true; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1187 | |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1188 | raw_spin_lock_irq_rcu_node(sp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1189 | if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) { |
| 1190 | if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) { |
| 1191 | /* All requests fulfilled, time to go idle. */ |
| 1192 | pushgp = false; |
| 1193 | } |
| 1194 | } else if (!rcu_seq_state(sp->srcu_gp_seq)) { |
| 1195 | /* Outstanding request and no GP. Start one. */ |
| 1196 | srcu_gp_start(sp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1197 | } |
Paul E. McKenney | a3883df | 2017-05-09 15:00:14 -0700 | [diff] [blame] | 1198 | raw_spin_unlock_irq_rcu_node(sp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1199 | |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1200 | if (pushgp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1201 | queue_delayed_work(system_power_efficient_wq, &sp->work, delay); |
| 1202 | } |
| 1203 | |
| 1204 | /* |
| 1205 | * This is the work-queue function that handles SRCU grace periods. |
| 1206 | */ |
Paul E. McKenney | 0d8a1e8 | 2017-06-15 17:06:38 -0700 | [diff] [blame] | 1207 | static void process_srcu(struct work_struct *work) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1208 | { |
| 1209 | struct srcu_struct *sp; |
| 1210 | |
| 1211 | sp = container_of(work, struct srcu_struct, work.work); |
| 1212 | |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1213 | srcu_advance_state(sp); |
Paul E. McKenney | 1e9a038 | 2017-04-24 16:02:09 -0700 | [diff] [blame] | 1214 | srcu_reschedule(sp, srcu_get_delay(sp)); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1215 | } |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1216 | |
| 1217 | void srcutorture_get_gp_data(enum rcutorture_type test_type, |
Paul E. McKenney | 1e9a038 | 2017-04-24 16:02:09 -0700 | [diff] [blame] | 1218 | struct srcu_struct *sp, int *flags, |
| 1219 | unsigned long *gpnum, unsigned long *completed) |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1220 | { |
| 1221 | if (test_type != SRCU_FLAVOR) |
| 1222 | return; |
| 1223 | *flags = 0; |
| 1224 | *completed = rcu_seq_ctr(sp->srcu_gp_seq); |
| 1225 | *gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed); |
| 1226 | } |
| 1227 | EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); |
Paul E. McKenney | 1f4f6da | 2017-04-21 11:16:32 -0700 | [diff] [blame] | 1228 | |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1229 | void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf) |
| 1230 | { |
| 1231 | int cpu; |
| 1232 | int idx; |
Paul E. McKenney | ac3748c | 2017-05-22 13:59:52 -0700 | [diff] [blame] | 1233 | unsigned long s0 = 0, s1 = 0; |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1234 | |
| 1235 | idx = sp->srcu_idx & 0x1; |
| 1236 | pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx); |
| 1237 | for_each_possible_cpu(cpu) { |
| 1238 | unsigned long l0, l1; |
| 1239 | unsigned long u0, u1; |
| 1240 | long c0, c1; |
| 1241 | struct srcu_data *counts; |
| 1242 | |
| 1243 | counts = per_cpu_ptr(sp->sda, cpu); |
| 1244 | u0 = counts->srcu_unlock_count[!idx]; |
| 1245 | u1 = counts->srcu_unlock_count[idx]; |
| 1246 | |
| 1247 | /* |
| 1248 | * Make sure that a lock is always counted if the corresponding |
| 1249 | * unlock is counted. |
| 1250 | */ |
| 1251 | smp_rmb(); |
| 1252 | |
| 1253 | l0 = counts->srcu_lock_count[!idx]; |
| 1254 | l1 = counts->srcu_lock_count[idx]; |
| 1255 | |
| 1256 | c0 = l0 - u0; |
| 1257 | c1 = l1 - u1; |
| 1258 | pr_cont(" %d(%ld,%ld)", cpu, c0, c1); |
Paul E. McKenney | ac3748c | 2017-05-22 13:59:52 -0700 | [diff] [blame] | 1259 | s0 += c0; |
| 1260 | s1 += c1; |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1261 | } |
Paul E. McKenney | ac3748c | 2017-05-22 13:59:52 -0700 | [diff] [blame] | 1262 | pr_cont(" T(%ld,%ld)\n", s0, s1); |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1263 | } |
| 1264 | EXPORT_SYMBOL_GPL(srcu_torture_stats_print); |
| 1265 | |
Paul E. McKenney | 1f4f6da | 2017-04-21 11:16:32 -0700 | [diff] [blame] | 1266 | static int __init srcu_bootup_announce(void) |
| 1267 | { |
| 1268 | pr_info("Hierarchical SRCU implementation.\n"); |
Paul E. McKenney | 0c8e0e3 | 2017-04-28 11:24:22 -0700 | [diff] [blame] | 1269 | if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) |
| 1270 | pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); |
Paul E. McKenney | 1f4f6da | 2017-04-21 11:16:32 -0700 | [diff] [blame] | 1271 | return 0; |
| 1272 | } |
| 1273 | early_initcall(srcu_bootup_announce); |
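/*
 * Usage note (an assumption about the usual module-parameter naming for
 * this file, not verified here): because exp_holdoff is declared with
 * mode 0444, it is read-only at run time and is instead set at boot,
 * for example with:
 *
 *	srcutree.exp_holdoff=50000
 *
 * on the kernel command line, which would double the default 25000ns
 * auto-expedite holdoff reported by srcu_bootup_announce() above.
 */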