// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);
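
/*
 * Illustrative note (not part of the algorithm): both module parameters
 * above use mode 0444, so they are read-only at run time and are normally
 * set on the kernel command line.  Assuming the usual "srcutree" object
 * name supplies the parameter prefix, that would look like:
 *
 *	srcutree.exp_holdoff=0		# no holdoff before auto-expediting
 *	srcutree.counter_wrap_check=255	# illustrative value only
 *
 * Treat the exact prefix and the example values as assumptions of this
 * sketch rather than guarantees made by this file.
 */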

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)					\
do {								\
	spin_lock(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)				\
do {								\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));		\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irq_rcu_node(p)				\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)			\
do {								\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);	\
	smp_mb__after_unlock_lock();				\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)		\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize the SRCU combining tree and per-CPU srcu_data structures.
 * Note that statically allocated srcu_struct structures might already
 * have srcu_read_lock() and srcu_read_unlock() running against them,
 * which is why this function leaves the ->srcu_lock_count[] and
 * ->srcu_unlock_count[] arrays strictly alone.
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
}
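
/*
 * Worked example of the resulting geometry (illustrative only, assuming
 * the common defaults of RCU_FANOUT=64 and RCU_FANOUT_LEAF=16 on a
 * 64-bit build): a 256-CPU system gets a two-level combining tree with
 * one root srcu_node and 16 leaf srcu_node structures, each leaf
 * covering 16 srcu_data structures (one per CPU), so leaf k spans CPUs
 * 16*k through 16*k+15 via ->grplo/->grphi.  Different Kconfig choices
 * or CPU counts change these numbers.
 */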

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter tells us that ->sda has already been wired up to srcu_data
 * for a statically allocated srcu_struct, so no allocation is needed
 * in that case.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_nodes(ssp);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
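
/*
 * Minimal usage sketch of the SRCU API defined above (illustrative only;
 * "my_srcu", "my_data", and the surrounding reader/updater code are
 * hypothetical and not part of this file):
 *
 *	static struct srcu_struct my_srcu;	// or DEFINE_SRCU(my_srcu)
 *	static struct foo __rcu *my_data;
 *
 *	// One-time setup for the dynamically initialized form:
 *	//	init_srcu_struct(&my_srcu);
 *
 *	// Reader (may block inside the critical section):
 *	//	int idx = srcu_read_lock(&my_srcu);
 *	//	p = srcu_dereference(my_data, &my_srcu);
 *	//	... use p, possibly sleeping ...
 *	//	srcu_read_unlock(&my_srcu, idx);
 *
 *	// Updater:
 *	//	rcu_assign_pointer(my_data, new_p);
 *	//	synchronize_srcu(&my_srcu);	// or call_srcu(...)
 *	//	kfree(old_p);
 *
 *	// Teardown, after srcu_barrier() if call_srcu() was used:
 *	//	cleanup_srcu_struct(&my_srcu);
 */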

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
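
/*
 * To put rough numbers on the bounds discussed above (illustrative
 * arithmetic, assuming a 64-bit unsigned long and NR_CPUS = 8192): the
 * at most 2*NR_CPUS stray increments come to 16384, vastly smaller than
 * ULONG_MAX (about 1.8e19), and the nesting limit of
 * floor(ULONG_MAX/NR_CPUS/2) still exceeds 1e15 levels, far deeper than
 * any real kernel stack could nest.
 */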

/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
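
/*
 * Because srcu_read_unlock() needs only the index returned by
 * srcu_read_lock(), a reader may sleep, migrate, or even hand the index
 * to another context between the two calls; the lock and unlock
 * increments then land in different CPUs' srcu_data counters and are
 * reconciled by the summations above.  A hypothetical sketch (names are
 * illustrative, not from this file):
 *
 *	int idx = srcu_read_lock(&my_srcu);
 *	...				// may sleep and migrate CPUs
 *	srcu_read_unlock(&my_srcu, idx);	// possibly on another CPU
 */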

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
						      ? SRCU_INTERVAL
						      : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}
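
/*
 * Illustrative arithmetic for the loop above, assuming a caller-supplied
 * trycount of 2 (an assumption for this sketch, not something established
 * in this file): with no expedited grace period pending, the loop performs
 * at most two srcu_readers_active_idx_check() scans separated by a single
 * SRCU_RETRY_CHECK_DELAY (5 microsecond) spin before giving up, and a
 * pending expedited grace period buys one additional scan.
 */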

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 756 | static bool srcu_might_be_idle(struct srcu_struct *ssp) |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 757 | { |
Paul E. McKenney | 22607d6 | 2017-04-25 14:03:11 -0700 | [diff] [blame] | 758 | unsigned long curseq; |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 759 | unsigned long flags; |
| 760 | struct srcu_data *sdp; |
Paul E. McKenney | 22607d6 | 2017-04-25 14:03:11 -0700 | [diff] [blame] | 761 | unsigned long t; |
Paul E. McKenney | 844a378 | 2019-11-04 08:08:30 -0800 | [diff] [blame] | 762 | unsigned long tlast; |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 763 | |
Sebastian Andrzej Siewior | bde50d8 | 2020-05-26 15:41:34 +0200 | [diff] [blame] | 764 | check_init_srcu_struct(ssp); |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 765 | /* If the local srcu_data structure has callbacks, not idle. */ |
Sebastian Andrzej Siewior | bde50d8 | 2020-05-26 15:41:34 +0200 | [diff] [blame] | 766 | sdp = raw_cpu_ptr(ssp->sda); |
| 767 | spin_lock_irqsave_rcu_node(sdp, flags); |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 768 | if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) { |
Sebastian Andrzej Siewior | bde50d8 | 2020-05-26 15:41:34 +0200 | [diff] [blame] | 769 | spin_unlock_irqrestore_rcu_node(sdp, flags); |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 770 | return false; /* Callbacks already present, so not idle. */ |
| 771 | } |
Sebastian Andrzej Siewior | bde50d8 | 2020-05-26 15:41:34 +0200 | [diff] [blame] | 772 | spin_unlock_irqrestore_rcu_node(sdp, flags); |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 773 | |
| 774 | /* |
Ingo Molnar | a616aec | 2021-03-22 22:29:10 -0700 | [diff] [blame] | 775 | * No local callbacks, so probabilistically probe global state. |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 776 | * Exact information would require acquiring locks, which would |
Ingo Molnar | a616aec | 2021-03-22 22:29:10 -0700 | [diff] [blame] | 777 | * kill scalability, hence the probabilistic nature of the probe. |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 778 | */ |
Paul E. McKenney | 22607d6 | 2017-04-25 14:03:11 -0700 | [diff] [blame] | 779 | |
| 780 | /* First, see if enough time has passed since the last GP. */ |
| 781 | t = ktime_get_mono_fast_ns(); |
Paul E. McKenney | 844a378 | 2019-11-04 08:08:30 -0800 | [diff] [blame] | 782 | tlast = READ_ONCE(ssp->srcu_last_gp_end); |
Paul E. McKenney | 22607d6 | 2017-04-25 14:03:11 -0700 | [diff] [blame] | 783 | if (exp_holdoff == 0 || |
Paul E. McKenney | 844a378 | 2019-11-04 08:08:30 -0800 | [diff] [blame] | 784 | time_in_range_open(t, tlast, tlast + exp_holdoff)) |
Paul E. McKenney | 22607d6 | 2017-04-25 14:03:11 -0700 | [diff] [blame] | 785 | return false; /* Too soon after last GP. */ |
| 786 | |
| 787 | /* Next, check for probable idleness. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 788 | curseq = rcu_seq_current(&ssp->srcu_gp_seq); |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 789 | smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 790 | if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed))) |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 791 | return false; /* Grace period in progress, so not idle. */ |
| 792 | smp_mb(); /* Order ->srcu_gp_seq with prior access. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 793 | if (curseq != rcu_seq_current(&ssp->srcu_gp_seq)) |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 794 | return false; /* GP # changed, so not idle. */ |
| 795 | return true; /* With reasonable probability, idle! */ |
| 796 | } |
| 797 | |
| 798 | /* |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 799 | * SRCU callback function to leak a callback. |
| 800 | */ |
| 801 | static void srcu_leak_callback(struct rcu_head *rhp) |
| 802 | { |
| 803 | } |
| 804 | |
| 805 | /* |
Paul E. McKenney | 29d2bb9 | 2020-11-13 10:08:09 -0800 | [diff] [blame] | 806 | * Start an SRCU grace period if needed, and also queue the callback if non-NULL. |
| 807 | */ |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 808 | static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp, |
| 809 | struct rcu_head *rhp, bool do_norm) |
Paul E. McKenney | 29d2bb9 | 2020-11-13 10:08:09 -0800 | [diff] [blame] | 810 | { |
| 811 | unsigned long flags; |
| 812 | int idx; |
| 813 | bool needexp = false; |
| 814 | bool needgp = false; |
| 815 | unsigned long s; |
| 816 | struct srcu_data *sdp; |
| 817 | |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 818 | check_init_srcu_struct(ssp); |
Paul E. McKenney | 29d2bb9 | 2020-11-13 10:08:09 -0800 | [diff] [blame] | 819 | idx = srcu_read_lock(ssp); |
| 820 | sdp = raw_cpu_ptr(ssp->sda); |
| 821 | spin_lock_irqsave_rcu_node(sdp, flags); |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 822 | if (rhp) |
| 823 | rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); |
Paul E. McKenney | 29d2bb9 | 2020-11-13 10:08:09 -0800 | [diff] [blame] | 824 | rcu_segcblist_advance(&sdp->srcu_cblist, |
| 825 | rcu_seq_current(&ssp->srcu_gp_seq)); |
| 826 | s = rcu_seq_snap(&ssp->srcu_gp_seq); |
| 827 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); |
| 828 | if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { |
| 829 | sdp->srcu_gp_seq_needed = s; |
| 830 | needgp = true; |
| 831 | } |
| 832 | if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { |
| 833 | sdp->srcu_gp_seq_needed_exp = s; |
| 834 | needexp = true; |
| 835 | } |
| 836 | spin_unlock_irqrestore_rcu_node(sdp, flags); |
| 837 | if (needgp) |
| 838 | srcu_funnel_gp_start(ssp, sdp, s, do_norm); |
| 839 | else if (needexp) |
| 840 | srcu_funnel_exp_start(ssp, sdp->mynode, s); |
| 841 | srcu_read_unlock(ssp, idx); |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 842 | return s; |
Paul E. McKenney | 29d2bb9 | 2020-11-13 10:08:09 -0800 | [diff] [blame] | 843 | } |
| 844 | |
| 845 | /* |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 846 | * Enqueue an SRCU callback on the srcu_data structure associated with |
| 847 | * the current CPU and the specified srcu_struct structure, initiating |
| 848 | * grace-period processing if it is not already running. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 849 | * |
| 850 | * Note that all CPUs must agree that the grace period extended beyond |
| 851 | * all pre-existing SRCU read-side critical sections. On systems with |
| 852 | * more than one CPU, this means that when "func()" is invoked, each CPU |
| 853 | * is guaranteed to have executed a full memory barrier since the end of |
| 854 | * its last corresponding SRCU read-side critical section whose beginning |
Paul E. McKenney | 5ef98a6 | 2018-04-24 21:30:13 -0700 | [diff] [blame] | 855 | * preceded the call to call_srcu(). It also means that each CPU executing |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 856 | * an SRCU read-side critical section that continues beyond the start of |
Paul E. McKenney | 5ef98a6 | 2018-04-24 21:30:13 -0700 | [diff] [blame] | 857 | * "func()" must have executed a memory barrier after the call_srcu() |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 858 | * but before the beginning of that SRCU read-side critical section. |
| 859 | * Note that these guarantees include CPUs that are offline, idle, or |
| 860 | * executing in user mode, as well as CPUs that are executing in the kernel. |
| 861 | * |
Paul E. McKenney | 5ef98a6 | 2018-04-24 21:30:13 -0700 | [diff] [blame] | 862 | * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 863 | * resulting SRCU callback function "func()", then both CPU A and CPU |
| 864 | * B are guaranteed to execute a full memory barrier during the time |
Paul E. McKenney | 5ef98a6 | 2018-04-24 21:30:13 -0700 | [diff] [blame] | 865 | * interval between the call to call_srcu() and the invocation of "func()". |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 866 | * This guarantee applies even if CPU A and CPU B are the same CPU (but |
| 867 | * again only if the system has more than one CPU). |
| 868 | * |
| 869 | * Of course, these guarantees apply only for invocations of call_srcu(), |
| 870 | * srcu_read_lock(), and srcu_read_unlock() that are all passed the same |
| 871 | * srcu_struct structure. |
| 872 | */ |
Jiang Biao | 11b0004 | 2019-04-23 09:22:56 +0800 | [diff] [blame] | 873 | static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, |
| 874 | rcu_callback_t func, bool do_norm) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 875 | { |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 876 | if (debug_rcu_head_queue(rhp)) { |
| 877 | /* Probable double call_srcu(), so leak the callback. */ |
| 878 | WRITE_ONCE(rhp->func, srcu_leak_callback); |
| 879 | WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n"); |
| 880 | return; |
| 881 | } |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 882 | rhp->func = func; |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 883 | (void)srcu_gp_start_if_needed(ssp, rhp, do_norm); |
Paul E. McKenney | 1e9a038 | 2017-04-24 16:02:09 -0700 | [diff] [blame] | 884 | } |
| 885 | |
Paul E. McKenney | 5a0465e | 2017-05-04 11:31:04 -0700 | [diff] [blame] | 886 | /** |
| 887 | * call_srcu() - Queue a callback for invocation after an SRCU grace period |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 888 | * @ssp: srcu_struct on which to queue the callback |
Paul E. McKenney | 27fdb35 | 2017-10-19 14:26:21 -0700 | [diff] [blame] | 889 | * @rhp: structure to be used for queueing the SRCU callback. |
Paul E. McKenney | 5a0465e | 2017-05-04 11:31:04 -0700 | [diff] [blame] | 890 | * @func: function to be invoked after the SRCU grace period |
| 891 | * |
| 892 | * The callback function will be invoked some time after a full SRCU |
| 893 | * grace period elapses, in other words after all pre-existing SRCU |
| 894 | * read-side critical sections have completed. However, the callback |
| 895 | * function might well execute concurrently with other SRCU read-side |
| 896 | * critical sections that started after call_srcu() was invoked. SRCU |
| 897 | * read-side critical sections are delimited by srcu_read_lock() and |
| 898 | * srcu_read_unlock(), and may be nested. |
| 899 | * |
| 900 | * The callback will be invoked from process context, but must nevertheless |
| 901 | * be fast and must not block. |
| 902 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 903 | void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, |
Paul E. McKenney | 1e9a038 | 2017-04-24 16:02:09 -0700 | [diff] [blame] | 904 | rcu_callback_t func) |
| 905 | { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 906 | __call_srcu(ssp, rhp, func, true); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 907 | } |
| 908 | EXPORT_SYMBOL_GPL(call_srcu); |
| 909 | |
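/*
 * Illustrative usage sketch (not part of this file): one hypothetical way
 * to use call_srcu() to free an object only after all pre-existing SRCU
 * readers have finished with it.  The srcu_struct "example_srcu", the
 * structure "struct example_node", and the functions below are assumptions
 * made purely for this sketch; <linux/slab.h> is assumed for kfree().
 */
DEFINE_STATIC_SRCU(example_srcu);

struct example_node {
	int value;
	struct rcu_head rh;
};

/* Runs in process (workqueue) context after a full SRCU grace period. */
static void example_node_free_cb(struct rcu_head *rhp)
{
	kfree(container_of(rhp, struct example_node, rh));
}

/*
 * Readers bracket their accesses with srcu_read_lock(&example_srcu) and
 * srcu_read_unlock(&example_srcu, idx); the callback above is invoked only
 * once every such pre-existing reader has completed.
 */
static void example_node_retire(struct example_node *np)
{
	call_srcu(&example_srcu, &np->rh, example_node_free_cb);
}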
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 910 | /* |
| 911 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). |
| 912 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 913 | static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 914 | { |
| 915 | struct rcu_synchronize rcu; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 916 | |
Jakub Kicinski | f505d43 | 2020-09-16 11:45:26 -0700 | [diff] [blame] | 917 | RCU_LOCKDEP_WARN(lockdep_is_held(ssp) || |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 918 | lock_is_held(&rcu_bh_lock_map) || |
| 919 | lock_is_held(&rcu_lock_map) || |
| 920 | lock_is_held(&rcu_sched_lock_map), |
| 921 | "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); |
| 922 | |
| 923 | if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) |
| 924 | return; |
| 925 | might_sleep(); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 926 | check_init_srcu_struct(ssp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 927 | init_completion(&rcu.completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 928 | init_rcu_head_on_stack(&rcu.head); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 929 | __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 930 | wait_for_completion(&rcu.completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 931 | destroy_rcu_head_on_stack(&rcu.head); |
Paul E. McKenney | 35732cf | 2017-07-05 13:30:21 -0700 | [diff] [blame] | 932 | |
| 933 | /* |
| 934 | * Make sure that later code is ordered after the SRCU grace |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 935 | * period. This pairs with the spin_lock_irq_rcu_node() |
Paul E. McKenney | 35732cf | 2017-07-05 13:30:21 -0700 | [diff] [blame] | 936 | * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed |
| 937 | * because the current CPU might have been totally uninvolved with |
| 938 | * (and thus unordered against) that grace period. |
| 939 | */ |
| 940 | smp_mb(); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 941 | } |
| 942 | |
| 943 | /** |
| 944 | * synchronize_srcu_expedited - Brute-force SRCU grace period |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 945 | * @ssp: srcu_struct with which to synchronize. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 946 | * |
| 947 | * Wait for an SRCU grace period to elapse, but be more aggressive about |
| 948 | * spinning rather than blocking when waiting. |
| 949 | * |
| 950 | * Note that synchronize_srcu_expedited() has the same deadlock and |
| 951 | * memory-ordering properties as does synchronize_srcu(). |
| 952 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 953 | void synchronize_srcu_expedited(struct srcu_struct *ssp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 954 | { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 955 | __synchronize_srcu(ssp, rcu_gp_is_normal()); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 956 | } |
| 957 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); |
| 958 | |
| 959 | /** |
| 960 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 961 | * @ssp: srcu_struct with which to synchronize. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 962 | * |
| 963 | * Wait for the counts of both indexes to drain to zero. To avoid |
| 964 | * possible starvation of synchronize_srcu(), it first waits for the |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 965 | * count of index=((->srcu_idx & 1) ^ 1) to drain to zero, and then |
| 966 | * flips ->srcu_idx and waits for the count of the other index to drain. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 967 | * |
| 968 | * Can block; must be called from process context. |
| 969 | * |
| 970 | * Note that it is illegal to call synchronize_srcu() from the corresponding |
| 971 | * SRCU read-side critical section; doing so will result in deadlock. |
| 972 | * However, it is perfectly legal to call synchronize_srcu() on one |
| 973 | * srcu_struct from some other srcu_struct's read-side critical section, |
| 974 | * as long as the resulting graph of srcu_structs is acyclic. |
| 975 | * |
| 976 | * There are memory-ordering constraints implied by synchronize_srcu(). |
| 977 | * On systems with more than one CPU, when synchronize_srcu() returns, |
| 978 | * each CPU is guaranteed to have executed a full memory barrier since |
Paul E. McKenney | 6eb95cc | 2018-07-07 18:12:26 -0700 | [diff] [blame] | 979 | * the end of its last corresponding SRCU read-side critical section |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 980 | * whose beginning preceded the call to synchronize_srcu(). In addition, |
| 981 | * each CPU having an SRCU read-side critical section that extends beyond |
| 982 | * the return from synchronize_srcu() is guaranteed to have executed a |
| 983 | * full memory barrier after the beginning of synchronize_srcu() and before |
| 984 | * the beginning of that SRCU read-side critical section. Note that these |
| 985 | * guarantees include CPUs that are offline, idle, or executing in user mode, |
| 986 | * as well as CPUs that are executing in the kernel. |
| 987 | * |
| 988 | * Furthermore, if CPU A invoked synchronize_srcu(), which returned |
| 989 | * to its caller on CPU B, then both CPU A and CPU B are guaranteed |
| 990 | * to have executed a full memory barrier during the execution of |
| 991 | * synchronize_srcu(). This guarantee applies even if CPU A and CPU B |
| 992 | * are the same CPU, but again only if the system has more than one CPU. |
| 993 | * |
| 994 | * Of course, these memory-ordering guarantees apply only when |
| 995 | * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are |
| 996 | * passed the same srcu_struct structure. |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 997 | * |
Paul E. McKenney | 3d3a0d1 | 2021-04-16 16:53:16 -0700 | [diff] [blame] | 998 | * Implementation of these memory-ordering guarantees is similar to |
| 999 | * that of synchronize_rcu(). |
| 1000 | * |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 1001 | * If SRCU is likely idle, expedite the first request. This semantic |
| 1002 | * was provided by Classic SRCU, and is relied upon by its users, so TREE |
| 1003 | * SRCU must also provide it. Note that detecting idleness is heuristic |
| 1004 | * and subject to both false positives and negatives. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1005 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1006 | void synchronize_srcu(struct srcu_struct *ssp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1007 | { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1008 | if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited()) |
| 1009 | synchronize_srcu_expedited(ssp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1010 | else |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1011 | __synchronize_srcu(ssp, true); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1012 | } |
| 1013 | EXPORT_SYMBOL_GPL(synchronize_srcu); |
| 1014 | |
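/*
 * Illustrative usage sketch (not part of this file): a hypothetical
 * reader/updater pair built around synchronize_srcu().  The names
 * example_srcu2, struct example_cfg, example_cfg_ptr, and the functions
 * below are assumptions made purely for this sketch; <linux/slab.h> is
 * assumed for kfree(), and the updater is assumed to be serialized by a
 * caller-held lock (hence the "1" passed to rcu_dereference_protected()).
 */
DEFINE_STATIC_SRCU(example_srcu2);

struct example_cfg {
	int setting;
};
static struct example_cfg __rcu *example_cfg_ptr;

static int example_read_setting(void)
{
	struct example_cfg *cfg;
	int idx;
	int ret = -1;

	idx = srcu_read_lock(&example_srcu2);
	cfg = srcu_dereference(example_cfg_ptr, &example_srcu2);
	if (cfg)
		ret = cfg->setting;
	srcu_read_unlock(&example_srcu2, idx);
	return ret;
}

static void example_replace_cfg(struct example_cfg *newcfg)
{
	struct example_cfg *old;

	old = rcu_dereference_protected(example_cfg_ptr, 1);
	rcu_assign_pointer(example_cfg_ptr, newcfg);
	synchronize_srcu(&example_srcu2);	/* Wait out pre-existing readers. */
	kfree(old);				/* No reader can still see "old". */
}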
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 1015 | /** |
| 1016 | * get_state_synchronize_srcu - Provide an end-of-grace-period cookie |
| 1017 | * @ssp: srcu_struct to provide cookie for. |
| 1018 | * |
| 1019 | * This function returns a cookie that can be passed to |
| 1020 | * poll_state_synchronize_srcu(), which will return true if a full grace |
| 1021 | * period has elapsed in the meantime. It is the caller's responsibility |
| 1022 | * to make sure that grace period happens, for example, by invoking |
| 1023 | * call_srcu() after return from get_state_synchronize_srcu(). |
| 1024 | */ |
| 1025 | unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp) |
| 1026 | { |
| 1027 | // Any prior manipulation of SRCU-protected data must happen |
| 1028 | // before the load from ->srcu_gp_seq. |
| 1029 | smp_mb(); |
| 1030 | return rcu_seq_snap(&ssp->srcu_gp_seq); |
| 1031 | } |
| 1032 | EXPORT_SYMBOL_GPL(get_state_synchronize_srcu); |
| 1033 | |
| 1034 | /** |
| 1035 | * start_poll_synchronize_srcu - Provide cookie and start grace period |
| 1036 | * @ssp: srcu_struct to provide cookie for. |
| 1037 | * |
| 1038 | * This function returns a cookie that can be passed to |
| 1039 | * poll_state_synchronize_srcu(), which will return true if a full grace |
| 1040 | * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(), |
| 1041 | * this function also ensures that any needed SRCU grace period will be |
| 1042 | * started. This convenience does come at a cost in terms of CPU overhead. |
| 1043 | */ |
| 1044 | unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) |
| 1045 | { |
| 1046 | return srcu_gp_start_if_needed(ssp, NULL, true); |
| 1047 | } |
| 1048 | EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); |
| 1049 | |
| 1050 | /** |
| 1051 | * poll_state_synchronize_srcu - Has cookie's grace period ended? |
| 1052 | * @ssp: srcu_struct to provide cookie for. |
| 1053 | * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu(). |
| 1054 | * |
| 1055 | * This function takes the cookie that was returned from either |
| 1056 | * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and |
| 1057 | * returns @true if an SRCU grace period elapsed since the time that the |
| 1058 | * cookie was created. |
Paul E. McKenney | 4e7ccfa | 2020-11-15 20:33:38 -0800 | [diff] [blame] | 1059 | * |
| 1060 | * Because cookies are finite in size, wrapping/overflow is possible. |
| 1061 | * This is more pronounced on 32-bit systems where cookies are 32 bits, |
| 1062 | * where in theory wrapping could happen in about 14 hours assuming |
| 1063 | * 25-microsecond expedited SRCU grace periods. However, a more likely |
| 1064 | * overflow lower bound is on the order of 24 days in the case of |
| 1065 | * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit |
| 1066 | * system requires geologic timespans, as in more than seven million years |
| 1067 | * even for expedited SRCU grace periods. |
| 1068 | * |
| 1069 | * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems |
| 1070 | * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU. This uses |
| 1071 | * a 16-bit cookie, which rcutorture routinely wraps in a matter of a |
| 1072 | * few minutes. If this proves to be a problem, this counter will be |
| 1073 | * expanded to the same size as for Tree SRCU. |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 1074 | */ |
| 1075 | bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) |
| 1076 | { |
| 1077 | if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie)) |
| 1078 | return false; |
| 1079 | // Ensure that the end of the SRCU grace period happens before |
| 1080 | // any subsequent code that the caller might execute. |
| 1081 | smp_mb(); // ^^^ |
| 1082 | return true; |
| 1083 | } |
| 1084 | EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu); |
| 1085 | |
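/*
 * Illustrative usage sketch (not part of this file): a hypothetical user of
 * the polling interfaces.  The object is first unpublished, a cookie is
 * taken, other work proceeds, and the full synchronize_srcu() wait is
 * skipped whenever the cookie's grace period has already elapsed by the
 * time the object must be freed.  Use get_state_synchronize_srcu() instead
 * of start_poll_synchronize_srcu() when the caller will force a grace
 * period itself (for example, via call_srcu()).  The names example_srcu3
 * and example_poll_free() are assumptions made purely for this sketch;
 * <linux/slab.h> is assumed for kfree().
 */
DEFINE_STATIC_SRCU(example_srcu3);

static void example_poll_free(void *obj)
{
	unsigned long cookie;

	/* First unpublish "obj" so that no new reader can find it. */
	/* ... e.g. rcu_assign_pointer(..., NULL) or list_del_rcu() ... */

	/* Snapshot grace-period state and start a grace period if needed. */
	cookie = start_poll_synchronize_srcu(&example_srcu3);

	/* ... do other work while the grace period proceeds ... */

	/* Block only if the cookie's grace period has not yet completed. */
	if (!poll_state_synchronize_srcu(&example_srcu3, cookie))
		synchronize_srcu(&example_srcu3);
	kfree(obj);
}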
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1086 | /* |
| 1087 | * Callback function for srcu_barrier() use. |
| 1088 | */ |
| 1089 | static void srcu_barrier_cb(struct rcu_head *rhp) |
| 1090 | { |
| 1091 | struct srcu_data *sdp; |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1092 | struct srcu_struct *ssp; |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1093 | |
| 1094 | sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1095 | ssp = sdp->ssp; |
| 1096 | if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) |
| 1097 | complete(&ssp->srcu_barrier_completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1098 | } |
| 1099 | |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1100 | /** |
| 1101 | * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1102 | * @ssp: srcu_struct on which to wait for in-flight callbacks. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1103 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1104 | void srcu_barrier(struct srcu_struct *ssp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1105 | { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1106 | int cpu; |
| 1107 | struct srcu_data *sdp; |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1108 | unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1109 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1110 | check_init_srcu_struct(ssp); |
| 1111 | mutex_lock(&ssp->srcu_barrier_mutex); |
| 1112 | if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1113 | smp_mb(); /* Force ordering following return. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1114 | mutex_unlock(&ssp->srcu_barrier_mutex); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1115 | return; /* Someone else did our work for us. */ |
| 1116 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1117 | rcu_seq_start(&ssp->srcu_barrier_seq); |
| 1118 | init_completion(&ssp->srcu_barrier_completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1119 | |
| 1120 | /* Initial count prevents reaching zero until all CBs are posted. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1121 | atomic_set(&ssp->srcu_barrier_cpu_cnt, 1); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1122 | |
| 1123 | /* |
| 1124 | * Each pass through this loop enqueues a callback, but only |
| 1125 | * on CPUs already having callbacks enqueued. Note that if |
| 1126 | * a CPU already has callbacks enqueued, it must have already |
| 1127 | * registered the need for a future grace period, so all we |
| 1128 | * need do is enqueue a callback that will use the same |
| 1129 | * grace period as the last callback already in the queue. |
| 1130 | */ |
| 1131 | for_each_possible_cpu(cpu) { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1132 | sdp = per_cpu_ptr(ssp->sda, cpu); |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1133 | spin_lock_irq_rcu_node(sdp); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1134 | atomic_inc(&ssp->srcu_barrier_cpu_cnt); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1135 | sdp->srcu_barrier_head.func = srcu_barrier_cb; |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1136 | debug_rcu_head_queue(&sdp->srcu_barrier_head); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1137 | if (!rcu_segcblist_entrain(&sdp->srcu_cblist, |
Joel Fernandes (Google) | 77a40f9 | 2019-08-30 12:36:32 -0400 | [diff] [blame] | 1138 | &sdp->srcu_barrier_head)) { |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1139 | debug_rcu_head_unqueue(&sdp->srcu_barrier_head); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1140 | atomic_dec(&ssp->srcu_barrier_cpu_cnt); |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1141 | } |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1142 | spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1143 | } |
| 1144 | |
| 1145 | /* Remove the initial count, at which point reaching zero can happen. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1146 | if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) |
| 1147 | complete(&ssp->srcu_barrier_completion); |
| 1148 | wait_for_completion(&ssp->srcu_barrier_completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1149 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1150 | rcu_seq_end(&ssp->srcu_barrier_seq); |
| 1151 | mutex_unlock(&ssp->srcu_barrier_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1152 | } |
| 1153 | EXPORT_SYMBOL_GPL(srcu_barrier); |
| 1154 | |
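/*
 * Illustrative usage sketch (not part of this file): hypothetical teardown
 * code for a call_srcu() user.  Before callback-related state can be freed
 * (or the owning module unloaded), every already-posted callback must have
 * been invoked.  The names example_srcu4 and example_teardown() are
 * assumptions made purely for this sketch.
 */
DEFINE_STATIC_SRCU(example_srcu4);

static void example_teardown(void)
{
	/* ... first prevent any further call_srcu() on example_srcu4 ... */

	/* Wait until every previously posted callback has been invoked. */
	srcu_barrier(&example_srcu4);

	/*
	 * Callback-related state may now be freed.  A dynamically allocated
	 * srcu_struct could additionally be passed to cleanup_srcu_struct().
	 */
}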
| 1155 | /** |
| 1156 | * srcu_batches_completed - return batches completed. |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1157 | * @ssp: srcu_struct on which to report batch completion. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1158 | * |
| 1159 | * Report the number of batches, correlated with, but not necessarily |
| 1160 | * precisely the same as, the number of grace periods that have elapsed. |
| 1161 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1162 | unsigned long srcu_batches_completed(struct srcu_struct *ssp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1163 | { |
Paul E. McKenney | 39f9150 | 2019-12-22 19:39:35 -0800 | [diff] [blame] | 1164 | return READ_ONCE(ssp->srcu_idx); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1165 | } |
| 1166 | EXPORT_SYMBOL_GPL(srcu_batches_completed); |
| 1167 | |
| 1168 | /* |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1169 | * Core SRCU state machine. Push state bits of ->srcu_gp_seq |
| 1170 | * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has |
| 1171 | * completed in that state. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1172 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1173 | static void srcu_advance_state(struct srcu_struct *ssp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1174 | { |
| 1175 | int idx; |
| 1176 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1177 | mutex_lock(&ssp->srcu_gp_mutex); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1178 | |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1179 | /* |
| 1180 | * Because readers might be delayed for an extended period after |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1181 | * fetching ->srcu_idx for their index, at any point in time there |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1182 | * might well be readers using both idx=0 and idx=1. We therefore |
| 1183 | * need to wait for readers to clear from both index values before |
| 1184 | * invoking a callback. |
| 1185 | * |
| 1186 | * The load-acquire ensures that we see the accesses performed |
| 1187 | * by the prior grace period. |
| 1188 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1189 | idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */ |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1190 | if (idx == SRCU_STATE_IDLE) { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1191 | spin_lock_irq_rcu_node(ssp); |
| 1192 | if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { |
| 1193 | WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq)); |
| 1194 | spin_unlock_irq_rcu_node(ssp); |
| 1195 | mutex_unlock(&ssp->srcu_gp_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1196 | return; |
| 1197 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1198 | idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1199 | if (idx == SRCU_STATE_IDLE) |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1200 | srcu_gp_start(ssp); |
| 1201 | spin_unlock_irq_rcu_node(ssp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1202 | if (idx != SRCU_STATE_IDLE) { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1203 | mutex_unlock(&ssp->srcu_gp_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1204 | return; /* Someone else started the grace period. */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1205 | } |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1206 | } |
| 1207 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1208 | if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { |
| 1209 | idx = 1 ^ (ssp->srcu_idx & 1); |
| 1210 | if (!try_check_zero(ssp, idx, 1)) { |
| 1211 | mutex_unlock(&ssp->srcu_gp_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1212 | return; /* readers present, retry later. */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1213 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1214 | srcu_flip(ssp); |
Paul E. McKenney | 7104260 | 2020-01-03 11:42:05 -0800 | [diff] [blame] | 1215 | spin_lock_irq_rcu_node(ssp); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1216 | rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2); |
Paul E. McKenney | 7104260 | 2020-01-03 11:42:05 -0800 | [diff] [blame] | 1217 | spin_unlock_irq_rcu_node(ssp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1218 | } |
| 1219 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1220 | if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1221 | |
| 1222 | /* |
| 1223 | * SRCU read-side critical sections are normally short, |
| 1224 | * so check at least twice in quick succession after a flip. |
| 1225 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1226 | idx = 1 ^ (ssp->srcu_idx & 1); |
| 1227 | if (!try_check_zero(ssp, idx, 2)) { |
| 1228 | mutex_unlock(&ssp->srcu_gp_mutex); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1229 | return; /* readers present, retry later. */ |
| 1230 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1231 | srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1232 | } |
| 1233 | } |
| 1234 | |
| 1235 | /* |
| 1236 | * Invoke a limited number of SRCU callbacks that have passed through |
| 1237 | * their grace period. If there are more to do, SRCU will reschedule |
| 1238 | * the workqueue. Note that needed memory barriers have been executed |
| 1239 | * in this task's context by srcu_readers_active_idx_check(). |
| 1240 | */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1241 | static void srcu_invoke_callbacks(struct work_struct *work) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1242 | { |
Joel Fernandes (Google) | ae5c234 | 2020-09-23 11:22:09 -0400 | [diff] [blame] | 1243 | long len; |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1244 | bool more; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1245 | struct rcu_cblist ready_cbs; |
| 1246 | struct rcu_head *rhp; |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1247 | struct srcu_data *sdp; |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1248 | struct srcu_struct *ssp; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1249 | |
Sebastian Andrzej Siewior | e81baf4 | 2018-12-11 12:12:38 +0100 | [diff] [blame] | 1250 | sdp = container_of(work, struct srcu_data, work); |
| 1251 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1252 | ssp = sdp->ssp; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1253 | rcu_cblist_init(&ready_cbs); |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1254 | spin_lock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1255 | rcu_segcblist_advance(&sdp->srcu_cblist, |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1256 | rcu_seq_current(&ssp->srcu_gp_seq)); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1257 | if (sdp->srcu_cblist_invoking || |
| 1258 | !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1259 | spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1260 | return; /* Someone else on the job or nothing to do. */ |
| 1261 | } |
| 1262 | |
| 1263 | /* We are on the job! Extract and invoke ready callbacks. */ |
| 1264 | sdp->srcu_cblist_invoking = true; |
| 1265 | rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); |
Joel Fernandes (Google) | ae5c234 | 2020-09-23 11:22:09 -0400 | [diff] [blame] | 1266 | len = ready_cbs.len; |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1267 | spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1268 | rhp = rcu_cblist_dequeue(&ready_cbs); |
| 1269 | for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1270 | debug_rcu_head_unqueue(rhp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1271 | local_bh_disable(); |
| 1272 | rhp->func(rhp); |
| 1273 | local_bh_enable(); |
| 1274 | } |
Joel Fernandes (Google) | ae5c234 | 2020-09-23 11:22:09 -0400 | [diff] [blame] | 1275 | WARN_ON_ONCE(ready_cbs.len); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1276 | |
| 1277 | /* |
| 1278 | * Update counts, accelerate new callbacks, and if needed, |
| 1279 | * schedule another round of callback invocation. |
| 1280 | */ |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1281 | spin_lock_irq_rcu_node(sdp); |
Joel Fernandes (Google) | ae5c234 | 2020-09-23 11:22:09 -0400 | [diff] [blame] | 1282 | rcu_segcblist_add_len(&sdp->srcu_cblist, -len); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1283 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1284 | rcu_seq_snap(&ssp->srcu_gp_seq)); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1285 | sdp->srcu_cblist_invoking = false; |
| 1286 | more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1287 | spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1288 | if (more) |
| 1289 | srcu_schedule_cbs_sdp(sdp, 0); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1290 | } |
| 1291 | |
| 1292 | /* |
| 1293 | * Finished one round of SRCU grace period. Start another if there are |
| 1294 | * more SRCU callbacks queued, otherwise put SRCU into not-running state. |
| 1295 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1296 | static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1297 | { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1298 | bool pushgp = true; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1299 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1300 | spin_lock_irq_rcu_node(ssp); |
| 1301 | if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { |
| 1302 | if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1303 | /* All requests fulfilled, time to go idle. */ |
| 1304 | pushgp = false; |
| 1305 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1306 | } else if (!rcu_seq_state(ssp->srcu_gp_seq)) { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1307 | /* Outstanding request and no GP. Start one. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1308 | srcu_gp_start(ssp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1309 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1310 | spin_unlock_irq_rcu_node(ssp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1311 | |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1312 | if (pushgp) |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1313 | queue_delayed_work(rcu_gp_wq, &ssp->work, delay); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1314 | } |
| 1315 | |
| 1316 | /* |
| 1317 | * This is the work-queue function that handles SRCU grace periods. |
| 1318 | */ |
Paul E. McKenney | 0d8a1e8 | 2017-06-15 17:06:38 -0700 | [diff] [blame] | 1319 | static void process_srcu(struct work_struct *work) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1320 | { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1321 | struct srcu_struct *ssp; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1322 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1323 | ssp = container_of(work, struct srcu_struct, work.work); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1324 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1325 | srcu_advance_state(ssp); |
| 1326 | srcu_reschedule(ssp, srcu_get_delay(ssp)); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1327 | } |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1328 | |
| 1329 | void srcutorture_get_gp_data(enum rcutorture_type test_type, |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1330 | struct srcu_struct *ssp, int *flags, |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1331 | unsigned long *gp_seq) |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1332 | { |
| 1333 | if (test_type != SRCU_FLAVOR) |
| 1334 | return; |
| 1335 | *flags = 0; |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1336 | *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq); |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1337 | } |
| 1338 | EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); |
Paul E. McKenney | 1f4f6da | 2017-04-21 11:16:32 -0700 | [diff] [blame] | 1339 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1340 | void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1341 | { |
| 1342 | int cpu; |
| 1343 | int idx; |
Paul E. McKenney | ac3748c | 2017-05-22 13:59:52 -0700 | [diff] [blame] | 1344 | unsigned long s0 = 0, s1 = 0; |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1345 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1346 | idx = ssp->srcu_idx & 0x1; |
Paul E. McKenney | 52e17ba | 2018-06-19 08:54:37 -0700 | [diff] [blame] | 1347 | pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1348 | tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx); |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1349 | for_each_possible_cpu(cpu) { |
| 1350 | unsigned long l0, l1; |
| 1351 | unsigned long u0, u1; |
| 1352 | long c0, c1; |
Paul E. McKenney | 5ab07a8 | 2018-05-22 12:28:04 -0700 | [diff] [blame] | 1353 | struct srcu_data *sdp; |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1354 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1355 | sdp = per_cpu_ptr(ssp->sda, cpu); |
Paul E. McKenney | b68c614 | 2020-01-03 16:36:59 -0800 | [diff] [blame] | 1356 | u0 = data_race(sdp->srcu_unlock_count[!idx]); |
| 1357 | u1 = data_race(sdp->srcu_unlock_count[idx]); |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1358 | |
| 1359 | /* |
| 1360 | * Make sure that a lock is always counted if the corresponding |
| 1361 | * unlock is counted. |
| 1362 | */ |
| 1363 | smp_rmb(); |
| 1364 | |
Paul E. McKenney | b68c614 | 2020-01-03 16:36:59 -0800 | [diff] [blame] | 1365 | l0 = data_race(sdp->srcu_lock_count[!idx]); |
| 1366 | l1 = data_race(sdp->srcu_lock_count[idx]); |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1367 | |
| 1368 | c0 = l0 - u0; |
| 1369 | c1 = l1 - u1; |
Paul E. McKenney | 7e210a6 | 2019-06-28 17:11:10 -0700 | [diff] [blame] | 1370 | pr_cont(" %d(%ld,%ld %c)", |
| 1371 | cpu, c0, c1, |
| 1372 | "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]); |
Paul E. McKenney | ac3748c | 2017-05-22 13:59:52 -0700 | [diff] [blame] | 1373 | s0 += c0; |
| 1374 | s1 += c1; |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1375 | } |
Paul E. McKenney | ac3748c | 2017-05-22 13:59:52 -0700 | [diff] [blame] | 1376 | pr_cont(" T(%ld,%ld)\n", s0, s1); |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1377 | } |
| 1378 | EXPORT_SYMBOL_GPL(srcu_torture_stats_print); |
| 1379 | |
Paul E. McKenney | 1f4f6da | 2017-04-21 11:16:32 -0700 | [diff] [blame] | 1380 | static int __init srcu_bootup_announce(void) |
| 1381 | { |
| 1382 | pr_info("Hierarchical SRCU implementation.\n"); |
Paul E. McKenney | 0c8e0e3 | 2017-04-28 11:24:22 -0700 | [diff] [blame] | 1383 | if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) |
| 1384 | pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); |
Paul E. McKenney | 1f4f6da | 2017-04-21 11:16:32 -0700 | [diff] [blame] | 1385 | return 0; |
| 1386 | } |
| 1387 | early_initcall(srcu_bootup_announce); |
Paul E. McKenney | e0fcba9 | 2018-08-14 08:45:54 -0700 | [diff] [blame] | 1388 | |
| 1389 | void __init srcu_init(void) |
| 1390 | { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1391 | struct srcu_struct *ssp; |
Paul E. McKenney | e0fcba9 | 2018-08-14 08:45:54 -0700 | [diff] [blame] | 1392 | |
Frederic Weisbecker | 8e9c01c | 2021-04-09 00:38:59 +0200 | [diff] [blame] | 1393 | /* |
| 1394 | * Once srcu_init_done is set below, call_srcu() can follow the normal |
| 1395 | * path and queue delayed work. This must therefore follow the creation |
| 1396 | * of the RCU workqueues and the initialization of timers. |
| 1397 | */ |
Paul E. McKenney | e0fcba9 | 2018-08-14 08:45:54 -0700 | [diff] [blame] | 1398 | srcu_init_done = true; |
| 1399 | while (!list_empty(&srcu_boot_list)) { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1400 | ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, |
Paul E. McKenney | 4e6ea4e | 2018-08-14 14:41:49 -0700 | [diff] [blame] | 1401 | work.work.entry); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1402 | list_del_init(&ssp->work.work.entry); |
| 1403 | queue_work(rcu_gp_wq, &ssp->work.work); |
Paul E. McKenney | e0fcba9 | 2018-08-14 08:45:54 -0700 | [diff] [blame] | 1404 | } |
| 1405 | } |
Paul E. McKenney | fe15b50 | 2019-04-05 16:15:00 -0700 | [diff] [blame] | 1406 | |
| 1407 | #ifdef CONFIG_MODULES |
| 1408 | |
| 1409 | /* Initialize any global-scope srcu_struct structures used by this module. */ |
| 1410 | static int srcu_module_coming(struct module *mod) |
| 1411 | { |
| 1412 | int i; |
| 1413 | struct srcu_struct **sspp = mod->srcu_struct_ptrs; |
| 1414 | int ret; |
| 1415 | |
| 1416 | for (i = 0; i < mod->num_srcu_structs; i++) { |
| 1417 | ret = init_srcu_struct(*(sspp++)); |
| 1418 | if (WARN_ON_ONCE(ret)) |
| 1419 | return ret; |
| 1420 | } |
| 1421 | return 0; |
| 1422 | } |
| 1423 | |
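/*
 * Illustrative sketch (not part of this file): a hypothetical module that
 * relies on the notifier machinery below.  A global-scope srcu_struct
 * declared with DEFINE_STATIC_SRCU() in the module is recorded in
 * mod->srcu_struct_ptrs, so init_srcu_struct() runs at MODULE_STATE_COMING
 * and cleanup_srcu_struct() at MODULE_STATE_GOING without any explicit
 * calls in the module itself.  The names below are assumptions made purely
 * for this sketch, and the code would live in that module's own source file.
 *
 *	DEFINE_STATIC_SRCU(example_mod_srcu);
 *
 *	static int example_mod_reader(void)
 *	{
 *		int idx = srcu_read_lock(&example_mod_srcu);
 *
 *		// ... access module data protected by example_mod_srcu ...
 *		srcu_read_unlock(&example_mod_srcu, idx);
 *		return 0;
 *	}
 */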
| 1424 | /* Clean up any global-scope srcu_struct structures used by this module. */ |
| 1425 | static void srcu_module_going(struct module *mod) |
| 1426 | { |
| 1427 | int i; |
| 1428 | struct srcu_struct **sspp = mod->srcu_struct_ptrs; |
| 1429 | |
| 1430 | for (i = 0; i < mod->num_srcu_structs; i++) |
| 1431 | cleanup_srcu_struct(*(sspp++)); |
| 1432 | } |
| 1433 | |
| 1434 | /* Handle one module, either coming or going. */ |
| 1435 | static int srcu_module_notify(struct notifier_block *self, |
| 1436 | unsigned long val, void *data) |
| 1437 | { |
| 1438 | struct module *mod = data; |
| 1439 | int ret = 0; |
| 1440 | |
| 1441 | switch (val) { |
| 1442 | case MODULE_STATE_COMING: |
| 1443 | ret = srcu_module_coming(mod); |
| 1444 | break; |
| 1445 | case MODULE_STATE_GOING: |
| 1446 | srcu_module_going(mod); |
| 1447 | break; |
| 1448 | default: |
| 1449 | break; |
| 1450 | } |
| 1451 | return ret; |
| 1452 | } |
| 1453 | |
| 1454 | static struct notifier_block srcu_module_nb = { |
| 1455 | .notifier_call = srcu_module_notify, |
| 1456 | .priority = 0, |
| 1457 | }; |
| 1458 | |
| 1459 | static __init int init_srcu_module_notifier(void) |
| 1460 | { |
| 1461 | int ret; |
| 1462 | |
| 1463 | ret = register_module_notifier(&srcu_module_nb); |
| 1464 | if (ret) |
| 1465 | pr_warn("Failed to register srcu module notifier\n"); |
| 1466 | return ret; |
| 1467 | } |
| 1468 | late_initcall(init_srcu_module_notifier); |
| 1469 | |
| 1470 | #endif /* #ifdef CONFIG_MODULES */ |