// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	    Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);
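/*
 * Illustrative note, not taken from this file: because this code is built
 * into the kernel, exp_holdoff is normally adjusted on the kernel command
 * line, for example "srcutree.exp_holdoff=0" to disable the auto-expediting
 * holdoff entirely (parameter prefix assumed from this file's name).
 */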

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

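/*
 * Editorial note, based on the documented semantics of
 * smp_mb__after_unlock_lock(): the barrier in the acquisition wrappers
 * above promotes a prior unlock followed by this lock into a full memory
 * barrier, giving these ->lock acquisitions the same ordering guarantees
 * as the raw_spin_lock_rcu_node() family used elsewhere in RCU.
 */
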
/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.  So if the is_static parameter
 * is set, don't initialize ->srcu_lock_count[] and ->srcu_unlock_count[].
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_nodes(ssp, is_static);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

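/*
 * Illustrative usage sketch, not part of this file; the names my_srcu and
 * my_dyn_srcu below are hypothetical.  An SRCU domain is either defined
 * statically or initialized dynamically before first use, and a
 * dynamically initialized domain must eventually be cleaned up:
 *
 *	DEFINE_SRCU(my_srcu);				// static definition
 *
 *	struct srcu_struct my_dyn_srcu;			// dynamic definition
 *
 *	if (init_srcu_struct(&my_dyn_srcu))		// -ENOMEM on failure
 *		return -ENOMEM;
 *	// ... use the domain ...
 *	cleanup_srcu_struct(&my_dyn_srcu);
 */
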
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

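/*
 * Illustrative teardown sketch, not part of this file (my_srcu is a
 * hypothetical name): a user that queued callbacks with call_srcu() must
 * drain them with srcu_barrier() before cleanup, or the checks above will
 * fire and the srcu_struct will simply be leaked:
 *
 *	srcu_barrier(&my_srcu);		// wait for pending SRCU callbacks
 *	cleanup_srcu_struct(&my_srcu);	// then tear the domain down
 */
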
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

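/*
 * Illustrative reader-side sketch, not part of this file (my_srcu, gp, and
 * struct foo are hypothetical names): the index returned by srcu_read_lock()
 * selects the counter rank whose unlock counter the matching
 * srcu_read_unlock() will increment, so it must be passed back unchanged:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	// ... read-side critical section, which may block ...
 *	srcu_read_unlock(&my_srcu, idx);
 */
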
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
						      ? SRCU_INTERVAL
						      : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period to specify expediting because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle. */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */
Paul E. McKenney | 22607d6 | 2017-04-25 14:03:11 -0700 | [diff] [blame] | 784 | |
| 785 | /* First, see if enough time has passed since the last GP. */ |
| 786 | t = ktime_get_mono_fast_ns(); |
Paul E. McKenney | 844a378 | 2019-11-04 08:08:30 -0800 | [diff] [blame] | 787 | tlast = READ_ONCE(ssp->srcu_last_gp_end); |
Paul E. McKenney | 22607d6 | 2017-04-25 14:03:11 -0700 | [diff] [blame] | 788 | if (exp_holdoff == 0 || |
Paul E. McKenney | 844a378 | 2019-11-04 08:08:30 -0800 | [diff] [blame] | 789 | time_in_range_open(t, tlast, tlast + exp_holdoff)) |
Paul E. McKenney | 22607d6 | 2017-04-25 14:03:11 -0700 | [diff] [blame] | 790 | return false; /* Too soon after last GP. */ |
| 791 | |
| 792 | /* Next, check for probable idleness. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 793 | curseq = rcu_seq_current(&ssp->srcu_gp_seq); |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 794 | smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 795 | if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed))) |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 796 | return false; /* Grace period in progress, so not idle. */ |
| 797 | smp_mb(); /* Order ->srcu_gp_seq with prior access. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 798 | if (curseq != rcu_seq_current(&ssp->srcu_gp_seq)) |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 799 | return false; /* GP # changed, so not idle. */ |
| 800 | return true; /* With reasonable probability, idle! */ |
| 801 | } |
| 802 | |
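/*
 * For illustration only (not part of this file's logic): the holdoff used
 * above is the exp_holdoff module parameter declared near the top of this
 * file, so it can be tuned at boot time.  The exact spelling below assumes
 * the usual "srcutree." module-name prefix:
 *
 *	srcutree.exp_holdoff=50000
 *
 * The value is in nanoseconds.  Note from the code above that setting it
 * to zero makes srcu_might_be_idle() always report "not idle", which in
 * turn disables idle-based auto-expediting of synchronize_srcu().
 */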
| 803 | /* |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 804 | * SRCU callback function to leak a callback. |
| 805 | */ |
| 806 | static void srcu_leak_callback(struct rcu_head *rhp) |
| 807 | { |
| 808 | } |
| 809 | |
| 810 | /* |
Paul E. McKenney | 29d2bb9 | 2020-11-13 10:08:09 -0800 | [diff] [blame] | 811 | * Start an SRCU grace period, and also queue the callback if non-NULL. |
| 812 | */ |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 813 | static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp, |
| 814 | struct rcu_head *rhp, bool do_norm) |
Paul E. McKenney | 29d2bb9 | 2020-11-13 10:08:09 -0800 | [diff] [blame] | 815 | { |
| 816 | unsigned long flags; |
| 817 | int idx; |
| 818 | bool needexp = false; |
| 819 | bool needgp = false; |
| 820 | unsigned long s; |
| 821 | struct srcu_data *sdp; |
| 822 | |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 823 | check_init_srcu_struct(ssp); |
Paul E. McKenney | 29d2bb9 | 2020-11-13 10:08:09 -0800 | [diff] [blame] | 824 | idx = srcu_read_lock(ssp); |
| 825 | sdp = raw_cpu_ptr(ssp->sda); |
| 826 | spin_lock_irqsave_rcu_node(sdp, flags); |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 827 | if (rhp) |
| 828 | rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); |
Paul E. McKenney | 29d2bb9 | 2020-11-13 10:08:09 -0800 | [diff] [blame] | 829 | rcu_segcblist_advance(&sdp->srcu_cblist, |
| 830 | rcu_seq_current(&ssp->srcu_gp_seq)); |
| 831 | s = rcu_seq_snap(&ssp->srcu_gp_seq); |
| 832 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); |
| 833 | if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { |
| 834 | sdp->srcu_gp_seq_needed = s; |
| 835 | needgp = true; |
| 836 | } |
| 837 | if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { |
| 838 | sdp->srcu_gp_seq_needed_exp = s; |
| 839 | needexp = true; |
| 840 | } |
| 841 | spin_unlock_irqrestore_rcu_node(sdp, flags); |
| 842 | if (needgp) |
| 843 | srcu_funnel_gp_start(ssp, sdp, s, do_norm); |
| 844 | else if (needexp) |
| 845 | srcu_funnel_exp_start(ssp, sdp->mynode, s); |
| 846 | srcu_read_unlock(ssp, idx); |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 847 | return s; |
Paul E. McKenney | 29d2bb9 | 2020-11-13 10:08:09 -0800 | [diff] [blame] | 848 | } |
| 849 | |
| 850 | /* |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 851 | * Enqueue an SRCU callback on the srcu_data structure associated with |
| 852 | * the current CPU and the specified srcu_struct structure, initiating |
| 853 | * grace-period processing if it is not already running. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 854 | * |
| 855 | * Note that all CPUs must agree that the grace period extended beyond |
| 856 | * all pre-existing SRCU read-side critical sections. On systems with |
| 857 | * more than one CPU, this means that when "func()" is invoked, each CPU |
| 858 | * is guaranteed to have executed a full memory barrier since the end of |
| 859 | * its last corresponding SRCU read-side critical section whose beginning |
Paul E. McKenney | 5ef98a6 | 2018-04-24 21:30:13 -0700 | [diff] [blame] | 860 | * preceded the call to call_srcu(). It also means that each CPU executing |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 861 | * an SRCU read-side critical section that continues beyond the start of |
Paul E. McKenney | 5ef98a6 | 2018-04-24 21:30:13 -0700 | [diff] [blame] | 862 | * "func()" must have executed a memory barrier after the call_srcu() |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 863 | * but before the beginning of that SRCU read-side critical section. |
| 864 | * Note that these guarantees include CPUs that are offline, idle, or |
| 865 | * executing in user mode, as well as CPUs that are executing in the kernel. |
| 866 | * |
Paul E. McKenney | 5ef98a6 | 2018-04-24 21:30:13 -0700 | [diff] [blame] | 867 | * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 868 | * resulting SRCU callback function "func()", then both CPU A and CPU |
| 869 | * B are guaranteed to execute a full memory barrier during the time |
Paul E. McKenney | 5ef98a6 | 2018-04-24 21:30:13 -0700 | [diff] [blame] | 870 | * interval between the call to call_srcu() and the invocation of "func()". |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 871 | * This guarantee applies even if CPU A and CPU B are the same CPU (but |
| 872 | * again only if the system has more than one CPU). |
| 873 | * |
| 874 | * Of course, these guarantees apply only for invocations of call_srcu(), |
| 875 | * srcu_read_lock(), and srcu_read_unlock() that are all passed the same |
| 876 | * srcu_struct structure. |
| 877 | */ |
Jiang Biao | 11b0004 | 2019-04-23 09:22:56 +0800 | [diff] [blame] | 878 | static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, |
| 879 | rcu_callback_t func, bool do_norm) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 880 | { |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 881 | if (debug_rcu_head_queue(rhp)) { |
| 882 | /* Probable double call_srcu(), so leak the callback. */ |
| 883 | WRITE_ONCE(rhp->func, srcu_leak_callback); |
| 884 | WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n"); |
| 885 | return; |
| 886 | } |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 887 | rhp->func = func; |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 888 | (void)srcu_gp_start_if_needed(ssp, rhp, do_norm); |
Paul E. McKenney | 1e9a038 | 2017-04-24 16:02:09 -0700 | [diff] [blame] | 889 | } |
| 890 | |
Paul E. McKenney | 5a0465e | 2017-05-04 11:31:04 -0700 | [diff] [blame] | 891 | /** |
| 892 | * call_srcu() - Queue a callback for invocation after an SRCU grace period |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 893 | * @ssp: srcu_struct on which to queue the callback |
Paul E. McKenney | 27fdb35 | 2017-10-19 14:26:21 -0700 | [diff] [blame] | 894 | * @rhp: structure to be used for queueing the SRCU callback. |
Paul E. McKenney | 5a0465e | 2017-05-04 11:31:04 -0700 | [diff] [blame] | 895 | * @func: function to be invoked after the SRCU grace period |
| 896 | * |
| 897 | * The callback function will be invoked some time after a full SRCU |
| 898 | * grace period elapses, in other words after all pre-existing SRCU |
| 899 | * read-side critical sections have completed. However, the callback |
| 900 | * function might well execute concurrently with other SRCU read-side |
| 901 | * critical sections that started after call_srcu() was invoked. SRCU |
| 902 | * read-side critical sections are delimited by srcu_read_lock() and |
| 903 | * srcu_read_unlock(), and may be nested. |
| 904 | * |
| 905 | * The callback will be invoked from process context, but must nevertheless |
| 906 | * be fast and must not block. |
| 907 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 908 | void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, |
Paul E. McKenney | 1e9a038 | 2017-04-24 16:02:09 -0700 | [diff] [blame] | 909 | rcu_callback_t func) |
| 910 | { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 911 | __call_srcu(ssp, rhp, func, true); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 912 | } |
| 913 | EXPORT_SYMBOL_GPL(call_srcu); |
| 914 | |
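/*
 * Illustrative usage sketch, not part of this file's logic: a minimal
 * example of pairing call_srcu() with an rcu_head embedded in the data
 * being freed.  The foo_example names are hypothetical, and kfree()
 * assumes <linux/slab.h>.
 */

struct foo_example {
	struct rcu_head rh;
	int data;
};

static void foo_example_reclaim(struct rcu_head *rhp)
{
	struct foo_example *fp = container_of(rhp, struct foo_example, rh);

	kfree(fp);	/* Runs only after all pre-existing SRCU readers finish. */
}

/* Caller has already unpublished fp from all SRCU-protected pointers. */
static void __maybe_unused foo_example_free(struct srcu_struct *ssp,
					    struct foo_example *fp)
{
	call_srcu(ssp, &fp->rh, foo_example_reclaim);
}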
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 915 | /* |
| 916 | * Helper function for synchronize_srcu() and synchronize_srcu_expedited(). |
| 917 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 918 | static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 919 | { |
| 920 | struct rcu_synchronize rcu; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 921 | |
Jakub Kicinski | f505d43 | 2020-09-16 11:45:26 -0700 | [diff] [blame] | 922 | RCU_LOCKDEP_WARN(lockdep_is_held(ssp) || |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 923 | lock_is_held(&rcu_bh_lock_map) || |
| 924 | lock_is_held(&rcu_lock_map) || |
| 925 | lock_is_held(&rcu_sched_lock_map), |
| 926 | "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section"); |
| 927 | |
| 928 | if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE) |
| 929 | return; |
| 930 | might_sleep(); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 931 | check_init_srcu_struct(ssp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 932 | init_completion(&rcu.completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 933 | init_rcu_head_on_stack(&rcu.head); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 934 | __call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 935 | wait_for_completion(&rcu.completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 936 | destroy_rcu_head_on_stack(&rcu.head); |
Paul E. McKenney | 35732cf | 2017-07-05 13:30:21 -0700 | [diff] [blame] | 937 | |
| 938 | /* |
| 939 | * Make sure that later code is ordered after the SRCU grace |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 940 | * period. This pairs with the spin_lock_irq_rcu_node() |
Paul E. McKenney | 35732cf | 2017-07-05 13:30:21 -0700 | [diff] [blame] | 941 | * in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed |
| 942 | * because the current CPU might have been totally uninvolved with |
| 943 | * (and thus unordered against) that grace period. |
| 944 | */ |
| 945 | smp_mb(); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 946 | } |
| 947 | |
| 948 | /** |
| 949 | * synchronize_srcu_expedited - Brute-force SRCU grace period |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 950 | * @ssp: srcu_struct with which to synchronize. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 951 | * |
| 952 | * Wait for an SRCU grace period to elapse, but be more aggressive about |
| 953 | * spinning rather than blocking when waiting. |
| 954 | * |
| 955 | * Note that synchronize_srcu_expedited() has the same deadlock and |
| 956 | * memory-ordering properties as does synchronize_srcu(). |
| 957 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 958 | void synchronize_srcu_expedited(struct srcu_struct *ssp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 959 | { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 960 | __synchronize_srcu(ssp, rcu_gp_is_normal()); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 961 | } |
| 962 | EXPORT_SYMBOL_GPL(synchronize_srcu_expedited); |
| 963 | |
| 964 | /** |
| 965 | * synchronize_srcu - wait for prior SRCU read-side critical-section completion |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 966 | * @ssp: srcu_struct with which to synchronize. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 967 | * |
| 968 | * Wait for the counts of both indexes to drain to zero. To avoid |
| 969 | * possible starvation of synchronize_srcu(), it first waits for the |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 970 | * count of index=((->srcu_idx & 1) ^ 1) to drain to zero, and then |
| 971 | * flips ->srcu_idx and waits for the count of the other index. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 972 | * |
| 973 | * Can block; must be called from process context. |
| 974 | * |
| 975 | * Note that it is illegal to call synchronize_srcu() from the corresponding |
| 976 | * SRCU read-side critical section; doing so will result in deadlock. |
| 977 | * However, it is perfectly legal to call synchronize_srcu() on one |
| 978 | * srcu_struct from some other srcu_struct's read-side critical section, |
| 979 | * as long as the resulting graph of srcu_structs is acyclic. |
| 980 | * |
| 981 | * There are memory-ordering constraints implied by synchronize_srcu(). |
| 982 | * On systems with more than one CPU, when synchronize_srcu() returns, |
| 983 | * each CPU is guaranteed to have executed a full memory barrier since |
Paul E. McKenney | 6eb95cc | 2018-07-07 18:12:26 -0700 | [diff] [blame] | 984 | * the end of its last corresponding SRCU read-side critical section |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 985 | * whose beginning preceded the call to synchronize_srcu(). In addition, |
| 986 | * each CPU having an SRCU read-side critical section that extends beyond |
| 987 | * the return from synchronize_srcu() is guaranteed to have executed a |
| 988 | * full memory barrier after the beginning of synchronize_srcu() and before |
| 989 | * the beginning of that SRCU read-side critical section. Note that these |
| 990 | * guarantees include CPUs that are offline, idle, or executing in user mode, |
| 991 | * as well as CPUs that are executing in the kernel. |
| 992 | * |
| 993 | * Furthermore, if CPU A invoked synchronize_srcu(), which returned |
| 994 | * to its caller on CPU B, then both CPU A and CPU B are guaranteed |
| 995 | * to have executed a full memory barrier during the execution of |
| 996 | * synchronize_srcu(). This guarantee applies even if CPU A and CPU B |
| 997 | * are the same CPU, but again only if the system has more than one CPU. |
| 998 | * |
| 999 | * Of course, these memory-ordering guarantees apply only when |
| 1000 | * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are |
| 1001 | * passed the same srcu_struct structure. |
Paul E. McKenney | 2da4b2a | 2017-04-25 11:34:40 -0700 | [diff] [blame] | 1002 | * |
| 1003 | * If SRCU is likely idle, expedite the first request. This semantic |
| 1004 | * was provided by Classic SRCU, and is relied upon by its users, so TREE |
| 1005 | * SRCU must also provide it. Note that detecting idleness is heuristic |
| 1006 | * and subject to both false positives and negatives. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1007 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1008 | void synchronize_srcu(struct srcu_struct *ssp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1009 | { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1010 | if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited()) |
| 1011 | synchronize_srcu_expedited(ssp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1012 | else |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1013 | __synchronize_srcu(ssp, true); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1014 | } |
| 1015 | EXPORT_SYMBOL_GPL(synchronize_srcu); |
| 1016 | |
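/*
 * Illustrative usage sketch, not part of this file's logic: an updater
 * that replaces an SRCU-protected pointer and blocks in synchronize_srcu()
 * before freeing the old version, plus a matching reader.  Reuses the
 * hypothetical struct foo_example from the call_srcu() sketch above and
 * assumes <linux/slab.h> for kfree().
 */

static struct foo_example __rcu *foo_example_ptr;

static void __maybe_unused foo_example_update(struct srcu_struct *ssp,
					      struct foo_example *new_fp)
{
	struct foo_example *old_fp;

	old_fp = rcu_replace_pointer(foo_example_ptr, new_fp, true);
	synchronize_srcu(ssp);	/* Wait for pre-existing readers to finish. */
	kfree(old_fp);		/* Now no reader can hold a reference. */
}

static int __maybe_unused foo_example_read(struct srcu_struct *ssp)
{
	int idx;
	int val = -1;
	struct foo_example *fp;

	idx = srcu_read_lock(ssp);
	fp = srcu_dereference(foo_example_ptr, ssp);
	if (fp)
		val = fp->data;
	srcu_read_unlock(ssp, idx);
	return val;
}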
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 1017 | /** |
| 1018 | * get_state_synchronize_srcu - Provide an end-of-grace-period cookie |
| 1019 | * @ssp: srcu_struct to provide cookie for. |
| 1020 | * |
| 1021 | * This function returns a cookie that can be passed to |
| 1022 | * poll_state_synchronize_srcu(), which will return true if a full grace |
| 1023 | * period has elapsed in the meantime. It is the caller's responsibility |
| 1024 | * to make sure that grace period happens, for example, by invoking |
| 1025 | * call_srcu() after return from get_state_synchronize_srcu(). |
| 1026 | */ |
| 1027 | unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp) |
| 1028 | { |
| 1029 | // Any prior manipulation of SRCU-protected data must happen |
| 1030 | // before the load from ->srcu_gp_seq. |
| 1031 | smp_mb(); |
| 1032 | return rcu_seq_snap(&ssp->srcu_gp_seq); |
| 1033 | } |
| 1034 | EXPORT_SYMBOL_GPL(get_state_synchronize_srcu); |
| 1035 | |
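/*
 * Illustrative sketch, not part of this file's logic: as noted above, the
 * caller of get_state_synchronize_srcu() must itself ensure that a grace
 * period gets started, for example by queueing a callback.  Reuses the
 * hypothetical foo_example names from the call_srcu() sketch above.
 */
static unsigned long __maybe_unused
foo_example_snapshot_and_queue(struct srcu_struct *ssp, struct foo_example *fp)
{
	unsigned long cookie;

	cookie = get_state_synchronize_srcu(ssp);	/* Cookie only, no GP started. */
	call_srcu(ssp, &fp->rh, foo_example_reclaim);	/* This ensures a GP happens. */
	return cookie;	/* Later passed to poll_state_synchronize_srcu(). */
}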
| 1036 | /** |
| 1037 | * start_poll_synchronize_srcu - Provide cookie and start grace period |
| 1038 | * @ssp: srcu_struct to provide cookie for. |
| 1039 | * |
| 1040 | * This function returns a cookie that can be passed to |
| 1041 | * poll_state_synchronize_srcu(), which will return true if a full grace |
| 1042 | * period has elapsed in the meantime. Unlike get_state_synchronize_srcu(), |
| 1043 | * this function also ensures that any needed SRCU grace period will be |
| 1044 | * started. This convenience does come at a cost in terms of CPU overhead. |
| 1045 | */ |
| 1046 | unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) |
| 1047 | { |
| 1048 | return srcu_gp_start_if_needed(ssp, NULL, true); |
| 1049 | } |
| 1050 | EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); |
| 1051 | |
| 1052 | /** |
| 1053 | * poll_state_synchronize_srcu - Has cookie's grace period ended? |
| 1054 | * @ssp: srcu_struct to provide cookie for. |
| 1055 | * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu(). |
| 1056 | * |
| 1057 | * This function takes the cookie that was returned from either |
| 1058 | * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and |
| 1059 | * returns true if a full SRCU grace period has elapsed since the time |
| 1060 | * that the cookie was created. |
Paul E. McKenney | 4e7ccfa | 2020-11-15 20:33:38 -0800 | [diff] [blame] | 1061 | * |
| 1062 | * Because cookies are finite in size, wrapping/overflow is possible. |
| 1063 | * This is more pronounced on 32-bit systems, whose 32-bit cookies |
| 1064 | * could in theory wrap in about 14 hours assuming 25-microsecond |
| 1065 | * expedited SRCU grace periods. However, a more likely |
| 1066 | * overflow lower bound is on the order of 24 days in the case of |
| 1067 | * one-millisecond SRCU grace periods. Of course, wrapping in a 64-bit |
| 1068 | * system requires geologic timespans, as in more than seven million years |
| 1069 | * even for expedited SRCU grace periods. |
| 1070 | * |
| 1071 | * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems |
| 1072 | * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU. This uses |
| 1073 | * a 16-bit cookie, which rcutorture routinely wraps in a matter of a |
| 1074 | * few minutes. If this proves to be a problem, this counter will be |
| 1075 | * expanded to the same size as for Tree SRCU. |
Paul E. McKenney | 5358c9f | 2020-11-13 17:31:55 -0800 | [diff] [blame] | 1076 | */ |
| 1077 | bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) |
| 1078 | { |
| 1079 | if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie)) |
| 1080 | return false; |
| 1081 | // Ensure that the end of the SRCU grace period happens before |
| 1082 | // any subsequent code that the caller might execute. |
| 1083 | smp_mb(); // ^^^ |
| 1084 | return true; |
| 1085 | } |
| 1086 | EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu); |
| 1087 | |
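/*
 * Illustrative sketch, not part of this file's logic: the non-blocking
 * polling pattern.  start_poll_synchronize_srcu() hands back a cookie and
 * guarantees that a grace period will be started; some later context (for
 * example a periodic work item, not shown) checks the cookie and frees the
 * hypothetical foo_example object once the grace period has completed.
 */
static unsigned long __maybe_unused foo_example_begin_gp(struct srcu_struct *ssp)
{
	return start_poll_synchronize_srcu(ssp);	/* Cookie + GP request. */
}

static bool __maybe_unused foo_example_try_free(struct srcu_struct *ssp,
						unsigned long cookie,
						struct foo_example *old_fp)
{
	if (!poll_state_synchronize_srcu(ssp, cookie))
		return false;	/* Grace period not yet over; retry later. */
	kfree(old_fp);		/* All pre-existing readers are done with old_fp. */
	return true;
}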
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1088 | /* |
| 1089 | * Callback function for srcu_barrier() use. |
| 1090 | */ |
| 1091 | static void srcu_barrier_cb(struct rcu_head *rhp) |
| 1092 | { |
| 1093 | struct srcu_data *sdp; |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1094 | struct srcu_struct *ssp; |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1095 | |
| 1096 | sdp = container_of(rhp, struct srcu_data, srcu_barrier_head); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1097 | ssp = sdp->ssp; |
| 1098 | if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) |
| 1099 | complete(&ssp->srcu_barrier_completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1100 | } |
| 1101 | |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1102 | /** |
| 1103 | * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete. |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1104 | * @ssp: srcu_struct on which to wait for in-flight callbacks. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1105 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1106 | void srcu_barrier(struct srcu_struct *ssp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1107 | { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1108 | int cpu; |
| 1109 | struct srcu_data *sdp; |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1110 | unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1111 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1112 | check_init_srcu_struct(ssp); |
| 1113 | mutex_lock(&ssp->srcu_barrier_mutex); |
| 1114 | if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1115 | smp_mb(); /* Force ordering following return. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1116 | mutex_unlock(&ssp->srcu_barrier_mutex); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1117 | return; /* Someone else did our work for us. */ |
| 1118 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1119 | rcu_seq_start(&ssp->srcu_barrier_seq); |
| 1120 | init_completion(&ssp->srcu_barrier_completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1121 | |
| 1122 | /* Initial count prevents reaching zero until all CBs are posted. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1123 | atomic_set(&ssp->srcu_barrier_cpu_cnt, 1); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1124 | |
| 1125 | /* |
| 1126 | * Each pass through this loop enqueues a callback, but only |
| 1127 | * on CPUs already having callbacks enqueued. Note that if |
| 1128 | * a CPU already has callbacks enqueued, it must have already |
| 1129 | * registered the need for a future grace period, so all we |
| 1130 | * need do is enqueue a callback that will use the same |
| 1131 | * grace period as the last callback already in the queue. |
| 1132 | */ |
| 1133 | for_each_possible_cpu(cpu) { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1134 | sdp = per_cpu_ptr(ssp->sda, cpu); |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1135 | spin_lock_irq_rcu_node(sdp); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1136 | atomic_inc(&ssp->srcu_barrier_cpu_cnt); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1137 | sdp->srcu_barrier_head.func = srcu_barrier_cb; |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1138 | debug_rcu_head_queue(&sdp->srcu_barrier_head); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1139 | if (!rcu_segcblist_entrain(&sdp->srcu_cblist, |
Joel Fernandes (Google) | 77a40f9 | 2019-08-30 12:36:32 -0400 | [diff] [blame] | 1140 | &sdp->srcu_barrier_head)) { |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1141 | debug_rcu_head_unqueue(&sdp->srcu_barrier_head); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1142 | atomic_dec(&ssp->srcu_barrier_cpu_cnt); |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1143 | } |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1144 | spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1145 | } |
| 1146 | |
| 1147 | /* Remove the initial count, at which point reaching zero can happen. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1148 | if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt)) |
| 1149 | complete(&ssp->srcu_barrier_completion); |
| 1150 | wait_for_completion(&ssp->srcu_barrier_completion); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1151 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1152 | rcu_seq_end(&ssp->srcu_barrier_seq); |
| 1153 | mutex_unlock(&ssp->srcu_barrier_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1154 | } |
| 1155 | EXPORT_SYMBOL_GPL(srcu_barrier); |
| 1156 | |
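/*
 * Illustrative sketch, not part of this file's logic: srcu_barrier() is
 * typically used on teardown paths so that every callback queued by
 * call_srcu() has been invoked before the srcu_struct itself is cleaned
 * up.  The helper below is hypothetical; it assumes the srcu_struct was
 * set up with init_srcu_struct() or DEFINE_SRCU().
 */
static void __maybe_unused foo_example_teardown(struct srcu_struct *ssp)
{
	srcu_barrier(ssp);		/* Wait for in-flight SRCU callbacks. */
	cleanup_srcu_struct(ssp);	/* Now safe to release SRCU state. */
}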
| 1157 | /** |
| 1158 | * srcu_batches_completed - return batches completed. |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1159 | * @ssp: srcu_struct on which to report batch completion. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1160 | * |
| 1161 | * Report the number of batches, correlated with, but not necessarily |
| 1162 | * precisely the same as, the number of grace periods that have elapsed. |
| 1163 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1164 | unsigned long srcu_batches_completed(struct srcu_struct *ssp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1165 | { |
Paul E. McKenney | 39f9150 | 2019-12-22 19:39:35 -0800 | [diff] [blame] | 1166 | return READ_ONCE(ssp->srcu_idx); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1167 | } |
| 1168 | EXPORT_SYMBOL_GPL(srcu_batches_completed); |
| 1169 | |
| 1170 | /* |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1171 | * Core SRCU state machine. Push state bits of ->srcu_gp_seq |
| 1172 | * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has |
| 1173 | * completed in that state. |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1174 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1175 | static void srcu_advance_state(struct srcu_struct *ssp) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1176 | { |
| 1177 | int idx; |
| 1178 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1179 | mutex_lock(&ssp->srcu_gp_mutex); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1180 | |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1181 | /* |
| 1182 | * Because readers might be delayed for an extended period after |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1183 | * fetching ->srcu_idx for their index, at any point in time there |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1184 | * might well be readers using both idx=0 and idx=1. We therefore |
| 1185 | * need to wait for readers to clear from both index values before |
| 1186 | * invoking a callback. |
| 1187 | * |
| 1188 | * The load-acquire ensures that we see the accesses performed |
| 1189 | * by the prior grace period. |
| 1190 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1191 | idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */ |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1192 | if (idx == SRCU_STATE_IDLE) { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1193 | spin_lock_irq_rcu_node(ssp); |
| 1194 | if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { |
| 1195 | WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq)); |
| 1196 | spin_unlock_irq_rcu_node(ssp); |
| 1197 | mutex_unlock(&ssp->srcu_gp_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1198 | return; |
| 1199 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1200 | idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1201 | if (idx == SRCU_STATE_IDLE) |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1202 | srcu_gp_start(ssp); |
| 1203 | spin_unlock_irq_rcu_node(ssp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1204 | if (idx != SRCU_STATE_IDLE) { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1205 | mutex_unlock(&ssp->srcu_gp_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1206 | return; /* Someone else started the grace period. */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1207 | } |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1208 | } |
| 1209 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1210 | if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) { |
| 1211 | idx = 1 ^ (ssp->srcu_idx & 1); |
| 1212 | if (!try_check_zero(ssp, idx, 1)) { |
| 1213 | mutex_unlock(&ssp->srcu_gp_mutex); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1214 | return; /* readers present, retry later. */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1215 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1216 | srcu_flip(ssp); |
Paul E. McKenney | 7104260 | 2020-01-03 11:42:05 -0800 | [diff] [blame] | 1217 | spin_lock_irq_rcu_node(ssp); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1218 | rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2); |
Paul E. McKenney | 7104260 | 2020-01-03 11:42:05 -0800 | [diff] [blame] | 1219 | spin_unlock_irq_rcu_node(ssp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1220 | } |
| 1221 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1222 | if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) { |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1223 | |
| 1224 | /* |
| 1225 | * SRCU read-side critical sections are normally short, |
| 1226 | * so check at least twice in quick succession after a flip. |
| 1227 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1228 | idx = 1 ^ (ssp->srcu_idx & 1); |
| 1229 | if (!try_check_zero(ssp, idx, 2)) { |
| 1230 | mutex_unlock(&ssp->srcu_gp_mutex); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1231 | return; /* readers present, retry later. */ |
| 1232 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1233 | srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */ |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1234 | } |
| 1235 | } |
| 1236 | |
| 1237 | /* |
| 1238 | * Invoke a limited number of SRCU callbacks that have passed through |
| 1239 | * their grace period. If there are more to do, SRCU will reschedule |
| 1240 | * the workqueue. Note that needed memory barriers have been executed |
| 1241 | * in this task's context by srcu_readers_active_idx_check(). |
| 1242 | */ |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1243 | static void srcu_invoke_callbacks(struct work_struct *work) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1244 | { |
Joel Fernandes (Google) | ae5c234 | 2020-09-23 11:22:09 -0400 | [diff] [blame] | 1245 | long len; |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1246 | bool more; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1247 | struct rcu_cblist ready_cbs; |
| 1248 | struct rcu_head *rhp; |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1249 | struct srcu_data *sdp; |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1250 | struct srcu_struct *ssp; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1251 | |
Sebastian Andrzej Siewior | e81baf4 | 2018-12-11 12:12:38 +0100 | [diff] [blame] | 1252 | sdp = container_of(work, struct srcu_data, work); |
| 1253 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1254 | ssp = sdp->ssp; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1255 | rcu_cblist_init(&ready_cbs); |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1256 | spin_lock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1257 | rcu_segcblist_advance(&sdp->srcu_cblist, |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1258 | rcu_seq_current(&ssp->srcu_gp_seq)); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1259 | if (sdp->srcu_cblist_invoking || |
| 1260 | !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) { |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1261 | spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1262 | return; /* Someone else on the job or nothing to do. */ |
| 1263 | } |
| 1264 | |
| 1265 | /* We are on the job! Extract and invoke ready callbacks. */ |
| 1266 | sdp->srcu_cblist_invoking = true; |
| 1267 | rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs); |
Joel Fernandes (Google) | ae5c234 | 2020-09-23 11:22:09 -0400 | [diff] [blame] | 1268 | len = ready_cbs.len; |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1269 | spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1270 | rhp = rcu_cblist_dequeue(&ready_cbs); |
| 1271 | for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) { |
Paul E. McKenney | a602538 | 2017-04-28 15:39:34 -0700 | [diff] [blame] | 1272 | debug_rcu_head_unqueue(rhp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1273 | local_bh_disable(); |
| 1274 | rhp->func(rhp); |
| 1275 | local_bh_enable(); |
| 1276 | } |
Joel Fernandes (Google) | ae5c234 | 2020-09-23 11:22:09 -0400 | [diff] [blame] | 1277 | WARN_ON_ONCE(ready_cbs.len); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1278 | |
| 1279 | /* |
| 1280 | * Update counts, accelerate new callbacks, and if needed, |
| 1281 | * schedule another round of callback invocation. |
| 1282 | */ |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1283 | spin_lock_irq_rcu_node(sdp); |
Joel Fernandes (Google) | ae5c234 | 2020-09-23 11:22:09 -0400 | [diff] [blame] | 1284 | rcu_segcblist_add_len(&sdp->srcu_cblist, -len); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1285 | (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1286 | rcu_seq_snap(&ssp->srcu_gp_seq)); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1287 | sdp->srcu_cblist_invoking = false; |
| 1288 | more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist); |
Paul E. McKenney | d633198 | 2017-10-10 13:52:30 -0700 | [diff] [blame] | 1289 | spin_unlock_irq_rcu_node(sdp); |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1290 | if (more) |
| 1291 | srcu_schedule_cbs_sdp(sdp, 0); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1292 | } |
| 1293 | |
| 1294 | /* |
| 1295 | * Finished one round of SRCU grace period. Start another if there are |
| 1296 | * more SRCU callbacks queued, otherwise put SRCU into not-running state. |
| 1297 | */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1298 | static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1299 | { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1300 | bool pushgp = true; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1301 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1302 | spin_lock_irq_rcu_node(ssp); |
| 1303 | if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) { |
| 1304 | if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1305 | /* All requests fulfilled, time to go idle. */ |
| 1306 | pushgp = false; |
| 1307 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1308 | } else if (!rcu_seq_state(ssp->srcu_gp_seq)) { |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1309 | /* Outstanding request and no GP. Start one. */ |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1310 | srcu_gp_start(ssp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1311 | } |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1312 | spin_unlock_irq_rcu_node(ssp); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1313 | |
Paul E. McKenney | da915ad | 2017-04-05 09:01:53 -0700 | [diff] [blame] | 1314 | if (pushgp) |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1315 | queue_delayed_work(rcu_gp_wq, &ssp->work, delay); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1316 | } |
| 1317 | |
| 1318 | /* |
| 1319 | * This is the work-queue function that handles SRCU grace periods. |
| 1320 | */ |
Paul E. McKenney | 0d8a1e8 | 2017-06-15 17:06:38 -0700 | [diff] [blame] | 1321 | static void process_srcu(struct work_struct *work) |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1322 | { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1323 | struct srcu_struct *ssp; |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1324 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1325 | ssp = container_of(work, struct srcu_struct, work.work); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1326 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1327 | srcu_advance_state(ssp); |
| 1328 | srcu_reschedule(ssp, srcu_get_delay(ssp)); |
Paul E. McKenney | dad81a2 | 2017-03-25 17:23:44 -0700 | [diff] [blame] | 1329 | } |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1330 | |
| 1331 | void srcutorture_get_gp_data(enum rcutorture_type test_type, |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1332 | struct srcu_struct *ssp, int *flags, |
Paul E. McKenney | aebc826 | 2018-05-01 06:42:51 -0700 | [diff] [blame] | 1333 | unsigned long *gp_seq) |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1334 | { |
| 1335 | if (test_type != SRCU_FLAVOR) |
| 1336 | return; |
| 1337 | *flags = 0; |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1338 | *gp_seq = rcu_seq_current(&ssp->srcu_gp_seq); |
Paul E. McKenney | 7f6733c | 2017-04-18 17:17:35 -0700 | [diff] [blame] | 1339 | } |
| 1340 | EXPORT_SYMBOL_GPL(srcutorture_get_gp_data); |
Paul E. McKenney | 1f4f6da | 2017-04-21 11:16:32 -0700 | [diff] [blame] | 1341 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1342 | void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1343 | { |
| 1344 | int cpu; |
| 1345 | int idx; |
Paul E. McKenney | ac3748c | 2017-05-22 13:59:52 -0700 | [diff] [blame] | 1346 | unsigned long s0 = 0, s1 = 0; |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1347 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1348 | idx = ssp->srcu_idx & 0x1; |
Paul E. McKenney | 52e17ba | 2018-06-19 08:54:37 -0700 | [diff] [blame] | 1349 | pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):", |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1350 | tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx); |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1351 | for_each_possible_cpu(cpu) { |
| 1352 | unsigned long l0, l1; |
| 1353 | unsigned long u0, u1; |
| 1354 | long c0, c1; |
Paul E. McKenney | 5ab07a8 | 2018-05-22 12:28:04 -0700 | [diff] [blame] | 1355 | struct srcu_data *sdp; |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1356 | |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1357 | sdp = per_cpu_ptr(ssp->sda, cpu); |
Paul E. McKenney | b68c614 | 2020-01-03 16:36:59 -0800 | [diff] [blame] | 1358 | u0 = data_race(sdp->srcu_unlock_count[!idx]); |
| 1359 | u1 = data_race(sdp->srcu_unlock_count[idx]); |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1360 | |
| 1361 | /* |
| 1362 | * Make sure that a lock is always counted if the corresponding |
| 1363 | * unlock is counted. |
| 1364 | */ |
| 1365 | smp_rmb(); |
| 1366 | |
Paul E. McKenney | b68c614 | 2020-01-03 16:36:59 -0800 | [diff] [blame] | 1367 | l0 = data_race(sdp->srcu_lock_count[!idx]); |
| 1368 | l1 = data_race(sdp->srcu_lock_count[idx]); |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1369 | |
| 1370 | c0 = l0 - u0; |
| 1371 | c1 = l1 - u1; |
Paul E. McKenney | 7e210a6 | 2019-06-28 17:11:10 -0700 | [diff] [blame] | 1372 | pr_cont(" %d(%ld,%ld %c)", |
| 1373 | cpu, c0, c1, |
| 1374 | "C."[rcu_segcblist_empty(&sdp->srcu_cblist)]); |
Paul E. McKenney | ac3748c | 2017-05-22 13:59:52 -0700 | [diff] [blame] | 1375 | s0 += c0; |
| 1376 | s1 += c1; |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1377 | } |
Paul E. McKenney | ac3748c | 2017-05-22 13:59:52 -0700 | [diff] [blame] | 1378 | pr_cont(" T(%ld,%ld)\n", s0, s1); |
Paul E. McKenney | 115a1a5 | 2017-05-22 13:31:03 -0700 | [diff] [blame] | 1379 | } |
| 1380 | EXPORT_SYMBOL_GPL(srcu_torture_stats_print); |
| 1381 | |
Paul E. McKenney | 1f4f6da | 2017-04-21 11:16:32 -0700 | [diff] [blame] | 1382 | static int __init srcu_bootup_announce(void) |
| 1383 | { |
| 1384 | pr_info("Hierarchical SRCU implementation.\n"); |
Paul E. McKenney | 0c8e0e3 | 2017-04-28 11:24:22 -0700 | [diff] [blame] | 1385 | if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF) |
| 1386 | pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff); |
Paul E. McKenney | 1f4f6da | 2017-04-21 11:16:32 -0700 | [diff] [blame] | 1387 | return 0; |
| 1388 | } |
| 1389 | early_initcall(srcu_bootup_announce); |
Paul E. McKenney | e0fcba9 | 2018-08-14 08:45:54 -0700 | [diff] [blame] | 1390 | |
| 1391 | void __init srcu_init(void) |
| 1392 | { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1393 | struct srcu_struct *ssp; |
Paul E. McKenney | e0fcba9 | 2018-08-14 08:45:54 -0700 | [diff] [blame] | 1394 | |
| 1395 | srcu_init_done = true; |
| 1396 | while (!list_empty(&srcu_boot_list)) { |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1397 | ssp = list_first_entry(&srcu_boot_list, struct srcu_struct, |
Paul E. McKenney | 4e6ea4e | 2018-08-14 14:41:49 -0700 | [diff] [blame] | 1398 | work.work.entry); |
Paul E. McKenney | aacb5d9 | 2018-10-28 10:32:51 -0700 | [diff] [blame] | 1399 | check_init_srcu_struct(ssp); |
| 1400 | list_del_init(&ssp->work.work.entry); |
| 1401 | queue_work(rcu_gp_wq, &ssp->work.work); |
Paul E. McKenney | e0fcba9 | 2018-08-14 08:45:54 -0700 | [diff] [blame] | 1402 | } |
| 1403 | } |
Paul E. McKenney | fe15b50 | 2019-04-05 16:15:00 -0700 | [diff] [blame] | 1404 | |
| 1405 | #ifdef CONFIG_MODULES |
| 1406 | |
| 1407 | /* Initialize any global-scope srcu_struct structures used by this module. */ |
| 1408 | static int srcu_module_coming(struct module *mod) |
| 1409 | { |
| 1410 | int i; |
| 1411 | struct srcu_struct **sspp = mod->srcu_struct_ptrs; |
| 1412 | int ret; |
| 1413 | |
| 1414 | for (i = 0; i < mod->num_srcu_structs; i++) { |
| 1415 | ret = init_srcu_struct(*(sspp++)); |
| 1416 | if (WARN_ON_ONCE(ret)) |
| 1417 | return ret; |
| 1418 | } |
| 1419 | return 0; |
| 1420 | } |
| 1421 | |
| 1422 | /* Clean up any global-scope srcu_struct structures used by this module. */ |
| 1423 | static void srcu_module_going(struct module *mod) |
| 1424 | { |
| 1425 | int i; |
| 1426 | struct srcu_struct **sspp = mod->srcu_struct_ptrs; |
| 1427 | |
| 1428 | for (i = 0; i < mod->num_srcu_structs; i++) |
| 1429 | cleanup_srcu_struct(*(sspp++)); |
| 1430 | } |
| 1431 | |
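/*
 * Illustrative sketch, not part of this file's logic: a module-global
 * srcu_struct defined with DEFINE_SRCU() is recorded in the module's
 * srcu_struct_ptrs array, so srcu_module_coming() and srcu_module_going()
 * above initialize and clean it up across module load and unload.  The
 * foo_example names are hypothetical.
 */
DEFINE_SRCU(foo_example_srcu);

static int __maybe_unused foo_example_module_read(void)
{
	int idx;

	idx = srcu_read_lock(&foo_example_srcu);
	/* ... access module data protected by foo_example_srcu ... */
	srcu_read_unlock(&foo_example_srcu, idx);
	return 0;
}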
| 1432 | /* Handle one module, either coming or going. */ |
| 1433 | static int srcu_module_notify(struct notifier_block *self, |
| 1434 | unsigned long val, void *data) |
| 1435 | { |
| 1436 | struct module *mod = data; |
| 1437 | int ret = 0; |
| 1438 | |
| 1439 | switch (val) { |
| 1440 | case MODULE_STATE_COMING: |
| 1441 | ret = srcu_module_coming(mod); |
| 1442 | break; |
| 1443 | case MODULE_STATE_GOING: |
| 1444 | srcu_module_going(mod); |
| 1445 | break; |
| 1446 | default: |
| 1447 | break; |
| 1448 | } |
| 1449 | return ret; |
| 1450 | } |
| 1451 | |
| 1452 | static struct notifier_block srcu_module_nb = { |
| 1453 | .notifier_call = srcu_module_notify, |
| 1454 | .priority = 0, |
| 1455 | }; |
| 1456 | |
| 1457 | static __init int init_srcu_module_notifier(void) |
| 1458 | { |
| 1459 | int ret; |
| 1460 | |
| 1461 | ret = register_module_notifier(&srcu_module_nb); |
| 1462 | if (ret) |
| 1463 | pr_warn("Failed to register srcu module notifier\n"); |
| 1464 | return ret; |
| 1465 | } |
| 1466 | late_initcall(init_srcu_module_notifier); |
| 1467 | |
| 1468 | #endif /* #ifdef CONFIG_MODULES */ |