// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

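/*
 * Usage note (illustrative, not part of the original source): this code
 * is normally built in, so exp_holdoff is adjusted at boot time, for
 * example by passing "srcutree.exp_holdoff=0" on the kernel command
 * line to disable auto-expediting.
 */
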
/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them, so this function does not
 * touch the ->srcu_lock_count[] and ->srcu_unlock_count[] arrays.
 */
static void init_srcu_struct_nodes(struct srcu_struct *ssp)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter tells us that ->sda has already been wired up to srcu_data,
 * so there is no need to allocate it here.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_nodes(ssp);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

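/*
 * Illustrative sketch (not part of this file): a dynamically allocated
 * srcu_struct must be initialized before use and cleaned up afterwards,
 * whereas statically allocated domains can use DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU() and need no runtime initialization.  The my_srcu
 * domain below is hypothetical.
 *
 *	struct srcu_struct my_srcu;
 *
 *	if (init_srcu_struct(&my_srcu))
 *		return -ENOMEM;		// Allocation failure.
 *	...
 *	cleanup_srcu_struct(&my_srcu);	// See teardown rules below.
 */
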
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between. This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

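/*
 * Illustrative teardown sketch (not part of this file): before invoking
 * cleanup_srcu_struct(), the caller must stop posting new callbacks, use
 * srcu_barrier() to wait for any in-flight callbacks, and ensure that no
 * readers remain, otherwise the WARN_ON()s above will fire.
 *
 *	// No further call_srcu() invocations at this point.
 *	srcu_barrier(&my_srcu);		// Wait for pending callbacks.
 *	cleanup_srcu_struct(&my_srcu);	// Now safe to deconstruct.
 */
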
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

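/*
 * Illustrative read-side sketch (not part of this file): readers normally
 * use the srcu_read_lock()/srcu_read_unlock() wrappers, which funnel into
 * the two functions above, and fetch protected pointers with
 * srcu_dereference().  The gp pointer and struct foo are hypothetical.
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);	// Unlike RCU, this may block.
 *	srcu_read_unlock(&my_srcu, idx);
 */
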
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle.  */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	idx = srcu_read_lock(ssp);
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
	return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct in which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

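/*
 * Illustrative sketch (not part of this file): a typical call_srcu() user
 * embeds an rcu_head in the protected structure and frees the structure
 * from the callback.  struct foo and foo_reclaim() are hypothetical.
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rh);
 *
 *		kfree(fp);
 *	}
 *
 *	...
 *	call_srcu(&my_srcu, &fp->rh, foo_reclaim);
 */
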
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both indexes to drain to zero.  To avoid the
 * possible starvation of synchronize_srcu(), it first waits for the
 * count of the index=((->srcu_idx & 1) ^ 1) to drain to zero, then
 * flips the srcu_idx and waits for the count of the other index.
Paul E. McKenneydad81a22017-03-25 17:23:44 -0700967 *
968 * Can block; must be called from process context.
969 *
970 * Note that it is illegal to call synchronize_srcu() from the corresponding
971 * SRCU read-side critical section; doing so will result in deadlock.
972 * However, it is perfectly legal to call synchronize_srcu() on one
973 * srcu_struct from some other srcu_struct's read-side critical section,
974 * as long as the resulting graph of srcu_structs is acyclic.
975 *
976 * There are memory-ordering constraints implied by synchronize_srcu().
977 * On systems with more than one CPU, when synchronize_srcu() returns,
978 * each CPU is guaranteed to have executed a full memory barrier since
Paul E. McKenney6eb95cc2018-07-07 18:12:26 -0700979 * the end of its last corresponding SRCU read-side critical section
Paul E. McKenneydad81a22017-03-25 17:23:44 -0700980 * whose beginning preceded the call to synchronize_srcu(). In addition,
981 * each CPU having an SRCU read-side critical section that extends beyond
982 * the return from synchronize_srcu() is guaranteed to have executed a
983 * full memory barrier after the beginning of synchronize_srcu() and before
984 * the beginning of that SRCU read-side critical section. Note that these
985 * guarantees include CPUs that are offline, idle, or executing in user mode,
986 * as well as CPUs that are executing in the kernel.
987 *
988 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
989 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
990 * to have executed a full memory barrier during the execution of
991 * synchronize_srcu(). This guarantee applies even if CPU A and CPU B
992 * are the same CPU, but again only if the system has more than one CPU.
993 *
994 * Of course, these memory-ordering guarantees apply only when
995 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
996 * passed the same srcu_struct structure.
Paul E. McKenney2da4b2a2017-04-25 11:34:40 -0700997 *
Paul E. McKenney3d3a0d12021-04-16 16:53:16 -0700998 * Implementation of these memory-ordering guarantees is similar to
999 * that of synchronize_rcu().
1000 *
Paul E. McKenney2da4b2a2017-04-25 11:34:40 -07001001 * If SRCU is likely idle, expedite the first request. This semantic
1002 * was provided by Classic SRCU, and is relied upon by its users, so TREE
1003 * SRCU must also provide it. Note that detecting idleness is heuristic
1004 * and subject to both false positives and negatives.
Paul E. McKenneydad81a22017-03-25 17:23:44 -07001005 */
Paul E. McKenneyaacb5d92018-10-28 10:32:51 -07001006void synchronize_srcu(struct srcu_struct *ssp)
Paul E. McKenneydad81a22017-03-25 17:23:44 -07001007{
Paul E. McKenneyaacb5d92018-10-28 10:32:51 -07001008 if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
1009 synchronize_srcu_expedited(ssp);
Paul E. McKenneydad81a22017-03-25 17:23:44 -07001010 else
Paul E. McKenneyaacb5d92018-10-28 10:32:51 -07001011 __synchronize_srcu(ssp, true);
Paul E. McKenneydad81a22017-03-25 17:23:44 -07001012}
1013EXPORT_SYMBOL_GPL(synchronize_srcu);
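
/*
 * Illustrative sketch, not part of this file: the classic updater pattern,
 * with hypothetical names my_srcu, my_data, and my_lock.  Readers traverse
 * my_data between srcu_read_lock(&my_srcu) and srcu_read_unlock(&my_srcu),
 * so the kfree() below cannot run until all pre-existing readers are done.
 *
 *	spin_lock(&my_lock);
 *	old = rcu_dereference_protected(my_data, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_data, new_data);
 *	spin_unlock(&my_lock);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old);
 */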

/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that the grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	// Any prior manipulation of SRCU-protected data must happen
	// before the load from ->srcu_gp_seq.
	smp_mb();
	return rcu_seq_snap(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);
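
/*
 * Illustrative sketch, not part of this file: pairing
 * get_state_synchronize_srcu() with call_srcu(), which guarantees that
 * the needed grace period actually starts.  The names my_srcu, my_head,
 * and my_cb are hypothetical.
 *
 *	unsigned long cookie;
 *
 *	cookie = get_state_synchronize_srcu(&my_srcu);
 *	call_srcu(&my_srcu, &my_head, my_cb);
 *	...
 *	if (poll_state_synchronize_srcu(&my_srcu, cookie))
 *		pr_info("grace period completed\n");
 */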

/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of CPU overhead.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns %true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 *
 * Because cookies are finite in size, wrapping/overflow is possible.
 * This is more pronounced on 32-bit systems, where cookies are 32 bits
 * and wrapping could in theory happen in about 14 hours assuming
 * 25-microsecond expedited SRCU grace periods.  However, a more likely
 * overflow lower bound is on the order of 24 days in the case of
 * one-millisecond SRCU grace periods.  Of course, wrapping in a 64-bit
 * system requires geologic timespans, as in more than seven million years
 * even for expedited SRCU grace periods.
 *
 * Wrapping/overflow is much more of an issue for CONFIG_SMP=n systems
 * that also have CONFIG_PREEMPTION=n, which selects Tiny SRCU.  Tiny SRCU
 * uses a 16-bit cookie, which rcutorture routinely wraps in a matter of a
 * few minutes.  If this proves to be a problem, this counter will be
 * expanded to the same size as for Tree SRCU.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
		return false;
	// Ensure that the end of the SRCU grace period happens before
	// any subsequent code that the caller might execute.
	smp_mb(); // ^^^
	return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
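
/*
 * Illustrative sketch, not part of this file: a hypothetical caller that
 * uses start_poll_synchronize_srcu() to obtain a cookie and kick off the
 * grace period, then polls instead of blocking.  The names my_srcu and
 * do_something_else() are made up for this example.
 *
 *	unsigned long cookie = start_poll_synchronize_srcu(&my_srcu);
 *
 *	while (!poll_state_synchronize_srcu(&my_srcu, cookie))
 *		do_something_else();
 *
 * Once the loop exits, a full SRCU grace period has elapsed since the
 * cookie was created.
 */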

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
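
/*
 * Illustrative sketch, not part of this file: a hypothetical module-exit
 * path.  Because cleanup_srcu_struct() must not be invoked while call_srcu()
 * callbacks remain in flight, srcu_barrier() waits for them first.  The
 * names my_exit and my_srcu are made up for this example.
 *
 *	static void __exit my_exit(void)
 *	{
 *		...
 *		srcu_barrier(&my_srcu);
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */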

/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan has
 * completed in that state.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		spin_lock_irq_rcu_node(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
		spin_unlock_irq_rcu_node(ssp);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(ssp); /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	long len;
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work);

	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return; /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	len = ready_cbs.len;
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}
	WARN_ON_ONCE(ready_cbs.len);

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(ssp);
	srcu_reschedule(ssp, srcu_get_delay(ssp));
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = ssp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;

		sdp = per_cpu_ptr(ssp->sda, cpu);
		u0 = data_race(sdp->srcu_unlock_count[!idx]);
		u1 = data_race(sdp->srcu_unlock_count[idx]);

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = data_race(sdp->srcu_lock_count[!idx]);
		l1 = data_race(sdp->srcu_lock_count[idx]);

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld %c)",
			cpu, c0, c1,
			"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	/*
	 * Once srcu_init_done is set, call_srcu() can follow the normal
	 * path and queue delayed work.  This must follow RCU workqueue
	 * creation and timer initialization.
	 */
	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		list_del_init(&ssp->work.work.entry);
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}

#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
	int ret;

	for (i = 0; i < mod->num_srcu_structs; i++) {
		ret = init_srcu_struct(*(sspp++));
		if (WARN_ON_ONCE(ret))
			return ret;
	}
	return 0;
}

/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;

	for (i = 0; i < mod->num_srcu_structs; i++)
		cleanup_srcu_struct(*(sspp++));
}

/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = srcu_module_coming(mod);
		break;
	case MODULE_STATE_GOING:
		srcu_module_going(mod);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block srcu_module_nb = {
	.notifier_call = srcu_module_notify,
	.priority = 0,
};

static __init int init_srcu_module_notifier(void)
{
	int ret;

	ret = register_module_notifier(&srcu_module_nb);
	if (ret)
		pr_warn("Failed to register srcu module notifier\n");
	return ret;
}
late_initcall(init_srcu_module_notifier);

#endif /* #ifdef CONFIG_MODULES */