/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu.h"

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
	sp->completed = 0;
	sp->srcu_gp_seq = 0;
	atomic_set(&sp->srcu_exp_cnt, 0);
	spin_lock_init(&sp->queue_lock);
	rcu_segcblist_init(&sp->srcu_cblist);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	sp->per_cpu_ref = alloc_percpu(struct srcu_array);
	return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
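
/*
 * Illustrative usage sketch (not part of this file; my_srcu, my_init(),
 * and my_exit() are hypothetical names): a subsystem typically declares
 * its own srcu_struct, initializes it once before any readers or
 * updaters can run, and tears it down only after all readers, updaters,
 * and callbacks have finished.
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */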

/*
 * Returns approximate total of the readers' ->lock_count[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);

		sum += READ_ONCE(cpuc->lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->unlock_count[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);

		sum += READ_ONCE(cpuc->unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted. Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side. In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between. This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * Possible bug: There is no guarantee that there haven't been
	 * ULONG_MAX increments of ->lock_count[] since the unlocks were
	 * counted, meaning that this could return true even if there are
	 * still active readers. Since there are no memory barriers around
	 * srcu_flip(), the CPU is not required to increment ->completed
	 * before running srcu_readers_unlock_idx(), which means that there
	 * could be an arbitrarily large number of critical sections that
	 * execute after srcu_readers_unlock_idx() but use the old value
	 * of ->completed.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);

		sum += READ_ONCE(cpuc->lock_count[0]);
		sum += READ_ONCE(cpuc->lock_count[1]);
		sum -= READ_ONCE(cpuc->unlock_count[0]);
		sum -= READ_ONCE(cpuc->unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL	1

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	WARN_ON_ONCE(atomic_read(&sp->srcu_exp_cnt));
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	if (WARN_ON(!rcu_segcblist_empty(&sp->srcu_cblist)))
		return; /* Leakage unless caller handles error. */
	flush_delayed_work(&sp->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE)) {
		pr_info("cleanup_srcu_struct: Active srcu_struct %lu CBs %c state: %d\n",
			rcu_segcblist_n_cbs(&sp->srcu_cblist),
			".E"[rcu_segcblist_empty(&sp->srcu_cblist)],
			rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->per_cpu_ref);
	sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
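
/*
 * Teardown ordering sketch (my_srcu and my_teardown() are hypothetical
 * names): every srcu_read_lock() must have been matched by
 * srcu_read_unlock(), and every callback passed to call_srcu() must
 * have been invoked, before cleanup_srcu_struct() may be called, or the
 * WARN_ON()s above will fire.  One common pattern:
 *
 *	static void my_teardown(void)
 *	{
 *		(stop posting new call_srcu() callbacks first)
 *		srcu_barrier(&my_srcu);
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */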

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->completed) & 0x1;
	__this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->per_cpu_ref->unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
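
/*
 * Reader-side usage sketch.  Readers normally go through the
 * srcu_read_lock() and srcu_read_unlock() wrappers in
 * include/linux/srcu.h, which invoke the two functions above.  The
 * names my_srcu and my_ptr are hypothetical:
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(my_ptr, &my_srcu);
 *	... use p; sleeping is legal, but do not block indefinitely ...
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * The returned index selects which rank of the per-CPU lock_count[]
 * and unlock_count[] counters the matching unlock increments, so it
 * must be passed back to srcu_read_unlock() unchanged.
 */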

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY	5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	int state;

	rcu_segcblist_accelerate(&sp->srcu_cblist,
				 rcu_seq_snap(&sp->srcu_gp_seq));
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->completed is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !!atomic_read(&sp->srcu_exp_cnt) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	WRITE_ONCE(sp->completed, sp->completed + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * End an SRCU grace period.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	rcu_seq_end(&sp->srcu_gp_seq);

	spin_lock_irq(&sp->queue_lock);
	rcu_segcblist_advance(&sp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	spin_unlock_irq(&sp->queue_lock);
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       rcu_callback_t func)
{
	unsigned long flags;

	head->next = NULL;
	head->func = func;
	spin_lock_irqsave(&sp->queue_lock, flags);
	smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
	rcu_segcblist_enqueue(&sp->srcu_cblist, head, false);
	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_IDLE) {
		srcu_gp_start(sp);
		queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
	}
	spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);
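
/*
 * Example use of call_srcu() (a sketch; my_srcu, my_ptr, struct my_data,
 * and my_free_cb() are hypothetical).  The rcu_head is typically
 * embedded in the structure being freed, and the callback is invoked
 * only after a full SRCU grace period has elapsed for my_srcu:
 *
 *	struct my_data {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void my_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_data, rh));
 *	}
 *
 *	rcu_assign_pointer(my_ptr, new_p);
 *	call_srcu(&my_srcu, &old_p->rh, my_free_cb);
 */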

static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp)
{
	struct rcu_synchronize rcu;
	struct rcu_head *head = &rcu.head;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	init_completion(&rcu.completion);

	head->next = NULL;
	head->func = wakeme_after_rcu;
	spin_lock_irq(&sp->queue_lock);
	smp_mb__after_unlock_lock(); /* Caller's prior accesses before GP. */
	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_IDLE) {
		/* steal the processing owner */
		rcu_segcblist_enqueue(&sp->srcu_cblist, head, false);
		srcu_gp_start(sp);
		spin_unlock_irq(&sp->queue_lock);
		/* give the processing owner to work_struct */
		srcu_reschedule(sp, 0);
	} else {
		rcu_segcblist_enqueue(&sp->srcu_cblist, head, false);
		spin_unlock_irq(&sp->queue_lock);
	}

	wait_for_completion(&rcu.completion);
	smp_mb(); /* Caller's later accesses after GP. */
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	bool do_norm = rcu_gp_is_normal();

	if (!do_norm) {
		atomic_inc(&sp->srcu_exp_cnt);
		smp_mb__after_atomic(); /* increment before GP. */
	}
	__synchronize_srcu(sp);
	if (!do_norm) {
		smp_mb__before_atomic(); /* GP before decrement. */
		atomic_dec(&sp->srcu_exp_cnt);
	}
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both indexes to drain to zero.  To avoid
 * starvation of synchronize_srcu(), it first waits for the count of
 * index ((->completed & 1) ^ 1) to drain to zero, then flips
 * ->completed and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
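
/*
 * Typical synchronous update pattern (a sketch with hypothetical names
 * my_srcu, my_ptr, and my_update_lock): unpublish the old data, wait
 * for all pre-existing SRCU readers to finish, then free it directly
 * rather than via call_srcu():
 *
 *	old_p = rcu_dereference_protected(my_ptr,
 *					  lockdep_is_held(&my_update_lock));
 *	rcu_assign_pointer(my_ptr, new_p);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old_p);
 */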

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
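
/*
 * Illustrative use (a sketch; my_srcu is hypothetical): callers can
 * snapshot this counter and compare it later to detect that at least
 * one counter flip, and hence some grace-period progress, has occurred
 * in the interim:
 *
 *	unsigned long snap = srcu_batches_completed(&my_srcu);
 *	...
 *	if (srcu_batches_completed(&my_srcu) != snap)
 *		... at least one flip has happened since the snapshot ...
 */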

/*
 * Core SRCU state machine.  Advance the grace period through its SCAN1
 * and SCAN2 states in ->srcu_gp_seq, and advance the callbacks on
 * ->srcu_cblist as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp)
{
	int idx;

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->completed for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq(&sp->queue_lock);
		if (rcu_segcblist_empty(&sp->srcu_cblist)) {
			spin_unlock_irq(&sp->queue_lock);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		spin_unlock_irq(&sp->queue_lock);
		if (idx != SRCU_STATE_IDLE)
			return; /* Someone else started the grace period. */
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->completed & 1);
		if (!try_check_zero(sp, idx, 1))
			return; /* readers present, retry later. */
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->completed & 1);
		if (!try_check_zero(sp, idx, 2))
			return; /* readers present, retry later. */
		srcu_gp_end(sp);
	}
}
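
/*
 * Summary of the grace-period state machine driven above (descriptive
 * comment; the states are those held in the low-order bits of
 * ->srcu_gp_seq):
 *
 *	SRCU_STATE_IDLE:  no grace period in progress; srcu_gp_start()
 *			  moves to SCAN1 once callbacks are queued.
 *	SRCU_STATE_SCAN1: wait for pre-existing readers on the inactive
 *			  index, then srcu_flip() and advance to SCAN2.
 *	SRCU_STATE_SCAN2: wait for readers on the other index, then
 *			  srcu_gp_end() returns the state to IDLE and
 *			  advances callbacks for invocation.
 */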

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;

	spin_lock_irq(&sp->queue_lock);
	if (!rcu_segcblist_ready_cbs(&sp->srcu_cblist)) {
		spin_unlock_irq(&sp->queue_lock);
		return;
	}
	rcu_cblist_init(&ready_cbs);
	rcu_segcblist_extract_done_cbs(&sp->srcu_cblist, &ready_cbs);
	spin_unlock_irq(&sp->queue_lock);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}
	spin_lock_irq(&sp->queue_lock);
	rcu_segcblist_insert_count(&sp->srcu_cblist, &ready_cbs);
	spin_unlock_irq(&sp->queue_lock);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pending = true;
	int state;

	if (rcu_segcblist_empty(&sp->srcu_cblist)) {
		spin_lock_irq(&sp->queue_lock);
		state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (rcu_segcblist_empty(&sp->srcu_cblist) &&
		    state == SRCU_STATE_IDLE)
			pending = false;
		spin_unlock_irq(&sp->queue_lock);
	}

	if (pending)
		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_batches(sp);
	srcu_invoke_callbacks(sp);
	srcu_reschedule(sp, atomic_read(&sp->srcu_exp_cnt) ? 0 : SRCU_INTERVAL);
}
EXPORT_SYMBOL_GPL(process_srcu);