/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
};
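
/*
 * The callbacks form a single singly-linked list carved into two
 * segments by the two tail pointers: callbacks before *donetail have
 * had a grace period elapse and may be invoked, while those between
 * *donetail and *curtail are still waiting.  An illustrative snapshot
 * (not code from this file) with three queued callbacks, the first of
 * which is already "done":
 *
 *	rcucblist -> A -> B -> C -> NULL
 *	donetail  == &A->next	(A is ready to invoke)
 *	curtail   == &C->next	(new callbacks are appended here)
 *
 * When the list is empty, both tail pointers equal &rcu_ctrlblk.rcucblist,
 * as in the initializer above.
 */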

void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);
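
/*
 * Conceptually, wait_rcu_gp(call_rcu) queues a callback that fires a
 * completion once a grace period has elapsed, then blocks on that
 * completion.  A minimal sketch of the idea, assuming the usual
 * rcu_synchronize/wakeme_after_rcu pairing from the common RCU code
 * (the real implementation lives in update.c):
 *
 *	struct rcu_synchronize rs;
 *
 *	init_completion(&rs.completion);
 *	call_rcu(&rs.head, wakeme_after_rcu);
 *	wait_for_completion(&rs.completion);
 */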

/* Record an RCU quiescent state. */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq(RCU_SOFTIRQ);
	}
	local_irq_restore(flags);
}
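
/*
 * In terms of the snapshot above, advancing donetail to curtail
 * reclassifies every callback queued so far as "done":
 *
 *	before:	rcucblist -> A -> B -> NULL, donetail == &rcucblist
 *	after:	donetail == curtail == &B->next	(A and B may be invoked)
 *
 * The raised softirq then splices and invokes them in
 * rcu_process_callbacks().
 */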

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}
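
/*
 * For orientation only: at the time of this code, the call site in the
 * timer-tick path looked roughly like the following:
 *
 *	void update_process_times(int user_tick)
 *	{
 *		...
 *		rcu_check_callbacks(user_tick);
 *		...
 *	}
 *
 * A tick taken in user mode is itself a quiescent state, hence rcu_qs().
 * Otherwise, if callbacks are waiting, a reschedule is requested so that
 * a quiescent state arrives sooner.
 */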

/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim("", list);
		local_bh_enable();
		list = next;
	}
}
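
/*
 * A worked example of the splice above, continuing the A/B snapshot
 * with both callbacks "done" (donetail == curtail == &B->next):
 *
 *	list = rcucblist;	(list -> A -> B -> NULL)
 *	rcucblist = *donetail;	(rcucblist == NULL: nothing pending)
 *	*donetail = NULL;	(already NULL here; this matters when
 *				 not-yet-done callbacks follow B)
 *	curtail = donetail = &rcucblist;	(list is empty again)
 *
 * The callbacks are then invoked outside the irq-disabled region, with
 * only bottom halves disabled around each invocation.
 */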

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent
 * state, and so on a UP system, synchronize_rcu() need do nothing.
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
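
/*
 * Typical updater-side use, assuming a hypothetical RCU-protected
 * global pointer gp guarded by gp_lock (illustration only):
 *
 *	p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, NULL);
 *	synchronize_rcu();
 *	kfree(p);	(safe: all pre-existing readers have finished)
 */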

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
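
/*
 * Typical use, with a hypothetical struct foo embedding the rcu_head
 * (illustration only):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rh)
 *	{
 *		kfree(container_of(rh, struct foo, rh));
 *	}
 *
 *	call_rcu(&p->rh, foo_reclaim);	(p is freed after a grace period)
 */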

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
	srcu_init();
}