// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	unsigned long gp_seq;		/* Grace-period counter. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
	.gp_seq		= 0 - 300UL,
};
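
/*
 * How the two tail pointers partition the single callback list, as an
 * illustrative sketch (callbacks A and B are hypothetical): suppose A's
 * grace period has elapsed but B's has not.  Then:
 *
 *	rcu_ctrlblk.rcucblist -> A -> B -> NULL
 *	rcu_ctrlblk.donetail == &A->next	(A is ready to invoke)
 *	rcu_ctrlblk.curtail  == &B->next	(B awaits a grace period)
 *
 * rcu_qs() promotes everything queued so far by advancing ->donetail
 * to ->curtail, and rcu_process_callbacks() later splices off and
 * invokes the sublist ending at ->donetail.  (The ->gp_seq initializer
 * of 0 - 300UL apparently starts the counter near wraparound so that
 * wrap-handling bugs would show up soon after boot.)
 */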

void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);

/* Record an RCU quiescent state. */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 1);
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases or
 * freeing it directly (for kfree).  Return true if kfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
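
/*
 * The kvfree path above works because kvfree_rcu() stores the offset of
 * the rcu_head within its enclosing structure in ->func rather than a
 * real function pointer; such offsets are small, so
 * __is_kvfree_rcu_offset() can tell them apart from kernel text
 * addresses.  An illustrative sketch (struct foo and fp are
 * hypothetical, and the expansion shown is approximate):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;	// offsetof(struct foo, rcu) is small
 *	};
 *
 *	kvfree_rcu(fp, rcu);
 *	// roughly: call_rcu(&fp->rcu,
 *	//	(rcu_callback_t)(unsigned long)offsetof(struct foo, rcu));
 *
 * rcu_reclaim_tiny() then recovers the original pointer by subtracting
 * that offset from the rcu_head address before calling kvfree().
 */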

/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent
 * state, and so on a UP system, synchronize_rcu() need do nothing.
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
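
/*
 * A minimal update-side sketch of synchronize_rcu() (illustrative only;
 * gp, gp_lock, newp, and oldp are hypothetical):
 *
 *	spin_lock(&gp_lock);
 *	oldp = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, newp);	// publish the replacement
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();		// wait out pre-existing readers
 *	kfree(oldp);			// now safe to reclaim
 */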

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
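
/*
 * A minimal call_rcu() usage sketch (illustrative only; struct foo,
 * free_foo(), and fp are hypothetical):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void free_foo(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rcu));
 *	}
 *
 *	// After making fp unreachable to new readers:
 *	call_rcu(&fp->rcu, free_foo);
 */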

/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)
{
	unsigned long gp_seq = get_state_synchronize_rcu();

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
	return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);

/*
 * Return true if the grace period corresponding to oldstate has completed
 * and false otherwise.  For more information, see the Tree RCU header
 * comment.
 */
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
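
/*
 * A sketch of the polling interface (illustrative only; the reclaim
 * logic around it is hypothetical):
 *
 *	unsigned long cookie = start_poll_synchronize_rcu();
 *	...
 *	if (poll_state_synchronize_rcu(cookie))
 *		kfree(oldp);	// a full grace period has elapsed
 *	else
 *		...		// not yet; check again later
 */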

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}