// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For a detailed explanation of the Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
};

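/*
 * Illustrative sketch (not part of the original code): the callback
 * list is singly linked from ->rcucblist, with two tail pointers that
 * each reference a ->next field (or ->rcucblist itself when the list
 * is empty).  With callbacks A and B done and C still waiting:
 *
 *	rcucblist -> A -> B -> C -> NULL
 *	            donetail == &B->next, curtail == &C->next
 */
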
/* Wait for all previously queued RCU callbacks to be invoked. */
void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);

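/*
 * Illustrative usage sketch (not part of the original code): module
 * unload is the classic caller, ensuring that no queued callback still
 * points into module text by the time that text is freed:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		...
 *		rcu_barrier();
 *	}
 *	module_exit(foo_exit);
 *
 * Here "foo_exit" is a hypothetical module-exit function.
 */
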
/* Record an RCU quiescent state. */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	local_irq_restore(flags);
}

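/*
 * Illustrative sketch (not part of the original code): advancing
 * ->donetail to ->curtail marks every queued callback as having
 * waited out a grace period.  With callback A already done and B
 * still waiting:
 *
 *	before:	rcucblist -> A -> B -> NULL	donetail == &A->next
 *	after:					donetail == &B->next == curtail
 *
 * so the next RCU_SOFTIRQ pass will invoke both A and B.
 */
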
/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

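/*
 * Note (not part of the original code): an interrupt taken from user
 * mode is itself proof of a quiescent state, so rcu_qs() may be called
 * directly.  An interrupt from kernel mode proves nothing, so when
 * callbacks are pending the best this function can do is request a
 * reschedule: the eventual context switch is a quiescent state that
 * leads to rcu_qs() being invoked.
 */
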
/*
 * Reclaim the specified callback, either by invoking it for non-kvfree
 * cases or freeing it directly (for kvfree).  Return true if kvfreeing,
 * false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}

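/*
 * Illustrative note (not part of the original code): kvfree_rcu() has
 * no real callback function.  Instead, it stores the byte offset of
 * the rcu_head within its enclosing structure in ->func.  Because
 * __is_kvfree_rcu_offset() treats small values as offsets rather than
 * function addresses, the enclosing structure can be recovered by
 * simple subtraction, as in the code above.  A sketch, with a
 * hypothetical "struct foo":
 *
 *	head->func == (rcu_callback_t)offsetof(struct foo, rcu);
 *	fp = (struct foo *)((void *)head - offset);
 *	kvfree(fp);
 */
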
/* Invoke the RCU callbacks whose grace period has elapsed. */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}

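/*
 * Illustrative sketch of the splice above (not part of the original
 * code), with callbacks A and B done and C still waiting, so that
 * donetail == &B->next:
 *
 *	before:	rcucblist -> A -> B -> C -> NULL
 *	after:	rcucblist -> C -> NULL		list: A -> B -> NULL
 *
 * Interrupts are disabled only for this pointer surgery; the callbacks
 * themselves then run with softirqs disabled but interrupts enabled.
 */
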
/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent
 * state, and so on a UP system, synchronize_rcu() need do nothing.
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

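/*
 * Illustrative usage sketch (not part of the original code): the
 * classic updater pattern, with a hypothetical RCU-protected pointer
 * "gp" guarded by a hypothetical "gp_lock":
 *
 *	struct foo *old;
 *
 *	spin_lock(&gp_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new);
 *	spin_unlock(&gp_lock);
 *	synchronize_rcu();	[waits for pre-existing readers]
 *	kfree(old);		[safe: no reader can still see "old"]
 */
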
/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);

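/*
 * Illustrative usage sketch (not part of the original code), assuming
 * a hypothetical "struct foo" with an embedded rcu_head:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rcu);
 *		kfree(fp);
 *	}
 *
 *	call_rcu(&fp->rcu, foo_reclaim);
 *
 * The callback runs from RCU_SOFTIRQ after a later quiescent state.
 */
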
void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
	srcu_init();
}