/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/ftrace_event.h>

#include "rcu.h"

/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

#include "tiny_plugin.h"

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
static void rcu_idle_enter_common(long long newval)
{
	if (newval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
					    rcu_dynticks_nesting, newval));
		rcu_dynticks_nesting = newval;
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
				    rcu_dynticks_nesting, newval));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
					    rcu_dynticks_nesting, newval));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(); /* implies rcu_bh_qs() */
	barrier();
	rcu_dynticks_nesting = newval;
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
	    DYNTICK_TASK_NEST_VALUE)
		newval = 0;
	else
		newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
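
/*
 * A minimal sketch of the expected caller (illustrative only; the real
 * idle loops live in architecture code and in kernel/sched/idle.c):
 *
 *	while (!need_resched()) {
 *		rcu_idle_enter();
 *		arch_cpu_idle();	(low-power wait for an interrupt)
 *		rcu_idle_exit();
 *	}
 */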

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	newval = rcu_dynticks_nesting - 1;
	WARN_ON_ONCE(newval < 0);
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	else
		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

/*
 * Test whether RCU is watching the current CPU, in other words, whether
 * RCU does not regard this CPU as idle.
 */
bool notrace __rcu_is_watching(void)
{
	return rcu_dynticks_nesting;
}
EXPORT_SYMBOL(__rcu_is_watching);

#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 1;
}
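
/*
 * For example: a CPU that has fully entered idle runs with
 * rcu_dynticks_nesting == 0; the first interrupt taken from idle raises
 * it to 1 in rcu_irq_enter(), and a nested interrupt raises it to 2.
 * The "<= 1" test above therefore treats only the first interrupt level
 * as having come from idle.
 */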

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  Irqs are disabled
 * by the callers to avoid confusion due to interrupt handlers invoking
 * call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	RCU_TRACE(reset_cpu_stall_ticks(rcp));
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}
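
/*
 * A sketch of the callback list that rcu_qsctr_help() advances (the
 * fields live in struct rcu_ctrlblk in tiny_plugin.h):
 *
 *	->rcucblist --> cb1 --> cb2 --> cb3 --> NULL
 *	                    ^                ^
 *	                ->donetail       ->curtail
 *
 * ->donetail references the ->next pointer of the last callback whose
 * grace period has elapsed, and ->curtail that of the most recently
 * queued callback.  Setting ->donetail = ->curtail above therefore
 * marks every currently queued callback as ready to invoke.
 */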

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}
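
/*
 * Sketch of a typical caller (illustrative; the actual call sites live
 * outside this file): the scheduler reports each context switch as a
 * quiescent state, which for Tiny RCU reduces to rcu_sched_qs():
 *
 *	__schedule()
 *	  -> rcu_note_context_switch()
 *	       -> rcu_sched_qs()
 */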

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}

/*
 * Invoke the RCU callbacks whose grace period has elapsed on the
 * specified rcu_ctrlblk structure.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      !!ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      false));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
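
/*
 * Typical update-side pairing with synchronize_sched(), as a sketch
 * (the "foo" names are made up for illustration):
 *
 *	spin_lock(&foo_lock);
 *	old = rcu_dereference_protected(foo_gp,
 *					lockdep_is_held(&foo_lock));
 *	rcu_assign_pointer(foo_gp, new);
 *	spin_unlock(&foo_lock);
 *	synchronize_sched();	(no reader can still be using "old")
 *	kfree(old);
 */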

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
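
/*
 * Example usage, as a sketch (struct foo, foo_reclaim(), and fp are
 * made-up names; fp points to a dynamically allocated struct foo):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu_sched(&fp->rh, foo_reclaim);
 */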

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

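/*
 * Initialize Tiny RCU: all that is needed is to register the softirq
 * handler that invokes ready callbacks.  Called early in boot from
 * start_kernel().
 */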
void rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}