/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>

#ifdef CONFIG_RCU_TRACE
#include <trace/events/rcu.h>
#endif /* #ifdef CONFIG_RCU_TRACE */

#include "rcu.h"

/* Forward declarations for rcutiny_plugin.h. */
struct rcu_ctrlblk;
static void invoke_rcu_callbacks(void);
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp);

#include "rcutiny_plugin.h"

static long long rcu_dynticks_nesting = DYNTICK_TASK_NESTING;

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */
static void rcu_idle_enter_common(long long oldval)
{
        if (rcu_dynticks_nesting) {
                RCU_TRACE(trace_rcu_dyntick("--=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting));
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task",
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
        rcu_sched_qs(0); /* implies rcu_bh_qs(0) */
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting = 0;
        rcu_idle_enter_common(oldval);
        local_irq_restore(flags);
}

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting--;
        WARN_ON_ONCE(rcu_dynticks_nesting < 0);
        rcu_idle_enter_common(oldval);
        local_irq_restore(flags);
}

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. */
static void rcu_idle_exit_common(long long oldval)
{
        if (oldval) {
                RCU_TRACE(trace_rcu_dyntick("++=",
                                            oldval, rcu_dynticks_nesting));
                return;
        }
        RCU_TRACE(trace_rcu_dyntick("End", oldval, rcu_dynticks_nesting));
        if (!is_idle_task(current)) {
                struct task_struct *idle = idle_task(smp_processor_id());

                RCU_TRACE(trace_rcu_dyntick("Error on exit: not idle task",
                                            oldval, rcu_dynticks_nesting));
                ftrace_dump(DUMP_ALL);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
        }
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        WARN_ON_ONCE(oldval != 0);
        rcu_dynticks_nesting = DYNTICK_TASK_NESTING;
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
        unsigned long flags;
        long long oldval;

        local_irq_save(flags);
        oldval = rcu_dynticks_nesting;
        rcu_dynticks_nesting++;
        WARN_ON_ONCE(rcu_dynticks_nesting == 0);
        rcu_idle_exit_common(oldval);
        local_irq_restore(flags);
}
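
/*
 * Usage sketch for the four functions above.  The callers named here
 * are hypothetical; the real call sites are the idle loop and the
 * architecture's irq entry/exit paths:
 *
 *	for (;;) {
 *		rcu_idle_enter();	(extended quiescent state begins)
 *		wait_for_interrupt();	(irq paths bracket their handlers
 *					 with rcu_irq_enter()/rcu_irq_exit())
 *		rcu_idle_exit();	(extended quiescent state ends)
 *		do_pending_work();
 *	}
 *
 * rcu_idle_enter() forces rcu_dynticks_nesting to zero, whereas
 * rcu_irq_exit() only decrements it, so an interrupt taken from idle
 * nests correctly around the extended quiescent state.
 */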

#ifdef CONFIG_PROVE_RCU

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
int rcu_is_cpu_idle(void)
{
        return !rcu_dynticks_nesting;
}
EXPORT_SYMBOL(rcu_is_cpu_idle);

#endif /* #ifdef CONFIG_PROVE_RCU */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count; we must be running at the first interrupt
 * level.
 */
int rcu_is_cpu_rrupt_from_idle(void)
{
        return rcu_dynticks_nesting <= 0;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().  Called with
 * irqs disabled, to avoid confusion due to interrupt handlers
 * invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
        if (rcp->rcucblist != NULL &&
            rcp->donetail != rcp->curtail) {
                rcp->donetail = rcp->curtail;
                return 1;
        }

        return 0;
}
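
/*
 * For orientation, one possible state of a callback list (cb1..cb3 are
 * illustrative callbacks, not real data):
 *
 *	->rcucblist -> cb1 -> cb2 -> cb3 -> NULL
 *	                       |             |
 *	                  ->donetail     ->curtail
 *	               (= &cb2->next)  (= &cb3->next)
 *
 * ->donetail references the ->next pointer of the last callback whose
 * grace period has elapsed, and ->curtail references the ->next pointer
 * of the last callback queued.  Setting donetail = curtail, as
 * rcu_qsctr_help() does above, therefore marks everything currently
 * queued as ready to invoke.
 */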

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
            rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_qsctr_help(&rcu_bh_ctrlblk))
                invoke_rcu_callbacks();
        local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int cpu, int user)
{
        if (user || rcu_is_cpu_rrupt_from_idle())
                rcu_sched_qs(cpu);
        else if (!in_softirq())
                rcu_bh_qs(cpu);
        rcu_preempt_check_callbacks();
}
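
/*
 * For reference, the usual route here is the timer tick, roughly
 * (sketch of kernel/timer.c in this era, details elided):
 *
 *	void update_process_times(int user_tick)
 *	{
 *		...
 *		rcu_check_callbacks(smp_processor_id(), user_tick);
 *		...
 *	}
 */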

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
        char *rn = NULL;
        struct rcu_head *next, *list;
        unsigned long flags;
        RCU_TRACE(int cb_count = 0);

        /* If no RCU callbacks ready to invoke, just return. */
        if (&rcp->rcucblist == rcp->donetail) {
                RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
                RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
                                              ACCESS_ONCE(rcp->rcucblist),
                                              need_resched(),
                                              is_idle_task(current),
                                              rcu_is_callbacks_kthread()));
                return;
        }

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
        *rcp->donetail = NULL;
        if (rcp->curtail == rcp->donetail)
                rcp->curtail = &rcp->rcucblist;
        rcu_preempt_remove_callbacks(rcp);
        rcp->donetail = &rcp->rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        RCU_TRACE(rn = rcp->name);
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                __rcu_reclaim(rn, list);
                local_bh_enable();
                list = next;
                RCU_TRACE(cb_count++);
        }
        RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
        RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count, 0, need_resched(),
                                      is_idle_task(current),
                                      rcu_is_callbacks_kthread()));
}

static void rcu_process_callbacks(struct softirq_action *unused)
{
        __rcu_process_callbacks(&rcu_sched_ctrlblk);
        __rcu_process_callbacks(&rcu_bh_ctrlblk);
        rcu_preempt_process_callbacks();
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
        cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);

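/*
 * Updater-side usage sketch (global_foo, foo_lock, struct foo, and newp
 * are hypothetical; readers would use rcu_read_lock_sched() and
 * rcu_dereference_sched()):
 *
 *	struct foo *old;
 *
 *	spin_lock(&foo_lock);
 *	old = global_foo;
 *	rcu_assign_pointer(global_foo, newp);
 *	spin_unlock(&foo_lock);
 *	synchronize_sched();	(all pre-existing readers have finished)
 *	kfree(old);
 */
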
/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
                       void (*func)(struct rcu_head *rcu),
                       struct rcu_ctrlblk *rcp)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcp->curtail = head;
        rcp->curtail = &head->next;
        RCU_TRACE(rcp->qlen++);
        local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        __call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
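
/*
 * Deferred-free usage sketch (struct foo, foo_reclaim(), and old are
 * hypothetical):
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	call_rcu_sched(&old->rcu, foo_reclaim);
 *
 * foo_reclaim() is then invoked from __rcu_process_callbacks() once a
 * grace period has elapsed (on this UP build, after the next quiescent
 * state).
 */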