/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/ftrace_event.h>

#include "rcu.h"

/* Forward declarations for tiny_plugin.h. */
struct rcu_ctrlblk;
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
static void rcu_process_callbacks(struct softirq_action *unused);
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp);

static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;

#include "tiny_plugin.h"

/* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcu/tree.c. */
static void rcu_idle_enter_common(long long newval)
{
	if (newval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("--="),
					    rcu_dynticks_nesting, newval));
		rcu_dynticks_nesting = newval;
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("Start"),
				    rcu_dynticks_nesting, newval));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Entry error: not idle task"),
					    rcu_dynticks_nesting, newval));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	rcu_sched_qs(); /* implies rcu_bh_qs() */
	barrier();
	rcu_dynticks_nesting = newval;
}

/*
 * Enter idle, which is an extended quiescent state if we have fully
 * entered that mode (i.e., if the new value of dynticks_nesting is zero).
 */
void rcu_idle_enter(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0);
	if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) ==
	    DYNTICK_TASK_NEST_VALUE)
		newval = 0;
	else
		newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE;
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

/*
 * Exit an interrupt handler towards idle.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long newval;

	local_irq_save(flags);
	newval = rcu_dynticks_nesting - 1;
	WARN_ON_ONCE(newval < 0);
	rcu_idle_enter_common(newval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_exit);

/* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcu/tree.c. */
static void rcu_idle_exit_common(long long oldval)
{
	if (oldval) {
		RCU_TRACE(trace_rcu_dyntick(TPS("++="),
					    oldval, rcu_dynticks_nesting));
		return;
	}
	RCU_TRACE(trace_rcu_dyntick(TPS("End"), oldval, rcu_dynticks_nesting));
	if (IS_ENABLED(CONFIG_RCU_TRACE) && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused = idle_task(smp_processor_id());

		RCU_TRACE(trace_rcu_dyntick(TPS("Exit error: not idle task"),
					    oldval, rcu_dynticks_nesting));
		ftrace_dump(DUMP_ALL);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit idle, so that we are no longer in an extended quiescent state.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	WARN_ON_ONCE(rcu_dynticks_nesting < 0);
	if (rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK)
		rcu_dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	else
		rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

/*
 * Enter an interrupt handler, moving away from idle.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	long long oldval;

	local_irq_save(flags);
	oldval = rcu_dynticks_nesting;
	rcu_dynticks_nesting++;
	WARN_ON_ONCE(rcu_dynticks_nesting == 0);
	rcu_idle_exit_common(oldval);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_irq_enter);
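
/*
 * Illustrative trace of rcu_dynticks_nesting (a sketch derived from the
 * functions above, not a normative specification):
 *
 *	task running		DYNTICK_TASK_EXIT_IDLE	(RCU watching)
 *	rcu_idle_enter()	0			(extended QS)
 *	irq: rcu_irq_enter()	1			(RCU watching)
 *	irq: rcu_irq_exit()	0			(extended QS)
 *	rcu_idle_exit()		DYNTICK_TASK_EXIT_IDLE	(RCU watching)
 *
 * Zero means this CPU is in an extended quiescent state; any nonzero
 * value means RCU is watching.  Task-level transitions move the counter
 * in DYNTICK_TASK_NEST_VALUE increments, interrupt transitions by one.
 */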

#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

/*
 * Test whether RCU thinks that the current CPU is idle.
 */
bool notrace __rcu_is_watching(void)
{
	return rcu_dynticks_nesting;
}
EXPORT_SYMBOL(__rcu_is_watching);

#endif /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

/*
 * Test whether the current CPU was interrupted from idle.  Nested
 * interrupts don't count, we must be running at the first interrupt
 * level: an interrupt taken from idle sees only the single increment
 * performed by rcu_irq_enter(), so rcu_dynticks_nesting is at most 1.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return rcu_dynticks_nesting <= 1;
}

/*
 * Helper function for rcu_sched_qs() and rcu_bh_qs().
 * Callers must have irqs disabled to avoid confusion due to interrupt
 * handlers invoking call_rcu().
 */
static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
{
	RCU_TRACE(reset_cpu_stall_ticks(rcp));
	if (rcp->rcucblist != NULL &&
	    rcp->donetail != rcp->curtail) {
		rcp->donetail = rcp->curtail;
		return 1;
	}

	return 0;
}
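
/*
 * Illustrative layout of the callback list manipulated above (a sketch;
 * the fields live in struct rcu_ctrlblk):
 *
 *	rcucblist --> done-cb --> done-cb --> pending-cb --> NULL
 *	                              ^                 ^
 *	                          *donetail         *curtail
 *
 * ->donetail references the ->next pointer of the last callback whose
 * grace period has elapsed, and ->curtail references the ->next pointer
 * of the most recently queued callback.  rcu_qsctr_help() advances
 * ->donetail to ->curtail, marking everything queued so far as done.
 */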

/*
 * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
 * are at it, given that any rcu quiescent state is also an rcu_bh
 * quiescent state.  Use "+" instead of "||" to defeat short circuiting.
 */
void rcu_sched_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
	    rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Record an rcu_bh quiescent state.
 */
void rcu_bh_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
		raise_softirq(RCU_SOFTIRQ);
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_check_callbacks(int user)
{
	RCU_TRACE(check_cpu_stalls());
	if (user || rcu_is_cpu_rrupt_from_idle())
		rcu_sched_qs();
	else if (!in_softirq())
		rcu_bh_qs();
	if (user)
		rcu_note_voluntary_context_switch(current);
}
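
/*
 * Summary of the Tiny RCU pipeline (an illustrative sketch of the code
 * in this file): each scheduling-clock tick lands in
 * rcu_check_callbacks(); if the tick interrupted a quiescent context,
 * rcu_qsctr_help() advances ->donetail past all queued callbacks and
 * RCU_SOFTIRQ is raised; rcu_process_callbacks() then invokes the
 * now-"done" callbacks.  With but one CPU, a single quiescent state
 * suffices for a full grace period.
 */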

/*
 * Invoke the RCU callbacks on the specified rcu_ctrlblk structure
 * whose grace period has elapsed.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
	const char *rn = NULL;
	struct rcu_head *next, *list;
	unsigned long flags;
	RCU_TRACE(int cb_count = 0);

	/* If no RCU callbacks ready to invoke, just return. */
	if (&rcp->rcucblist == rcp->donetail) {
		RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, 0, -1));
		RCU_TRACE(trace_rcu_batch_end(rcp->name, 0,
					      !!ACCESS_ONCE(rcp->rcucblist),
					      need_resched(),
					      is_idle_task(current),
					      false));
		return;
	}

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
	list = rcp->rcucblist;
	rcp->rcucblist = *rcp->donetail;
	*rcp->donetail = NULL;
	if (rcp->curtail == rcp->donetail)
		rcp->curtail = &rcp->rcucblist;
	rcp->donetail = &rcp->rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	RCU_TRACE(rn = rcp->name);
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		__rcu_reclaim(rn, list);
		local_bh_enable();
		list = next;
		RCU_TRACE(cb_count++);
	}
	RCU_TRACE(rcu_trace_sub_qlen(rcp, cb_count));
	RCU_TRACE(trace_rcu_batch_end(rcp->name,
				      cb_count, 0, need_resched(),
				      is_idle_task(current),
				      false));
}

/* Softirq handler: invoke any ready callbacks for both RCU flavors. */
static void rcu_process_callbacks(struct softirq_action *unused)
{
	__rcu_process_callbacks(&rcu_sched_ctrlblk);
	__rcu_process_callbacks(&rcu_bh_ctrlblk);
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_sched() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_sched() is a quiescent
 * state, and so on a UP system, synchronize_sched() need do nothing.
 * Ditto for synchronize_rcu_bh().  (But Lai Jiangshan points out the
 * benefits of doing might_sleep() to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 *
 * But we want to make this a static inline later.  The cond_resched()
 * currently makes this problematic.
 */
void synchronize_sched(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_sched() in RCU read-side critical section");
	cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
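
/*
 * Typical update-side usage (an illustrative sketch; the "foo" element
 * and the RCU-protected list it sits on are hypothetical):
 *
 *	list_del_rcu(&foo->list);
 *	synchronize_sched();	<- wait for pre-existing readers to finish
 *	kfree(foo);
 *
 * On this UP build the synchronize_sched() reduces to cond_resched(),
 * but the pattern is the same as on SMP.
 */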

/*
 * Helper function for call_rcu() and call_rcu_bh().
 */
static void __call_rcu(struct rcu_head *head,
		       void (*func)(struct rcu_head *rcu),
		       struct rcu_ctrlblk *rcp)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcp->curtail = head;
	rcp->curtail = &head->next;
	RCU_TRACE(rcp->qlen++);
	local_irq_restore(flags);
}

/*
 * Post an RCU callback to be invoked after the end of an RCU-sched grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

/*
 * Post an RCU bottom-half callback to be invoked after any subsequent
 * quiescent state.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_ctrlblk);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
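
/*
 * Illustrative asynchronous usage (a sketch; struct foo and
 * foo_reclaim() are hypothetical): embed an rcu_head in the protected
 * structure and reclaim it from the callback via container_of():
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 *	call_rcu_sched(&fp->rh, foo_reclaim);
 *
 * Unlike synchronize_sched(), this returns immediately; foo_reclaim()
 * runs from the RCU softirq after a grace period has elapsed.
 */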

/*
 * Boot-time initialization: register the softirq handler that invokes
 * ready RCU callbacks.
 */
void rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
}