/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

MODULE_ALIAS("rcupdate");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

module_param(rcu_expedited, int, 0);
#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
#ifdef CONFIG_PROVE_RCU_DELAY
		udelay(10); /* Make preemption more probable. */
#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
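
/*
 * Illustrative reader-side usage of the above primitives (a sketch only;
 * "gp" and do_something_with() are placeholder names, not kernel symbols):
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p)
 *		do_something_with(p);
 *	rcu_read_unlock();
 *
 * Under CONFIG_PREEMPT_RCU, rcu_read_lock() and rcu_read_unlock() map to
 * __rcu_read_lock() and __rcu_read_unlock() above.
 */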

#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);
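
/*
 * Enable RCU-lockdep checking only once the scheduler is running, lockdep
 * itself is enabled, and we are not currently recursing inside lockdep.
 */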
int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
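
/*
 * Helper for the synchronize_*() primitives: post an RCU callback through
 * the caller-supplied call_rcu() variant and block until that callback is
 * invoked, i.e. until a grace period of the corresponding flavor has elapsed.
 */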
void wait_rcu_gp(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	crf(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct rcu_head *head = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. We just make sure that it is
		 * tracked in the object tracker.
		 */
		debug_object_init(head, &rcuhead_debug_descr);
		debug_object_activate(head, &rcuhead_debug_descr);
		return 0;
	default:
		return 1;
	}
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);
/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.fixup_activate = rcuhead_fixup_activate,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);
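
/*
 * Convert the stall-warning timeout (in seconds, from the module parameter
 * or CONFIG_RCU_CPU_STALL_TIMEOUT) to jiffies, clamping it to the range
 * permitted by Kconfig and adding extra slack under CONFIG_PROVE_RCU.
 */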
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
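
/*
 * rcu_sysrq_start() suppresses RCU CPU stall warnings (unless they are
 * already suppressed) for the duration of a sysrq-triggered dump;
 * rcu_sysrq_end() re-enables them afterwards.
 */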
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}
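
/*
 * Panic notifier: suppress further RCU CPU stall warnings once a panic
 * has been triggered.
 */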
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */