// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#include <linux/kvm_para.h>

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
int sysctl_max_rcu_stall_to_panic __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif
#define RCU_STALL_MIGHT_DIV	8
#define RCU_STALL_MIGHT_MIN	(2 * HZ)

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
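
/*
 * Illustrative arithmetic: with the default CONFIG_RCU_CPU_STALL_TIMEOUT
 * of 21 seconds and HZ=1000, the function above returns 21,000 jiffies,
 * plus another 5 * HZ = 5,000 jiffies of slack when CONFIG_PROVE_RCU
 * selects the nonzero RCU_STALL_DELAY_DELTA.
 */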

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just invoking synchronize_rcu() on the other.
 * The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}
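
/*
 * Illustrative caller sketch (not part of this file; the structure
 * field and callback names are hypothetical), choosing between
 * synchronous and asynchronous grace-period handling per the
 * kernel-doc above:
 *
 *	if (rcu_gp_might_be_stalled())
 *		synchronize_rcu();		// GP likely stalled, wait directly.
 *	else
 *		call_rcu(&p->rh, free_it_cb);	// Callback path needs memory.
 */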

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}
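
/*
 * Note on the encoding: sysrq suppression uses the value 2 so that
 * rcu_sysrq_end() restores warnings only if sysrq is what suppressed
 * them, leaving suppression requested by other means (for example, the
 * panic path below, which uses 1) undisturbed.
 */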

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return;

	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}

/**
 * rcu_cpu_stall_reset - restart stall-warning timeout for current grace period
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall,
		   jiffies + rcu_jiffies_till_stall_check());
}
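
/*
 * Hedged note: callers typically invoke this after wall-clock time has
 * passed during which the kernel was not executing normally (kgdb, for
 * example, calls it when resuming), so that the current grace period
 * gets a full stall-warning timeout starting from the reset.
 */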

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}
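
/*
 * Resulting timeline (illustrative): with the grace period starting at
 * jiffy j and a stall timeout of j1 jiffies, resched assistance begins
 * at j + j1/2 and a stall is first reported at j + j1.
 */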

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static int check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return -EBUSY; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return 0;
}
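
/*
 * check_slow_task() is passed to task_call_func() below, which (per its
 * contract) invokes it with the target task's state held stable, so the
 * fields sampled above form a consistent snapshot of a blocked reader.
 */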

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	while (i) {
		t = ts[--i];
		if (task_call_func(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}
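
/*
 * Illustrative output decoding (the PIDs are made up): " P12" means task
 * 12 was running when sampled, so it could not be inspected, while
 * " P34/1:b..l" means task 34 had rcu_read_lock_nesting of 1 and, of the
 * four flags, only blocked (b) and on-the-blkd_tasks-list (l) were set;
 * "." marks a clear flag for need_qs (q) and exp_hint (e).
 */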

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}
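
/*
 * That is, the grace-period kthread is deemed starved once more than
 * two seconds (2 * HZ jiffies) have elapsed since it last recorded
 * activity in ->gp_activity.
 */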

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       falsepositive ? " (false positive?)" : "");
}
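
/*
 * Hedged decoding of the per-CPU line printed above: "O" means the CPU
 * is currently online, "o" that it was online when the grace period
 * started, and "N" that it will be online for the next grace period,
 * with "." in any of those positions meaning the opposite.  The fourth
 * character tracks the stall-detection irq_work: "." means its handler
 * ran during this grace period, "!" that its handler has not yet run
 * this grace period, a digit (capped at 9) that a request is pending,
 * the digit counting grace periods since the handler last ran, and "?"
 * that CONFIG_IRQ_WORK is disabled.
 */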

/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	int cpu;
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		cpu = gpk ? task_cpu(gpk) : -1;
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(READ_ONCE(rcu_state.gp_flags)),
		       gp_state_getname(rcu_state.gp_state),
		       data_race(READ_ONCE(rcu_state.gp_state)),
		       gpk ? data_race(READ_ONCE(gpk->__state)) : ~0, cpu);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			if (cpu >= 0) {
				if (cpu_is_offline(cpu)) {
					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
				} else {
					pr_err("Stack dump where RCU GP kthread last ran:\n");
					if (!trigger_single_cpu_backtrace(cpu))
						dump_cpu_task(cpu);
				}
			}
			wake_up_process(gpk);
		}
	}
}

/* Complain about missing wakeups from expired fqs wait timer */
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	short gp_state;
	unsigned long jiffies_fqs;
	int cpu;

	/*
	 * Order reads of .gp_state and .jiffies_force_qs.
	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
	 */
	gp_state = smp_load_acquire(&rcu_state.gp_state);
	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);

	if (gp_state == RCU_GP_WAIT_FQS &&
	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
	    gpk && !READ_ONCE(gpk->on_rq)) {
		cpu = task_cpu(gpk);
		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
		       rcu_state.name, (jiffies - jiffies_fqs),
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
		       data_race(READ_ONCE(gpk->__state)));
		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - gps),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(READ_ONCE(rcu_state.gp_activity));
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(READ_ONCE(jiffies_till_next_fqs)),
			       data_race(READ_ONCE(rcu_get_root()->qsmask)));
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state();  /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	bool didstall = false;
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + ULONG_MAX / 2;
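	/*
	 * Hedged note: jn is a far-future sentinel, half the jiffies
	 * wraparound away.  Whichever CPU wins the cmpxchg below installs
	 * it in ->jiffies_stall so that only one CPU reports this stall;
	 * the deadline is rewritten to a normal value once reporting is done.
	 */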
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
		didstall = true;

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/*
		 * If a virtual machine is stopped by the host it can look to
		 * the watchdog like an RCU stall. Check to see if the host
		 * stopped the vm.
		 */
		if (kvm_check_and_clear_guest_paused())
			return;

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
		didstall = true;
	}
	if (didstall && READ_ONCE(rcu_state.jiffies_stall) == jn) {
		jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
		WRITE_ONCE(rcu_state.jiffies_stall, jn);
	}
}
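
/*
 * To summarize the two reporting paths above: if the stalling CPU is the
 * one running this check (its own qsmask bit is still set), it reports
 * immediately via print_cpu_stall(); otherwise, some other CPU reports
 * on its behalf via print_other_cpu_stall(), but only after an extra
 * RCU_STALL_RAT_DELAY grace period of jiffies to let the stalled CPU
 * dump its own stack first.
 */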

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including that of callback invocation.


/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state.  When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure.  A return of true says
 * RCU priority boosting is to blame, and false says otherwise.  If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1.  This can happen in case
 * of vCPU preemption while the last CPU is reporting its quiescent state,
 * for example.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
	bool atb = false;
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		if (!cpup) {
			if (data_race(READ_ONCE(rnp->qsmask))) {
				return false;
			} else {
				if (READ_ONCE(rnp->gp_tasks))
					atb = true;
				continue;
			}
		}
		*cpup = -1;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->gp_tasks)
			atb = true;
		if (!rnp->qsmask) {
			// No CPUs without quiescent states for this rnp.
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		// Find the first holdout CPU.
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				*cpup = cpu;
				return false;
			}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	// Can't blame CPUs, so must blame RCU priority boosting.
	return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long js;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(READ_ONCE(rcu_state.gp_activity));
	jr = j - data_race(READ_ONCE(rcu_state.gp_req_activity));
	js = j - data_race(READ_ONCE(rcu_state.gp_start));
	jw = j - data_race(READ_ONCE(rcu_state.gp_wake_time));
	pr_info("%s: wait state: %s(%d) ->state: %#x ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		data_race(READ_ONCE(rcu_state.gp_state)),
		t ? data_race(READ_ONCE(t->__state)) : 0x1ffff, t ? t->rt_priority : 0xffU,
		js, ja, jr, jw, (long)data_race(READ_ONCE(rcu_state.gp_wake_seq)),
		(long)data_race(READ_ONCE(rcu_state.gp_seq)),
		(long)data_race(READ_ONCE(rcu_get_root()->gp_seq_needed)),
		data_race(READ_ONCE(rcu_state.gp_max)),
		data_race(READ_ONCE(rcu_state.gp_flags)));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
		    !data_race(READ_ONCE(rnp->qsmask)) && !data_race(READ_ONCE(rnp->boost_tasks)) &&
		    !data_race(READ_ONCE(rnp->exp_tasks)) && !data_race(READ_ONCE(rnp->gp_tasks)))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
			rnp->grplo, rnp->grphi,
			(long)data_race(READ_ONCE(rnp->gp_seq)),
			(long)data_race(READ_ONCE(rnp->gp_seq_needed)),
			data_race(READ_ONCE(rnp->qsmask)),
			".b"[!!data_race(READ_ONCE(rnp->boost_kthread_task))],
			".B"[!!data_race(READ_ONCE(rnp->boost_tasks))],
			".E"[!!data_race(READ_ONCE(rnp->exp_tasks))],
			".G"[!!data_race(READ_ONCE(rnp->gp_tasks))],
			data_race(READ_ONCE(rnp->n_boosts)));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(READ_ONCE(rdp->gp_seq_needed)));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(READ_ONCE(rdp->n_cbs_invoked));
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}
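
/*
 * The triple check above (lockless, then under the leaf rcu_node lock,
 * then additionally under the root rcu_node lock) progressively rules
 * out races with a grace period starting or the request being satisfied,
 * so that the WARN_ON(1) fires only when a needed grace period has
 * genuinely failed to start for gpssdelay jiffies.
 */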

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_start)));
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - data_race(READ_ONCE(rcu_state.gp_end)));
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);