// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU CPU stall warnings for normal RCU grace periods
 *
 * Copyright IBM Corporation, 2019
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 */

//////////////////////////////////////////////////////////////////////////////
//
// Controlling CPU stall warnings, including delay calculation.

/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;
int sysctl_max_rcu_stall_to_panic __read_mostly;

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA		(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA		0
#endif
#define RCU_STALL_MIGHT_DIV		8
#define RCU_STALL_MIGHT_MIN		(2 * HZ)

/* Limit-check stall timeouts specified at boottime and runtime. */
int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
EXPORT_SYMBOL_GPL(rcu_jiffies_till_stall_check);
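
/*
 * Worked example (a sketch, assuming the default of
 * CONFIG_RCU_CPU_STALL_TIMEOUT=21 and CONFIG_PROVE_RCU=y): the clamp
 * leaves till_stall_check at 21, so this function returns
 * 21 * HZ + 5 * HZ, that is, a 26-second stall-warning delay.  Writing
 * an out-of-range value such as 1000 to
 * /sys/module/rcupdate/parameters/rcu_cpu_stall_timeout gets clamped
 * to 300, yielding a 305-second delay.
 */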

/**
 * rcu_gp_might_be_stalled - Is it likely that the grace period is stalled?
 *
 * Returns true if the current grace period is sufficiently old that
 * it is reasonable to assume that it might be stalled.  This can be
 * useful when deciding whether to allocate memory to enable RCU-mediated
 * freeing on the one hand or just to invoke synchronize_rcu() on the
 * other.  The latter is preferable when the grace period is stalled.
 *
 * Note that sampling of the .gp_start and .gp_seq fields must be done
 * carefully to avoid false positives at the beginnings and ends of
 * grace periods.
 */
bool rcu_gp_might_be_stalled(void)
{
	unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;
	unsigned long j = jiffies;

	if (d < RCU_STALL_MIGHT_MIN)
		d = RCU_STALL_MIGHT_MIN;
	smp_mb(); // jiffies before .gp_seq to avoid false positives.
	if (!rcu_gp_in_progress())
		return false;
	// Long delays at this point avoid false positives, but a delay
	// of ULONG_MAX/4 jiffies voids your no-false-positive warranty.
	smp_mb(); // .gp_seq before second .gp_start
	// And ditto here.
	return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);
}
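
/*
 * Hypothetical caller (not part of this file, and reclaim_cb is made
 * up for illustration): an allocation-heavy update path might use this
 * to avoid queueing yet more memory behind a stalled grace period:
 *
 *	if (rcu_gp_might_be_stalled())
 *		synchronize_rcu();	// Wait directly; don't grow backlog.
 *	else
 *		call_rcu(&p->rh, reclaim_cb);
 */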

/* Don't do RCU CPU stall warnings during long sysrq printouts. */
void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}
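
/*
 * The value 2 (rather than 1) marks suppression as sysrq-initiated, so
 * that rcu_sysrq_end() clears only what rcu_sysrq_start() set: it never
 * un-suppresses warnings suppressed with the value 1, whether by
 * rcu_panic() below or by the user.  rcu_sysrq_start() likewise
 * declines to overwrite an existing nonzero suppression value.
 */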

/* Don't print RCU CPU stall warnings during a kernel panic. */
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

/* If so specified via sysctl, panic, yielding cleaner stall-warning output. */
static void panic_on_rcu_stall(void)
{
	static int cpu_stall;

	if (++cpu_stall < sysctl_max_rcu_stall_to_panic)
		return;

	if (sysctl_panic_on_rcu_stall)
		panic("RCU Stall\n");
}
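
/*
 * Usage sketch (assuming a shell on the running system): to panic on
 * the third stall warning rather than the first, one might set:
 *
 *	sysctl -w kernel.panic_on_rcu_stall=1
 *	sysctl -w kernel.max_rcu_stall_to_panic=3
 *
 * With max_rcu_stall_to_panic left at its default of 0, the increment
 * above already fails the "<" check on the first stall, so the first
 * stall panics immediately.
 */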

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	WRITE_ONCE(rcu_state.jiffies_stall, jiffies + ULONG_MAX / 2);
}
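
/*
 * Why ULONG_MAX / 2?  The jiffies counter wraps, so "far in the future"
 * must be expressed as an offset that wraparound-aware comparisons such
 * as ULONG_CMP_GE() still interpret as being in the future.  Half the
 * counter's range is the largest such offset, making the timeout as
 * distant as the modular arithmetic allows.
 */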

//////////////////////////////////////////////////////////////////////////////
//
// Interaction with RCU grace periods

/* Start of new grace period, so record stall time (and forcing times). */
static void record_gp_stall_check_time(void)
{
	unsigned long j = jiffies;
	unsigned long j1;

	WRITE_ONCE(rcu_state.gp_start, j);
	j1 = rcu_jiffies_till_stall_check();
	smp_mb(); // ->gp_start before ->jiffies_stall and caller's ->gp_seq.
	WRITE_ONCE(rcu_state.jiffies_stall, j + j1);
	rcu_state.jiffies_resched = j + j1 / 2;
	rcu_state.n_force_qs_gpstart = READ_ONCE(rcu_state.n_force_qs);
}

/* Zero ->ticks_this_gp and snapshot the number of RCU softirq handlers. */
static void zero_cpu_stall_ticks(struct rcu_data *rdp)
{
	rdp->ticks_this_gp = 0;
	rdp->softirq_snap = kstat_softirqs_cpu(RCU_SOFTIRQ, smp_processor_id());
	WRITE_ONCE(rdp->last_fqs_resched, jiffies);
}

/*
 * If too much time has passed in the current grace period, and if
 * so configured, go kick the relevant kthreads.
 */
static void rcu_stall_kick_kthreads(void)
{
	unsigned long j;

	if (!READ_ONCE(rcu_kick_kthreads))
		return;
	j = READ_ONCE(rcu_state.jiffies_kick_kthreads);
	if (time_after(jiffies, j) && rcu_state.gp_kthread &&
	    (rcu_gp_in_progress() || READ_ONCE(rcu_state.gp_flags))) {
		WARN_ONCE(1, "Kicking %s grace-period kthread\n",
			  rcu_state.name);
		rcu_ftrace_dump(DUMP_ALL);
		wake_up_process(rcu_state.gp_kthread);
		WRITE_ONCE(rcu_state.jiffies_kick_kthreads, j + HZ);
	}
}

/*
 * Handler for the irq_work request posted about halfway into the RCU CPU
 * stall timeout, and used to detect excessive irq disabling.  Set state
 * appropriately, but just complain if there is unexpected state on entry.
 */
static void rcu_iw_handler(struct irq_work *iwp)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;

	rdp = container_of(iwp, struct rcu_data, rcu_iw);
	rnp = rdp->mynode;
	raw_spin_lock_rcu_node(rnp);
	if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
		rdp->rcu_iw_gp_seq = rnp->gp_seq;
		rdp->rcu_iw_pending = false;
	}
	raw_spin_unlock_rcu_node(rnp);
}
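
/*
 * Note: if this handler never runs, ->rcu_iw_pending stays true and
 * ->rcu_iw_gp_seq falls behind the rcu_node's ->gp_seq, which
 * print_cpu_stall_info() below renders as a digit counting the grace
 * periods for which the irq_work has been pending -- a strong hint
 * that the stalled CPU has interrupts disabled.
 */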

//////////////////////////////////////////////////////////////////////////////
//
// Printing RCU CPU stall warnings

#ifdef CONFIG_PREEMPT_RCU

/*
 * Dump detailed information for all tasks blocking the current RCU
 * grace period on the specified rcu_node structure.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
	unsigned long flags;
	struct task_struct *t;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		/*
		 * We could be printing a lot while holding a spinlock.
		 * Avoid triggering hard lockup.
		 */
		touch_nmi_watchdog();
		sched_show_task(t);
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}

// Communicate task state back to the RCU CPU stall warning request.
struct rcu_stall_chk_rdr {
	int nesting;
	union rcu_special rs;
	bool on_blkd_list;
};

/*
 * Report out the state of a not-running task that is stalling the
 * current RCU grace period.
 */
static bool check_slow_task(struct task_struct *t, void *arg)
{
	struct rcu_stall_chk_rdr *rscrp = arg;

	if (task_curr(t))
		return false; // It is running, so decline to inspect it.
	rscrp->nesting = t->rcu_read_lock_nesting;
	rscrp->rs = t->rcu_read_unlock_special;
	rscrp->on_blkd_list = !list_empty(&t->rcu_node_entry);
	return true;
}

/*
 * Scan the current list of tasks blocked within RCU read-side critical
 * sections, printing out the tid of each of the first few of them.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	int i = 0;
	int ndetected = 0;
	struct rcu_stall_chk_rdr rscr;
	struct task_struct *t;
	struct task_struct *ts[8];

	lockdep_assert_irqs_disabled();
	if (!rcu_preempt_blocked_readers_cgp(rnp)) {
		// Release the lock even on this early exit: the caller
		// relies on this function to drop rnp->lock.
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return 0;
	}
	pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
	       rnp->level, rnp->grplo, rnp->grphi);
	t = list_entry(rnp->gp_tasks->prev,
		       struct task_struct, rcu_node_entry);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);
		ts[i++] = t;
		if (i >= ARRAY_SIZE(ts))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	while (i) {	// Also handles ts[0], which the previous
			// "for (i--; i; i--)" loop skipped, leaking
			// its get_task_struct() reference.
		t = ts[--i];
		if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
			pr_cont(" P%d", t->pid);
		else
			pr_cont(" P%d/%d:%c%c%c%c",
				t->pid, rscr.nesting,
				".b"[rscr.rs.b.blocked],
				".q"[rscr.rs.b.need_qs],
				".e"[rscr.rs.b.exp_hint],
				".l"[rscr.on_blkd_list]);
		lockdep_assert_irqs_disabled();
		put_task_struct(t);
		ndetected++;
	}
	pr_cont("\n");
	return ndetected;
}
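
/*
 * Reading the output above (a sketch; Documentation/RCU/stallwarn.rst
 * is authoritative): a stalled preempted reader prints as, for example,
 * "P117/1:b..l", that is, PID 117, rcu_read_lock() nesting depth 1,
 * "b" blocked, "." no quiescent state needed, "." no expedited-GP
 * hint, and "l" still queued on ->blkd_tasks.  A bare "P117" means the
 * task was running and so could not be safely inspected.
 */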

#else /* #ifdef CONFIG_PREEMPT_RCU */

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
{
}

/*
 * Because preemptible RCU does not exist, we never have to check for
 * tasks blocked within RCU read-side critical sections.
 */
static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	return 0;
}
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */

/*
 * Dump stacks of all tasks running on stalled CPUs.  First try using
 * NMIs, but fall back to manual remote stack tracing on architectures
 * that don't support NMI-based stack dumps.  The NMI-triggered stack
 * traces are more accurate because they are printed by the target CPU.
 */
static void rcu_dump_cpu_stacks(void)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		for_each_leaf_node_possible_cpu(rnp, cpu)
			if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
				if (cpu_is_offline(cpu))
					pr_err("Offline CPU %d blocking current GP.\n", cpu);
				else if (!trigger_single_cpu_backtrace(cpu))
					dump_cpu_task(cpu);
			}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
}

#ifdef CONFIG_RCU_FAST_NO_HZ

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);

	sprintf(cp, "last_accelerate: %04lx/%04lx dyntick_enabled: %d",
		rdp->last_accelerate & 0xffff, jiffies & 0xffff,
		!!rdp->tick_nohz_enabled_snap);
}

#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */

static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
{
	*cp = '\0';
}

#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */

static const char * const gp_state_names[] = {
	[RCU_GP_IDLE] = "RCU_GP_IDLE",
	[RCU_GP_WAIT_GPS] = "RCU_GP_WAIT_GPS",
	[RCU_GP_DONE_GPS] = "RCU_GP_DONE_GPS",
	[RCU_GP_ONOFF] = "RCU_GP_ONOFF",
	[RCU_GP_INIT] = "RCU_GP_INIT",
	[RCU_GP_WAIT_FQS] = "RCU_GP_WAIT_FQS",
	[RCU_GP_DOING_FQS] = "RCU_GP_DOING_FQS",
	[RCU_GP_CLEANUP] = "RCU_GP_CLEANUP",
	[RCU_GP_CLEANED] = "RCU_GP_CLEANED",
};

/*
 * Convert a ->gp_state value to a character string.
 */
static const char *gp_state_getname(short gs)
{
	if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
		return "???";
	return gp_state_names[gs];
}

/* Is the RCU grace-period kthread being starved of CPU time? */
static bool rcu_is_gp_kthread_starving(unsigned long *jp)
{
	unsigned long j = jiffies - READ_ONCE(rcu_state.gp_activity);

	if (jp)
		*jp = j;
	return j > 2 * HZ;
}

/*
 * Print out diagnostic information for the specified stalled CPU.
 *
 * If the specified CPU is aware of the current RCU grace period, then
 * print the number of scheduling clock interrupts the CPU has taken
 * during the time that it has been aware.  Otherwise, print the number
 * of RCU grace periods that this CPU is ignorant of, for example, "1"
 * if the CPU was aware of the previous grace period.
 *
 * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
 */
static void print_cpu_stall_info(int cpu)
{
	unsigned long delta;
	bool falsepositive;
	char fast_no_hz[72];
	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
	char *ticks_title;
	unsigned long ticks_value;

	/*
	 * We could be printing a lot while holding a spinlock.  Avoid
	 * triggering hard lockup.
	 */
	touch_nmi_watchdog();

	ticks_value = rcu_seq_ctr(rcu_state.gp_seq - rdp->gp_seq);
	if (ticks_value) {
		ticks_title = "GPs behind";
	} else {
		ticks_title = "ticks this GP";
		ticks_value = rdp->ticks_this_gp;
	}
	print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
	delta = rcu_seq_ctr(rdp->mynode->gp_seq - rdp->rcu_iw_gp_seq);
	falsepositive = rcu_is_gp_kthread_starving(NULL) &&
			rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp));
	pr_err("\t%d-%c%c%c%c: (%lu %s) idle=%03x/%ld/%#lx softirq=%u/%u fqs=%ld %s%s\n",
	       cpu,
	       "O."[!!cpu_online(cpu)],
	       "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
	       "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
	       !IS_ENABLED(CONFIG_IRQ_WORK) ? '?' :
			rdp->rcu_iw_pending ? (int)min(delta, 9UL) + '0' :
			"!."[!delta],
	       ticks_value, ticks_title,
	       rcu_dynticks_snap(rdp) & 0xfff,
	       rdp->dynticks_nesting, rdp->dynticks_nmi_nesting,
	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
	       data_race(rcu_state.n_force_qs) - rcu_state.n_force_qs_gpstart,
	       fast_no_hz,
	       falsepositive ? " (false positive?)" : "");
}
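
/*
 * Decoding the per-CPU line (a sketch; again see
 * Documentation/RCU/stallwarn.rst for the authoritative description),
 * for example:
 *
 *	3-...0: (21 ticks this GP) idle=8f7/1/0x4000000000000000 softirq=12/14 fqs=5
 *
 * "3" is the CPU.  The four characters after the dash reflect online
 * state, ->qsmaskinit, and ->qsmaskinitnext (a letter such as "O",
 * "o", or "N" flags an offline-related condition, "." is the normal
 * case), followed by the irq-work character described above.  The
 * remaining fields give tick counts, dynticks state, softirq counts
 * at the start of and during the grace period, and the number of
 * force-quiescent-state passes.
 */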

/* Complain about starvation of grace-period kthread. */
static void rcu_check_gp_kthread_starvation(void)
{
	int cpu;
	struct task_struct *gpk = rcu_state.gp_kthread;
	unsigned long j;

	if (rcu_is_gp_kthread_starving(&j)) {
		cpu = gpk ? task_cpu(gpk) : -1;
		pr_err("%s kthread starved for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x ->cpu=%d\n",
		       rcu_state.name, j,
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(rcu_state.gp_state), rcu_state.gp_state,
		       gpk ? gpk->__state : ~0, cpu);
		if (gpk) {
			pr_err("\tUnless %s kthread gets sufficient CPU time, OOM is now expected behavior.\n", rcu_state.name);
			pr_err("RCU grace-period kthread stack dump:\n");
			sched_show_task(gpk);
			if (cpu >= 0) {
				if (cpu_is_offline(cpu)) {
					pr_err("RCU GP kthread last ran on offline CPU %d.\n", cpu);
				} else {
					pr_err("Stack dump where RCU GP kthread last ran:\n");
					if (!trigger_single_cpu_backtrace(cpu))
						dump_cpu_task(cpu);
				}
			}
			wake_up_process(gpk);
		}
	}
}

/* Complain about missing wakeups from expired fqs wait timer. */
static void rcu_check_gp_kthread_expired_fqs_timer(void)
{
	struct task_struct *gpk = rcu_state.gp_kthread;
	short gp_state;
	unsigned long jiffies_fqs;
	int cpu;

	/*
	 * Order reads of .gp_state and .jiffies_force_qs.
	 * Matching smp_wmb() is present in rcu_gp_fqs_loop().
	 */
	gp_state = smp_load_acquire(&rcu_state.gp_state);
	jiffies_fqs = READ_ONCE(rcu_state.jiffies_force_qs);

	if (gp_state == RCU_GP_WAIT_FQS &&
	    time_after(jiffies, jiffies_fqs + RCU_STALL_MIGHT_MIN) &&
	    gpk && !READ_ONCE(gpk->on_rq)) {
		cpu = task_cpu(gpk);
		pr_err("%s kthread timer wakeup didn't happen for %ld jiffies! g%ld f%#x %s(%d) ->state=%#x\n",
		       rcu_state.name, (jiffies - jiffies_fqs),
		       (long)rcu_seq_current(&rcu_state.gp_seq),
		       data_race(rcu_state.gp_flags),
		       gp_state_getname(RCU_GP_WAIT_FQS), RCU_GP_WAIT_FQS,
		       gpk->__state);
		pr_err("\tPossible timer handling issue on cpu=%d timer-softirq=%u\n",
		       cpu, kstat_softirqs_cpu(TIMER_SOFTIRQ, cpu));
	}
}

static void print_other_cpu_stall(unsigned long gp_seq, unsigned long gps)
{
	int cpu;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp;
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("StallDetected"));
	pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
	rcu_for_each_leaf_node(rnp) {
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->qsmask != 0) {
			for_each_leaf_node_possible_cpu(rnp, cpu)
				if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
					print_cpu_stall_info(cpu);
					ndetected++;
				}
		}
		ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
		lockdep_assert_irqs_disabled();
	}

	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(detected by %d, t=%ld jiffies, g=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - gps),
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks();

		/* Complain about tasks blocking the grace period. */
		rcu_for_each_leaf_node(rnp)
			rcu_print_detail_task_stall_rnp(rnp);
	} else {
		if (rcu_seq_current(&rcu_state.gp_seq) != gp_seq) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = data_race(rcu_state.gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rcu_state.name, j - gpa, j, gpa,
			       data_race(jiffies_till_next_fqs),
			       rcu_get_root()->qsmask);
		}
	}
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	panic_on_rcu_stall();

	rcu_force_quiescent_state(); /* Kick them all. */
}

static void print_cpu_stall(unsigned long gps)
{
	int cpu;
	unsigned long flags;
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
	struct rcu_node *rnp = rcu_get_root();
	long totqlen = 0;

	lockdep_assert_irqs_disabled();

	/* Kick and suppress, if so configured. */
	rcu_stall_kick_kthreads();
	if (rcu_stall_is_suppressed())
		return;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.rst for info on how to debug
	 * RCU CPU stall warnings.
	 */
	trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
	pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
	raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
	print_cpu_stall_info(smp_processor_id());
	raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
	for_each_possible_cpu(cpu)
		totqlen += rcu_get_n_cbs_cpu(cpu);
	pr_cont("\t(t=%lu jiffies g=%ld q=%lu)\n",
		jiffies - gps,
		(long)rcu_seq_current(&rcu_state.gp_seq), totqlen);

	rcu_check_gp_kthread_expired_fqs_timer();
	rcu_check_gp_kthread_starvation();

	rcu_dump_cpu_stacks();

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	/* Rewrite if needed in case of slow consoles. */
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rcu_state.jiffies_stall)))
		WRITE_ONCE(rcu_state.jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	panic_on_rcu_stall();

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	set_tsk_need_resched(current);
	set_preempt_need_resched();
}

static void check_cpu_stall(struct rcu_data *rdp)
{
	unsigned long gs1;
	unsigned long gs2;
	unsigned long gps;
	unsigned long j;
	unsigned long jn;
	unsigned long js;
	struct rcu_node *rnp;

	lockdep_assert_irqs_disabled();
	if ((rcu_stall_is_suppressed() && !READ_ONCE(rcu_kick_kthreads)) ||
	    !rcu_gp_in_progress())
		return;
	rcu_stall_kick_kthreads();
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rcu_state.gp_seq, then
	 * rcu_state.jiffies_stall, then rcu_state.gp_start, and finally
	 * another copy of rcu_state.gp_seq.  These values are updated in
	 * the opposite order with memory barriers (or equivalent) during
	 * grace-period initialization and cleanup.  Now, a false positive
	 * can occur if we get a new value of rcu_state.gp_start and an old
	 * value of rcu_state.jiffies_stall.  But given the memory barriers,
	 * the only way that this can happen is if one grace period ends
	 * and another starts between these two fetches.  This is detected
	 * by comparing the second fetch of rcu_state.gp_seq with the
	 * previous fetch from rcu_state.gp_seq.
	 *
	 * Given this check, comparisons of jiffies, rcu_state.jiffies_stall,
	 * and rcu_state.gp_start suffice to forestall false positives.
	 */
	gs1 = READ_ONCE(rcu_state.gp_seq);
	smp_rmb(); /* Pick up ->gp_seq first... */
	js = READ_ONCE(rcu_state.jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rcu_state.gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->gp_seq again. */
	gs2 = READ_ONCE(rcu_state.gp_seq);
	if (gs1 != gs2 ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
	if (rcu_gp_in_progress() &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
	    cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);

	} else if (rcu_gp_in_progress() &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
		   cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(gs2, gps);
		if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
			rcu_ftrace_dump(DUMP_ALL);
	}
}

//////////////////////////////////////////////////////////////////////////////
//
// RCU forward-progress mechanisms, including that of callback invocation.


/*
 * Check to see if a failure to end RCU priority inversion was due to
 * a CPU not passing through a quiescent state.  When this happens, there
 * is nothing that RCU priority boosting can do to help, so we shouldn't
 * count this as an RCU priority boosting failure.  A return of true says
 * RCU priority boosting is to blame, and false says otherwise.  If false
 * is returned, the first of the CPUs to blame is stored through cpup.
 * If there was no CPU blocking the current grace period, but also nothing
 * in need of being boosted, *cpup is set to -1.  This can happen in case
 * of vCPU preemption while the last CPU is reporting its quiescent state,
 * for example.
 *
 * If cpup is NULL, then a lockless quick check is carried out, suitable
 * for high-rate usage.  On the other hand, if cpup is non-NULL, each
 * rcu_node structure's ->lock is acquired, ruling out high-rate usage.
 */
bool rcu_check_boost_fail(unsigned long gp_state, int *cpup)
{
	bool atb = false;
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rnp) {
		if (!cpup) {
			if (READ_ONCE(rnp->qsmask)) {
				return false;
			} else {
				if (READ_ONCE(rnp->gp_tasks))
					atb = true;
				continue;
			}
		}
		*cpup = -1;
		raw_spin_lock_irqsave_rcu_node(rnp, flags);
		if (rnp->gp_tasks)
			atb = true;
		if (!rnp->qsmask) {
			// No CPUs without quiescent states for this rnp.
			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
			continue;
		}
		// Find the first holdout CPU.
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			if (rnp->qsmask & (1UL << (cpu - rnp->grplo))) {
				raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
				*cpup = cpu;
				return false;
			}
		}
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	}
	// Can't blame CPUs, so must blame RCU priority boosting.
	return atb;
}
EXPORT_SYMBOL_GPL(rcu_check_boost_fail);
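
/*
 * Hypothetical caller sketch (rcutorture is the intended user; the
 * messages below are made up): distinguishing "boosting failed" from
 * "a CPU never reached a quiescent state" when a boost interval
 * expires without the grace period ending:
 *
 *	int cpu;
 *
 *	if (rcu_check_boost_fail(gp_state, &cpu))
 *		pr_info("Boosting is to blame.\n");
 *	else if (cpu >= 0)
 *		pr_info("CPU %d never reached a quiescent state.\n", cpu);
 *	else
 *		pr_info("Nothing to boost and no holdout CPU.\n");
 */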

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	unsigned long cbs = 0;
	int cpu;
	unsigned long j;
	unsigned long ja;
	unsigned long jr;
	unsigned long js;
	unsigned long jw;
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	struct task_struct *t = READ_ONCE(rcu_state.gp_kthread);

	j = jiffies;
	ja = j - data_race(rcu_state.gp_activity);
	jr = j - data_race(rcu_state.gp_req_activity);
	js = j - data_race(rcu_state.gp_start);
	jw = j - data_race(rcu_state.gp_wake_time);
	pr_info("%s: wait state: %s(%d) ->state: %#lx ->rt_priority %u delta ->gp_start %lu ->gp_activity %lu ->gp_req_activity %lu ->gp_wake_time %lu ->gp_wake_seq %ld ->gp_seq %ld ->gp_seq_needed %ld ->gp_max %lu ->gp_flags %#x\n",
		rcu_state.name, gp_state_getname(rcu_state.gp_state),
		rcu_state.gp_state, t ? t->__state : 0x1ffffL, t ? t->rt_priority : 0xffU,
		js, ja, jr, jw, (long)data_race(rcu_state.gp_wake_seq),
		(long)data_race(rcu_state.gp_seq),
		(long)data_race(rcu_get_root()->gp_seq_needed),
		data_race(rcu_state.gp_max),
		data_race(rcu_state.gp_flags));
	rcu_for_each_node_breadth_first(rnp) {
		if (ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq), READ_ONCE(rnp->gp_seq_needed)) &&
		    !data_race(rnp->qsmask) && !data_race(rnp->boost_tasks) &&
		    !data_race(rnp->exp_tasks) && !data_race(rnp->gp_tasks))
			continue;
		pr_info("\trcu_node %d:%d ->gp_seq %ld ->gp_seq_needed %ld ->qsmask %#lx %c%c%c%c ->n_boosts %ld\n",
			rnp->grplo, rnp->grphi,
			(long)data_race(rnp->gp_seq), (long)data_race(rnp->gp_seq_needed),
			data_race(rnp->qsmask),
			".b"[!!data_race(rnp->boost_kthread_task)],
			".B"[!!data_race(rnp->boost_tasks)],
			".E"[!!data_race(rnp->exp_tasks)],
			".G"[!!data_race(rnp->gp_tasks)],
			data_race(rnp->n_boosts));
		if (!rcu_is_leaf_node(rnp))
			continue;
		for_each_leaf_node_possible_cpu(rnp, cpu) {
			rdp = per_cpu_ptr(&rcu_data, cpu);
			if (READ_ONCE(rdp->gpwrap) ||
			    ULONG_CMP_GE(READ_ONCE(rcu_state.gp_seq),
					 READ_ONCE(rdp->gp_seq_needed)))
				continue;
			pr_info("\tcpu %d ->gp_seq_needed %ld\n",
				cpu, (long)data_race(rdp->gp_seq_needed));
		}
	}
	for_each_possible_cpu(cpu) {
		rdp = per_cpu_ptr(&rcu_data, cpu);
		cbs += data_race(rdp->n_cbs_invoked);
		if (rcu_segcblist_is_offloaded(&rdp->cblist))
			show_rcu_nocb_state(rdp);
	}
	pr_info("RCU callbacks invoked since boot: %lu\n", cbs);
	show_rcu_tasks_gp_kthreads();
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * This function checks for grace-period requests that fail to motivate
 * RCU to come out of its idle mode.
 */
static void rcu_check_gp_start_stall(struct rcu_node *rnp, struct rcu_data *rdp,
				     const unsigned long gpssdelay)
{
	unsigned long flags;
	unsigned long j;
	struct rcu_node *rnp_root = rcu_get_root();
	static atomic_t warned = ATOMIC_INIT(0);

	if (!IS_ENABLED(CONFIG_PROVE_RCU) || rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    !smp_load_acquire(&rcu_state.gp_kthread)) // Get stable kthread.
		return;
	j = jiffies; /* Expensive access, and in common case don't get here. */
	if (time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned))
		return;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_read(&warned)) {
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	/* Hold onto the leaf lock to make others see warned==1. */

	if (rnp_root != rnp)
		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
	j = jiffies;
	if (rcu_gp_in_progress() ||
	    ULONG_CMP_GE(READ_ONCE(rnp_root->gp_seq),
			 READ_ONCE(rnp_root->gp_seq_needed)) ||
	    time_before(j, READ_ONCE(rcu_state.gp_req_activity) + gpssdelay) ||
	    time_before(j, READ_ONCE(rcu_state.gp_activity) + gpssdelay) ||
	    atomic_xchg(&warned, 1)) {
		if (rnp_root != rnp)
			/* irqs remain disabled. */
			raw_spin_unlock_rcu_node(rnp_root);
		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
		return;
	}
	WARN_ON(1);
	if (rnp_root != rnp)
		raw_spin_unlock_rcu_node(rnp_root);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
	show_rcu_gp_kthreads();
}

/*
 * Do a forward-progress check for rcutorture.  This is normally invoked
 * due to an OOM event.  The argument "j" gives the time period during
 * which rcutorture would like progress to have been made.
 */
void rcu_fwd_progress_check(unsigned long j)
{
	unsigned long cbs;
	int cpu;
	unsigned long max_cbs = 0;
	int max_cpu = -1;
	struct rcu_data *rdp;

	if (rcu_gp_in_progress()) {
		pr_info("%s: GP age %lu jiffies\n",
			__func__, jiffies - rcu_state.gp_start);
		show_rcu_gp_kthreads();
	} else {
		pr_info("%s: Last GP end %lu jiffies ago\n",
			__func__, jiffies - rcu_state.gp_end);
		preempt_disable();
		rdp = this_cpu_ptr(&rcu_data);
		rcu_check_gp_start_stall(rdp->mynode, rdp, j);
		preempt_enable();
	}
	for_each_possible_cpu(cpu) {
		cbs = rcu_get_n_cbs_cpu(cpu);
		if (!cbs)
			continue;
		if (max_cpu < 0)
			pr_info("%s: callbacks", __func__);
		pr_cont(" %d: %lu", cpu, cbs);
		if (cbs <= max_cbs)
			continue;
		max_cbs = cbs;
		max_cpu = cpu;
	}
	if (max_cpu >= 0)
		pr_cont("\n");
}
EXPORT_SYMBOL_GPL(rcu_fwd_progress_check);

/* Commandeer a sysrq key to dump RCU's tree. */
static bool sysrq_rcu;
module_param(sysrq_rcu, bool, 0444);

/* Dump grace-period-request information due to commandeered sysrq. */
static void sysrq_show_rcu(int key)
{
	show_rcu_gp_kthreads();
}

static const struct sysrq_key_op sysrq_rcudump_op = {
	.handler = sysrq_show_rcu,
	.help_msg = "show-rcu(y)",
	.action_msg = "Show RCU tree",
	.enable_mask = SYSRQ_ENABLE_DUMP,
};

static int __init rcu_sysrq_init(void)
{
	if (sysrq_rcu)
		return register_sysrq_key('y', &sysrq_rcudump_op);
	return 0;
}
early_initcall(rcu_sysrq_init);
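
/*
 * Usage sketch (assuming this file is built into the rcutree object,
 * and given the 0444 permission, so the parameter is boot-time only):
 * booting with rcutree.sysrq_rcu=1 registers the key, after which
 *
 *	echo y > /proc/sysrq-trigger
 *
 * dumps grace-period kthread state via show_rcu_gp_kthreads().
 */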