// SPDX-License-Identifier: GPL-2.0
/*
 * Detect hard lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;

static unsigned long hardlockup_allcpu_dumped;
static atomic_t watchdog_cpus = ATOMIC_INIT(0);

notrace void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
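
/*
 * Callers normally reach this through touch_nmi_watchdog() in <linux/nmi.h>,
 * which also touches the softlockup detector. A sketch of a typical caller
 * (the loop below is hypothetical, not from this file):
 *
 *	while (polling_slow_hardware())
 *		touch_nmi_watchdog();
 */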

#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
static DEFINE_PER_CPU(ktime_t, last_timestamp);
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;

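/*
 * A worked example, assuming the default watchdog_thresh of 10 seconds
 * (set in watchdog.c): the hrtimer sample period passed in here is
 * 10 * 2 / 5 = 4s, so the threshold below becomes 8s -- 4/5 of the hard
 * lockup threshold -- and the hrtimer is guaranteed to fire at least
 * once between two NMI samples.
 */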
void watchdog_update_hrtimer_threshold(u64 period)
{
	/*
	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
	 *
	 * So it runs effectively with 2.5 times the rate of the NMI
	 * watchdog. That means the hrtimer should fire 2-3 times before
	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
	 * might run way faster than expected and the NMI fires in a
	 * smaller period than the one deduced from the nominal CPU
	 * frequency. Depending on the Turbo-Mode factor this might be fast
	 * enough to get the NMI period smaller than the hrtimer watchdog
	 * period and trigger false positives.
	 *
	 * The sample threshold is used to check in the NMI handler whether
	 * the minimum time between two NMI samples has elapsed. That
	 * prevents false positives.
	 *
	 * Set this to 4/5 of the actual watchdog threshold period so the
	 * hrtimer is guaranteed to fire at least once within the real
	 * watchdog threshold.
	 */
	watchdog_hrtimer_sample_threshold = period * 2;
}

static bool watchdog_check_timestamp(void)
{
	ktime_t delta, now = ktime_get_mono_fast_ns();

	delta = now - __this_cpu_read(last_timestamp);
	if (delta < watchdog_hrtimer_sample_threshold) {
		/*
		 * If ktime is jiffies based, a stalled timer would prevent
		 * jiffies from being incremented and the filter would look
		 * at a stale timestamp and never trigger.
		 */
		if (__this_cpu_inc_return(nmi_rearmed) < 10)
			return false;
	}
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, now);
	return true;
}
#else
static inline bool watchdog_check_timestamp(void)
{
	return true;
}
#endif

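/*
 * The event is created pinned (it must always be live on the PMU and goes
 * into an error state rather than being rotated out) and disabled;
 * hardlockup_detector_perf_enable() turns it on with perf_event_enable()
 * after it has been created.
 */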
static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch)) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	if (!watchdog_check_timestamp())
		return;

	/*
	 * Check for a hardlockup by making sure the CPU's timer
	 * interrupt is incrementing. The timer interrupt should have
	 * fired multiple times before we overflowed. If it hasn't,
	 * that is a good indication the CPU is stuck.
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* Only print hardlockups once. */
		if (__this_cpu_read(hard_watchdog_warn))
			return;

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n",
			 this_cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		/*
		 * Perform all-CPU dump only once to avoid multiple
		 * hardlockups generating interleaving traces.
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
		    !test_and_set_bit(0, &hardlockup_allcpu_dumped))
			trigger_allbutself_cpu_backtrace();

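		/*
		 * nmi_panic() rather than panic(): this runs in NMI context
		 * and must not deadlock if a panic is already in progress
		 * on another CPU.
		 */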
		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
}

static int hardlockup_detector_event_create(void)
{
	unsigned int cpu = smp_processor_id();
	struct perf_event_attr *wd_attr;
	struct perf_event *evt;

	wd_attr = &wd_hw_attr;
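	/*
	 * hw_nmi_get_sample_period() is arch-provided; on x86 it converts
	 * watchdog_thresh (in seconds) into the equivalent number of CPU
	 * cycles, so the cycle counter overflows -- and the NMI fires --
	 * once per threshold period.
	 */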
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
					       watchdog_overflow_callback, NULL);
	if (IS_ERR(evt)) {
		pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
			 PTR_ERR(evt));
		return PTR_ERR(evt);
	}
	this_cpu_write(watchdog_ev, evt);
	return 0;
}

/**
 * hardlockup_detector_perf_enable - Enable the local event
 */
void hardlockup_detector_perf_enable(void)
{
	if (hardlockup_detector_event_create())
		return;

	/* use original value for check */
	if (!atomic_fetch_inc(&watchdog_cpus))
		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");

	perf_event_enable(this_cpu_read(watchdog_ev));
}

/**
 * hardlockup_detector_perf_disable - Disable the local event
 */
void hardlockup_detector_perf_disable(void)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	if (event) {
		perf_event_disable(event);
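		/*
		 * The event is parked rather than released here: this path
		 * runs during CPU hotplug, so the actual release is deferred
		 * to hardlockup_detector_perf_cleanup(), which runs later
		 * from lockup_detector_cleanup().
		 */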
		this_cpu_write(watchdog_ev, NULL);
		this_cpu_write(dead_event, event);
		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
		atomic_dec(&watchdog_cpus);
	}
}

/**
 * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
 *
 * Called from lockup_detector_cleanup(). Serialized by the caller.
 */
void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		struct perf_event *event = per_cpu(dead_event, cpu);

		/*
		 * Required because for_each_cpu() unconditionally reports
		 * CPU0 as set on UP kernels. Sigh.
		 */
		if (event)
			perf_event_release_kernel(event);
		per_cpu(dead_event, cpu) = NULL;
	}
	cpumask_clear(&dead_events_mask);
}

/**
 * hardlockup_detector_perf_stop - Globally stop watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_stop(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_disable(event);
	}
}

/**
 * hardlockup_detector_perf_restart - Globally restart watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_restart(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return;

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_enable(event);
	}
}

/**
 * hardlockup_detector_perf_init - Probe whether NMI event is available at all
 */
int __init hardlockup_detector_perf_init(void)
{
	int ret = hardlockup_detector_event_create();

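	/*
	 * This is only a boot-time probe: on success the test event is
	 * released again right away; the per-CPU watchdog events are created
	 * later through hardlockup_detector_perf_enable() when the detector
	 * is actually started.
	 */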
	if (ret) {
		pr_info("Perf NMI watchdog permanently disabled\n");
	} else {
		perf_event_release_kernel(this_cpu_read(watchdog_ev));
		this_cpu_write(watchdog_ev, NULL);
	}
	return ret;
}