/*
 * Detect hard lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/sched/debug.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;

static unsigned long hardlockup_allcpu_dumped;
static unsigned int watchdog_cpus;

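/*
 * Called via touch_nmi_watchdog() so that the next NMI sample on this CPU
 * skips the hardlockup check once (the flag is cleared in the overflow
 * callback below).
 */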
void arch_touch_nmi_watchdog(void)
{
	/*
	 * Using __raw here because some code paths have
	 * preemption enabled.  If preemption is enabled
	 * then interrupts should be enabled too, in which
	 * case we shouldn't have to worry about the watchdog
	 * going off.
	 */
	raw_cpu_write(watchdog_nmi_touch, true);
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);

#ifdef CONFIG_HARDLOCKUP_CHECK_TIMESTAMP
static DEFINE_PER_CPU(ktime_t, last_timestamp);
static DEFINE_PER_CPU(unsigned int, nmi_rearmed);
static ktime_t watchdog_hrtimer_sample_threshold __read_mostly;

void watchdog_update_hrtimer_threshold(u64 period)
{
	/*
	 * The hrtimer runs with a period of (watchdog_threshold * 2) / 5
	 *
	 * So it runs effectively with 2.5 times the rate of the NMI
	 * watchdog. That means the hrtimer should fire 2-3 times before
	 * the NMI watchdog expires. The NMI watchdog on x86 is based on
	 * unhalted CPU cycles, so if Turbo-Mode is enabled the CPU cycles
	 * might run way faster than expected and the NMI fires in a
	 * smaller period than the one deduced from the nominal CPU
	 * frequency. Depending on the Turbo-Mode factor this might be fast
	 * enough to get the NMI period smaller than the hrtimer watchdog
	 * period and trigger false positives.
	 *
	 * The sample threshold is used to check in the NMI handler whether
	 * the minimum time between two NMI samples has elapsed. That
	 * prevents false positives.
	 *
	 * Set this to 4/5 of the actual watchdog threshold period so the
	 * hrtimer is guaranteed to fire at least once within the real
	 * watchdog threshold.
	 */
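	/*
	 * Illustrative numbers, assuming the default watchdog_thresh of 10
	 * seconds: the hrtimer period passed in is 2 * 10s / 5 = 4s, so the
	 * threshold below becomes 2 * 4s = 8s.  A nominally clocked NMI
	 * arrives every ~10s and passes the check, while a Turbo-inflated
	 * NMI that arrives before 8s have elapsed is rejected as a false
	 * positive.
	 */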
	watchdog_hrtimer_sample_threshold = period * 2;
}

static bool watchdog_check_timestamp(void)
{
	ktime_t delta, now = ktime_get_mono_fast_ns();

	delta = now - __this_cpu_read(last_timestamp);
	if (delta < watchdog_hrtimer_sample_threshold) {
		/*
		 * If ktime is jiffies based, a stalled timer would prevent
		 * jiffies from being incremented and the filter would look
		 * at a stale timestamp and never trigger.
		 */
		if (__this_cpu_inc_return(nmi_rearmed) < 10)
			return false;
	}
	__this_cpu_write(nmi_rearmed, 0);
	__this_cpu_write(last_timestamp, now);
	return true;
}
#else
static inline bool watchdog_check_timestamp(void)
{
	return true;
}
#endif

static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
				       struct perf_sample_data *data,
				       struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__this_cpu_read(watchdog_nmi_touch) == true) {
		__this_cpu_write(watchdog_nmi_touch, false);
		return;
	}

	if (!watchdog_check_timestamp())
		return;

	/*
	 * Check for a hardlockup by making sure the timer interrupt is
	 * incrementing.  The timer interrupt should have fired multiple
	 * times before we overflowed.  If it hasn't, that is a good
	 * indication that the CPU is stuck.
	 */
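	/*
	 * Concretely, is_hardlockup() in kernel/watchdog.c compares the
	 * per-CPU hrtimer interrupt count against the value saved at the
	 * previous NMI sample; if it has not advanced, the CPU has not
	 * serviced timer interrupts for a full sample period.
	 */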
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__this_cpu_read(hard_watchdog_warn) == true)
			return;

		pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		/*
		 * Perform all-CPU dump only once to avoid multiple hardlockups
		 * generating interleaving traces
		 */
		if (sysctl_hardlockup_all_cpu_backtrace &&
		    !test_and_set_bit(0, &hardlockup_allcpu_dumped))
			trigger_allbutself_cpu_backtrace();

		if (hardlockup_panic)
			nmi_panic(regs, "Hard LOCKUP");

		__this_cpu_write(hard_watchdog_warn, true);
		return;
	}

	__this_cpu_write(hard_watchdog_warn, false);
	return;
}

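/*
 * Create the hardlockup perf event for the calling CPU.  The event is bound
 * to that CPU and starts out disabled (wd_hw_attr.disabled = 1); it is
 * switched on later by hardlockup_detector_perf_enable().
 */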
static int hardlockup_detector_event_create(void)
{
	unsigned int cpu = smp_processor_id();
	struct perf_event_attr *wd_attr;
	struct perf_event *evt;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
					       watchdog_overflow_callback, NULL);
	if (IS_ERR(evt)) {
		pr_info("Perf event create on CPU %d failed with %ld\n", cpu,
			PTR_ERR(evt));
		return PTR_ERR(evt);
	}
	this_cpu_write(watchdog_ev, evt);
	return 0;
}

/**
 * hardlockup_detector_perf_enable - Enable the local event
 */
void hardlockup_detector_perf_enable(void)
{
	if (hardlockup_detector_event_create())
		return;

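	/* The "consumes one hw-PMU counter" notice is printed only once */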
	if (!watchdog_cpus++)
		pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");

	perf_event_enable(this_cpu_read(watchdog_ev));
}

/**
 * hardlockup_detector_perf_disable - Disable the local event
 */
void hardlockup_detector_perf_disable(void)
{
	struct perf_event *event = this_cpu_read(watchdog_ev);

	if (event) {
		perf_event_disable(event);
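		/*
		 * Do not release the event from this path; park it in
		 * dead_event instead so that hardlockup_detector_perf_cleanup()
		 * can release it later.
		 */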
		this_cpu_write(watchdog_ev, NULL);
		this_cpu_write(dead_event, event);
		cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
		watchdog_cpus--;
	}
}

/**
 * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
 *
 * Called from lockup_detector_cleanup(). Serialized by the caller.
 */
void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		struct perf_event *event = per_cpu(dead_event, cpu);

		/*
		 * Required because for_each_cpu() reports CPU0 as set
		 * unconditionally on UP kernels, even when no event was
		 * ever parked there.
		 */
		if (event)
			perf_event_release_kernel(event);
		per_cpu(dead_event, cpu) = NULL;
	}
	cpumask_clear(&dead_events_mask);
}

/**
 * hardlockup_detector_perf_stop - Globally stop watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_stop(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_disable(event);
	}
}

/**
 * hardlockup_detector_perf_restart - Globally restart watchdog events
 *
 * Special interface for x86 to handle the perf HT bug.
 */
void __init hardlockup_detector_perf_restart(void)
{
	int cpu;

	lockdep_assert_cpus_held();

	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		return;

	for_each_online_cpu(cpu) {
		struct perf_event *event = per_cpu(watchdog_ev, cpu);

		if (event)
			perf_event_enable(event);
	}
}

/**
 * hardlockup_detector_perf_init - Probe whether NMI event is available at all
 */
int __init hardlockup_detector_perf_init(void)
{
	int ret = hardlockup_detector_event_create();

	if (ret) {
		pr_info("Perf NMI watchdog permanently disabled\n");
	} else {
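		/*
		 * The event was only a probe on the boot CPU; release it
		 * again.  The per-CPU events used at runtime are created
		 * later via hardlockup_detector_perf_enable().
		 */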
		perf_event_release_kernel(this_cpu_read(watchdog_ev));
		this_cpu_write(watchdog_ev, NULL);
	}
	return ret;
}