/*
 * NMI backtrace support
 *
 * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
 * with the following header:
 *
 *   HW NMI watchdog support
 *
 *   started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 *   Arch specific calls to support NMI watchdog
 *
 *   Bits copied from original nmi.c file
 */
| 15 | #include <linux/cpumask.h> |
| 16 | #include <linux/delay.h> |
| 17 | #include <linux/kprobes.h> |
| 18 | #include <linux/nmi.h> |
Russell King | b2c0b2c | 2014-09-03 23:57:13 +0100 | [diff] [blame] | 19 | |
#ifdef arch_trigger_cpumask_backtrace
/*
 * Set of CPUs that still owe us a backtrace. For reliability, we're
 * prepared to waste bits here: sized for NR_CPUS rather than the
 * number of possible CPUs.
 */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/*
 * "in progress" flag of arch_trigger_cpumask_backtrace; bit 0 serializes
 * concurrent triggers so only one CPU at a time drives a backtrace round.
 */
static unsigned long backtrace_flag;
| 26 | |
/*
 * nmi_trigger_cpumask_backtrace - dump stacks of the CPUs in @mask
 * @mask:		CPUs to backtrace
 * @exclude_self:	skip the calling CPU (it dumps locally instead)
 * @raise:		arch callback that actually sends the NMI/IPI
 *
 * When raise() is called it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask))
{
	/* get_cpu() disables preemption so this_cpu stays valid throughout */
	int i, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	/* Only safe to write backtrace_mask once we own backtrace_flag */
	cpumask_copy(to_cpumask(backtrace_mask), mask);
	if (exclude_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	/*
	 * Don't try to send an NMI to this cpu; it may work on some
	 * architectures, but on others it may not, and we'll get
	 * information at least as useful just by doing a dump_stack() here.
	 * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
	 */
	if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
		nmi_cpu_backtrace(NULL);

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		/* Don't let this busy-wait itself trip the softlockup detector */
		touch_softlockup_watchdog();
	}

	/*
	 * Force flush any remote buffers that might be stuck in IRQ context
	 * and therefore could not run their irq_work.
	 */
	printk_nmi_flush();

	/* Release semantics: pairs with the test_and_set_bit() above */
	clear_bit_unlock(0, &backtrace_flag);
	put_cpu();
}
| 84 | |
Russell King | b2c0b2c | 2014-09-03 23:57:13 +0100 | [diff] [blame] | 85 | bool nmi_cpu_backtrace(struct pt_regs *regs) |
| 86 | { |
| 87 | int cpu = smp_processor_id(); |
| 88 | |
| 89 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { |
Russell King | b2c0b2c | 2014-09-03 23:57:13 +0100 | [diff] [blame] | 90 | pr_warn("NMI backtrace for cpu %d\n", cpu); |
Daniel Thompson | 0768330 | 2015-09-22 17:12:10 +0100 | [diff] [blame] | 91 | if (regs) |
| 92 | show_regs(regs); |
| 93 | else |
| 94 | dump_stack(); |
Russell King | b2c0b2c | 2014-09-03 23:57:13 +0100 | [diff] [blame] | 95 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); |
| 96 | return true; |
| 97 | } |
| 98 | |
| 99 | return false; |
| 100 | } |
| 101 | NOKPROBE_SYMBOL(nmi_cpu_backtrace); |
| 102 | #endif |