Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
Russell King | b2c0b2c | 2014-09-03 23:57:13 +0100 | [diff] [blame] | 2 | /* |
| 3 | * NMI backtrace support |
| 4 | * |
| 5 | * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King, |
| 6 | * with the following header: |
| 7 | * |
| 8 | * HW NMI watchdog support |
| 9 | * |
| 10 | * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. |
| 11 | * |
| 12 | * Arch specific calls to support NMI watchdog |
| 13 | * |
| 14 | * Bits copied from original nmi.c file |
| 15 | */ |
| 16 | #include <linux/cpumask.h> |
| 17 | #include <linux/delay.h> |
| 18 | #include <linux/kprobes.h> |
| 19 | #include <linux/nmi.h> |
Chris Metcalf | 6727ad9 | 2016-10-07 17:02:55 -0700 | [diff] [blame] | 20 | #include <linux/cpu.h> |
Ingo Molnar | b17b015 | 2017-02-08 18:51:35 +0100 | [diff] [blame] | 21 | #include <linux/sched/debug.h> |
Russell King | b2c0b2c | 2014-09-03 23:57:13 +0100 | [diff] [blame] | 22 | |
#ifdef arch_trigger_cpumask_backtrace
/* For reliability, we're prepared to waste bits here. */
/*
 * Set of CPUs that still owe us a backtrace.  The trigger side populates
 * it from the caller's mask; each target CPU clears its own bit in
 * nmi_cpu_backtrace() once it has finished dumping, which is what the
 * trigger side polls for.  Sized NR_CPUS (not nr_cpu_ids) on purpose.
 */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_cpumask_backtrace */
/*
 * Bit 0 serializes concurrent triggers: taken with test_and_set_bit()
 * in nmi_trigger_cpumask_backtrace(), dropped with clear_bit_unlock().
 */
static unsigned long backtrace_flag;
| 29 | |
/*
 * When raise() is called it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 */
/*
 * Ask the CPUs in @mask to dump their stacks via an arch-provided
 * @raise() callback (typically an NMI IPI), then wait up to 10 seconds
 * for them all to report in.
 *
 * @mask:         CPUs to backtrace.
 * @exclude_self: if true, the calling CPU is removed from the set.
 * @raise:        arch hook that delivers the interrupt to the CPUs
 *                still set in the mask it is handed.
 *
 * Concurrent callers are collapsed: if a backtrace is already in
 * progress this returns immediately without dumping anything.
 */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask))
{
	/* get_cpu() pins us to this CPU until the matching put_cpu(). */
	int i, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	/* Publish the set of CPUs we expect dumps from. */
	cpumask_copy(to_cpumask(backtrace_mask), mask);
	if (exclude_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	/*
	 * Don't try to send an NMI to this cpu; it may work on some
	 * architectures, but on others it may not, and we'll get
	 * information at least as useful just by doing a dump_stack() here.
	 * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
	 */
	if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
		nmi_cpu_backtrace(NULL);

	/* Only fire the arch hook if any remote CPUs remain to be poked. */
	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		/* Busy-waiting with IRQs on; keep the watchdog quiet. */
		touch_softlockup_watchdog();
	}

	/*
	 * Force flush any remote buffers that might be stuck in IRQ context
	 * and therefore could not run their irq_work.
	 */
	printk_trigger_flush();

	/* Release-ordered drop of the in-progress flag, then re-enable preemption. */
	clear_bit_unlock(0, &backtrace_flag);
	put_cpu();
}
| 87 | |
// Dump stacks even for idle CPUs.
// Writable at runtime via /sys/module/.../parameters/backtrace_idle (0644);
// when false (default), CPUs caught in the idle loop only log a one-line skip.
static bool backtrace_idle;
module_param(backtrace_idle, bool, 0644);
| 91 | |
/*
 * Per-CPU half of the backtrace handshake, called from the arch's NMI
 * (or equivalent) handler with the interrupted @regs, or with NULL when
 * invoked directly for the local CPU by nmi_trigger_cpumask_backtrace().
 *
 * Returns true if this CPU was in backtrace_mask and a dump (or the
 * idle-skip message) was emitted; false if the interrupt wasn't ours.
 * Clearing our bit in backtrace_mask is what tells the trigger side
 * this CPU is done, so it must happen only after all printing.
 */
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long flags;

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		/*
		 * Allow nested NMI backtraces while serializing
		 * against other CPUs.
		 */
		printk_cpu_lock_irqsave(flags);
		if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
			/* Idle CPUs are uninteresting unless backtrace_idle says otherwise. */
			pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
				cpu, (void *)instruction_pointer(regs));
		} else {
			pr_warn("NMI backtrace for cpu %d\n", cpu);
			/* With regs we can show the interrupted context; otherwise dump our own stack. */
			if (regs)
				show_regs(regs);
			else
				dump_stack();
		}
		printk_cpu_unlock_irqrestore(flags);
		/* Signal completion to the polling trigger CPU. */
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return true;
	}

	return false;
}
/* Must not be kprobed: runs in NMI context. */
NOKPROBE_SYMBOL(nmi_cpu_backtrace);
#endif