/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

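/*
 * Return the end of the per-cpu irq stack beginning at @irq if @p points
 * into it, or NULL if @p lies outside that stack.
 */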
static void *is_irq_stack(void *p, void *irq)
{
	if (p < irq || p >= (irq + THREAD_SIZE))
		return NULL;
	return irq + THREAD_SIZE;
}

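/*
 * Helpers that report whether @stack points into the given cpu's hardirq
 * or softirq stack, returning the end of that stack if so.
 */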
static void *is_hardirq_stack(unsigned long *stack, int cpu)
{
	void *irq = per_cpu(hardirq_stack, cpu);

	return is_irq_stack(stack, irq);
}

static void *is_softirq_stack(unsigned long *stack, int cpu)
{
	void *irq = per_cpu(softirq_stack, cpu);

	return is_irq_stack(stack, irq);
}

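/*
 * Walk the stack of @task (or the current task), invoking the callbacks
 * in @ops for each stack and frame.  On 32-bit the hardirq and softirq
 * stacks are separate from the task stack: when the chunk just walked
 * turns out to be an irq stack, the previous esp saved at the bottom of
 * that stack is used to continue the walk on the interrupted stack.
 */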
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	const unsigned cpu = get_cpu();
	int graph = 0;
	u32 *prev_esp;

	if (!task)
		task = current;

	if (!stack) {
		unsigned long dummy;

		stack = &dummy;
		if (task != current)
			stack = (unsigned long *)task->thread.sp;
	}

	if (!bp)
		bp = stack_frame(task, regs);

	for (;;) {
		void *end_stack;

		end_stack = is_hardirq_stack(stack, cpu);
		if (!end_stack)
			end_stack = is_softirq_stack(stack, cpu);

		bp = ops->walk_stack(task, stack, bp, ops, data,
				     end_stack, &graph);

		/* Stop if not on irq stack */
		if (!end_stack)
			break;

		/* The previous esp is saved on the bottom of the stack */
		prev_esp = (u32 *)(end_stack - THREAD_SIZE);
		stack = (unsigned long *)*prev_esp;
		if (!stack)
			break;

		if (ops->stack(data, "IRQ") < 0)
			break;
		touch_nmi_watchdog();
	}
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);

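/*
 * Dump the raw contents of the stack, at most kstack_depth_to_print
 * words, STACKSLOTS_PER_LINE words per line, then print the call trace.
 */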
void
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		   unsigned long *sp, unsigned long bp, char *log_lvl)
{
	unsigned long *stack;
	int i;

	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.sp;
		else
			sp = (unsigned long *)&sp;
	}

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (kstack_end(stack))
			break;
		if ((i % STACKSLOTS_PER_LINE) == 0) {
			if (i != 0)
				pr_cont("\n");
			printk("%s %08lx", log_lvl, *stack++);
		} else
			pr_cont(" %08lx", *stack++);
		touch_nmi_watchdog();
	}
	pr_cont("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

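/*
 * Print the register state and, for a trap taken in kernel mode, the
 * stack contents and the code bytes around the faulting instruction.
 */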
void show_regs(struct pt_regs *regs)
{
	int i;

	show_regs_print_info(KERN_EMERG);
	__show_regs(regs, !user_mode(regs));

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		pr_emerg("Stack:\n");
		show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);

		pr_emerg("Code:");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			if (ip < (u8 *)PAGE_OFFSET ||
			    probe_kernel_address(ip, c)) {
				pr_cont(" Bad EIP value.");
				break;
			}
			if (ip == (u8 *)regs->ip)
				pr_cont(" <%02x>", c);
			else
				pr_cont(" %02x", c);
		}
	}
	pr_cont("\n");
}

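/*
 * BUG() is implemented with a ud2 instruction (opcode bytes 0x0f 0x0b).
 * An address is a valid BUG address only if it is a kernel address that
 * can be read safely and actually contains a ud2.
 */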
int is_valid_bugaddr(unsigned long ip)
{
	unsigned short ud2;

	if (ip < PAGE_OFFSET)
		return 0;
	if (probe_kernel_address((unsigned short *)ip, ud2))
		return 0;

	return ud2 == 0x0b0f;
}