blob: fef917e79b9d614eeaca609c9c03e980bae2272f [file] [log] [blame]
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/kexec.h>
#include <linux/sysfs.h>
#include <linux/bug.h>
#include <linux/nmi.h>

#include <asm/stacktrace.h>

Steven Rostedt198d2082014-02-06 09:41:31 -050019static void *is_irq_stack(void *p, void *irq)
20{
21 if (p < irq || p >= (irq + THREAD_SIZE))
22 return NULL;
23 return irq + THREAD_SIZE;
24}
25
26
27static void *is_hardirq_stack(unsigned long *stack, int cpu)
28{
29 void *irq = per_cpu(hardirq_stack, cpu);
30
31 return is_irq_stack(stack, irq);
32}
33
34static void *is_softirq_stack(unsigned long *stack, int cpu)
35{
36 void *irq = per_cpu(softirq_stack, cpu);
37
38 return is_irq_stack(stack, irq);
39}
Frederic Weisbecker0406ca62009-07-01 21:02:09 +020040
/*
 * Walk the kernel stack of @task (or current when @task is NULL) and
 * feed each chunk to @ops->walk_stack, chaining from the hard/soft irq
 * stacks back toward the task stack.
 *
 * @task:  task whose stack to walk; NULL means current.
 * @regs:  registers at the point of interest, may be NULL.
 * @stack: starting stack pointer; NULL means "derive one" (see below).
 * @bp:    starting frame pointer; 0 means "derive one via stack_frame()".
 * @ops:   callbacks invoked for stack-switch annotations and frames.
 * @data:  opaque cookie passed through to @ops.
 */
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	/* get_cpu() disables preemption so the per-cpu irq stacks are stable. */
	const unsigned cpu = get_cpu();
	int graph = 0;
	u32 *prev_esp;

	if (!task)
		task = current;

	if (!stack) {
		/* For current, our own stack frame is a valid start. */
		unsigned long dummy;

		stack = &dummy;
		if (task != current)
			stack = (unsigned long *)task->thread.sp;
	}

	if (!bp)
		bp = stack_frame(task, regs);

	for (;;) {
		/* End of the irq stack we are currently on, or NULL. */
		void *end_stack;

		end_stack = is_hardirq_stack(stack, cpu);
		if (!end_stack)
			end_stack = is_softirq_stack(stack, cpu);

		bp = ops->walk_stack(task, stack, bp, ops, data,
				     end_stack, &graph);

		/* Stop if not on irq stack */
		if (!end_stack)
			break;

		/* The previous esp is saved on the bottom of the stack */
		prev_esp = (u32 *)(end_stack - THREAD_SIZE);
		stack = (unsigned long *)*prev_esp;
		if (!stack)
			break;

		/* Let the consumer veto following the next (IRQ) stack. */
		if (ops->stack(data, "IRQ") < 0)
			break;
		touch_nmi_watchdog();
	}
	put_cpu();
}
EXPORT_SYMBOL(dump_trace);
90
Neil Horman878719e2008-10-23 10:40:06 -040091void
Alexander van Heukelum2bc5f922008-09-30 13:12:14 +020092show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
Namhyung Kime8e999cf2011-03-18 11:40:06 +090093 unsigned long *sp, unsigned long bp, char *log_lvl)
Alexander van Heukelum2bc5f922008-09-30 13:12:14 +020094{
95 unsigned long *stack;
96 int i;
97
98 if (sp == NULL) {
99 if (task)
100 sp = (unsigned long *)task->thread.sp;
101 else
102 sp = (unsigned long *)&sp;
103 }
104
105 stack = sp;
106 for (i = 0; i < kstack_depth_to_print; i++) {
107 if (kstack_end(stack))
108 break;
Adrien Schildknecht1fc7f612015-02-20 03:34:21 +0100109 if ((i % STACKSLOTS_PER_LINE) == 0) {
110 if (i != 0)
111 pr_cont("\n");
112 printk("%s %08lx", log_lvl, *stack++);
113 } else
114 pr_cont(" %08lx", *stack++);
Alexander van Heukelumca0a8162008-10-04 23:12:44 +0200115 touch_nmi_watchdog();
Alexander van Heukelum2bc5f922008-09-30 13:12:14 +0200116 }
Joe Perchesc767a542012-05-21 19:50:07 -0700117 pr_cont("\n");
Namhyung Kime8e999cf2011-03-18 11:40:06 +0900118 show_trace_log_lvl(task, regs, sp, bp, log_lvl);
Alexander van Heukelum2bc5f922008-09-30 13:12:14 +0200119}
120
Alexander van Heukelum2bc5f922008-09-30 13:12:14 +0200121
/*
 * Emergency dump of registers, plus — for faults taken in kernel mode —
 * the raw stack and the code bytes around the faulting instruction.
 */
void show_regs(struct pt_regs *regs)
{
	int i;

	show_regs_print_info(KERN_EMERG);
	__show_regs(regs, !user_mode(regs));

	/*
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (!user_mode(regs)) {
		/* Show roughly 43/64 of code_bytes before IP, rest after. */
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;
		u8 *ip;

		pr_emerg("Stack:\n");
		show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);

		pr_emerg("Code:");

		ip = (u8 *)regs->ip - code_prologue;
		if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
			/* try starting at IP */
			ip = (u8 *)regs->ip;
			code_len = code_len - code_prologue + 1;
		}
		for (i = 0; i < code_len; i++, ip++) {
			/* Each byte is probed: it may cross into an unmapped page. */
			if (ip < (u8 *)PAGE_OFFSET ||
			    probe_kernel_address(ip, c)) {
				pr_cont(" Bad EIP value.");
				break;
			}
			/* Highlight the byte at the faulting IP with <..>. */
			if (ip == (u8 *)regs->ip)
				pr_cont(" <%02x>", c);
			else
				pr_cont(" %02x", c);
		}
	}
	pr_cont("\n");
}
164
165int is_valid_bugaddr(unsigned long ip)
166{
167 unsigned short ud2;
168
169 if (ip < PAGE_OFFSET)
170 return 0;
171 if (probe_kernel_address((unsigned short *)ip, ud2))
172 return 0;
173
174 return ud2 == 0x0b0f;
175}