/*
 * Stack dumping functions
 *
 * Copyright IBM Corp. 1999, 2013
 */

#include <linux/kallsyms.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/utsname.h>
#include <linux/export.h>
#include <linux/kdebug.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/debug.h>
#include <asm/dis.h>
#include <asm/ipl.h>

#ifndef CONFIG_64BIT
#define LONG "%08lx "
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define LONG "%016lx "
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

/*
 * For show_trace we have three different stacks to consider:
 *  - the panic stack which is used if the kernel stack has overflowed
 *  - the asynchronous interrupt stack (cpu related)
 *  - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 */
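/*
 * __show_trace() walks the frames of one stack between 'low' and 'high'
 * and returns the stack pointer at which it stopped, so that the caller
 * can resume the walk on the next stack. The printed return address of
 * each frame is the saved r14 taken from gprs[8] of the frame's register
 * save area.
 */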
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while (1) {
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		addr = sf->gprs[8] & PSW_ADDR_INSN;
		printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			addr = sf->gprs[8] & PSW_ADDR_INSN;
			printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
		}
		/* Zero backchain detected, check for interrupt frame. */
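		/*
		 * The pt_regs of the interrupted context are expected right
		 * behind the current frame; the walk then continues with the
		 * stack pointer (gprs[15]) saved in those pt_regs.
		 */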
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		addr = regs->psw.addr & PSW_ADDR_INSN;
		printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
		low = sp;
		sp = regs->gprs[15];
	}
}

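/*
 * show_trace() starts from the given stack pointer (or from the current
 * r15 / the task's saved ksp) and walks the panic stack (if
 * CONFIG_CHECK_STACK is set), the async stack and finally the process
 * stack, in that order.
 */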
static void show_trace(struct task_struct *task, unsigned long *stack)
{
	const unsigned long frame_size =
		STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp,
			  S390_lowcore.panic_stack + frame_size - 4096,
			  S390_lowcore.panic_stack + frame_size);
#endif
	sp = __show_trace(sp,
			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
			  S390_lowcore.async_stack + frame_size);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	if (!task)
		task = current;
	debug_show_held_locks(task);
}

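/*
 * show_stack() first dumps the raw stack contents, up to
 * kstack_depth_to_print longs or until the pointer reaches a THREAD_SIZE
 * boundary, and then prints the call trace.
 */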
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long *__r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i * sizeof(long) % 32) == 0)
			printk("%s ", i == 0 ? "" : "\n");
		printk(LONG, *stack++);
	}
	printk("\n");
	show_trace(task, sp);
}

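/*
 * On 64 bit the address of the last breaking event (e.g. a taken branch)
 * is reported from regs->args[0], where the low-level entry code is
 * assumed to have saved it.
 */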
static void show_last_breaking_event(struct pt_regs *regs)
{
#ifdef CONFIG_64BIT
	printk("Last Breaking-Event-Address:\n");
	printk(" [<%016lx>] %pSR\n", regs->args[0], (void *)regs->args[0]);
#endif
}

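/*
 * Extract a bit field from the PSW mask: the AND isolates the field and
 * the division by the lowest set bit of 'bits' ((~bits + 1) & bits)
 * shifts it down to bit position zero.
 */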
static inline int mask_bits(struct pt_regs *regs, unsigned long bits)
{
	return (regs->psw.mask & bits) / ((~bits + 1) & bits);
}

void show_registers(struct pt_regs *regs)
{
	char *mode;

	mode = user_mode(regs) ? "User" : "Krnl";
	printk("%s PSW : %p %p", mode, (void *)regs->psw.mask, (void *)regs->psw.addr);
	if (!user_mode(regs))
		printk(" (%pSR)", (void *)regs->psw.addr);
	printk("\n");
	printk(" R:%x T:%x IO:%x EX:%x Key:%x M:%x W:%x "
	       "P:%x AS:%x CC:%x PM:%x", mask_bits(regs, PSW_MASK_PER),
	       mask_bits(regs, PSW_MASK_DAT), mask_bits(regs, PSW_MASK_IO),
	       mask_bits(regs, PSW_MASK_EXT), mask_bits(regs, PSW_MASK_KEY),
	       mask_bits(regs, PSW_MASK_MCHECK), mask_bits(regs, PSW_MASK_WAIT),
	       mask_bits(regs, PSW_MASK_PSTATE), mask_bits(regs, PSW_MASK_ASC),
	       mask_bits(regs, PSW_MASK_CC), mask_bits(regs, PSW_MASK_PM));
#ifdef CONFIG_64BIT
	printk(" EA:%x", mask_bits(regs, PSW_MASK_EA | PSW_MASK_BA));
#endif
	printk("\n%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk(" " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk(" " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk(" " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);
	show_code(regs);
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!user_mode(regs))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
	show_last_breaking_event(regs);
}

static DEFINE_SPINLOCK(die_lock);

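/*
 * die() is the common oops path: it serializes concurrent oopses with
 * die_lock, prints the oops banner, registers and backtrace, marks the
 * kernel tainted and then either panics (in interrupt context or if
 * panic_on_oops is set) or terminates the current task with SIGSEGV.
 */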
void die(struct pt_regs *regs, const char *str)
{
	static int die_counter;

	oops_enter();
	lgr_info_log();
	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV);
	print_modules();
	show_regs(regs);
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	oops_exit();
	do_exit(SIGSEGV);
}