/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
unsigned int code_bytes = 64;
static int die_counter;

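/*
 * Return true if @stack points into @task's stack, and fill in @info
 * with the stack's bounds so the unwinder can walk it.  The task stack
 * is a single THREAD_SIZE region with no next stack to switch to, so
 * next_sp stays NULL.
 */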
bool in_task_stack(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type = STACK_TYPE_TASK;
	info->begin = begin;
	info->end = end;
	info->next_sp = NULL;

	return true;
}

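/*
 * Like in_task_stack(), but for the per-CPU entry stack in the
 * cpu_entry_area.  Only this CPU's entry stack is checked, which is
 * presumably the only entry stack the current context could occupy.
 */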
bool in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type = STACK_TYPE_ENTRY;
	info->begin = begin;
	info->end = end;
	info->next_sp = NULL;

	return true;
}

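/*
 * Print a single stack address.  Addresses the unwinder did not vouch
 * for are prefixed with '?'.  touch_nmi_watchdog() keeps a long dump
 * over a slow console from tripping the hardlockup detector.
 */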
static void printk_stack_address(unsigned long address, int reliable,
				 char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s %s%pB\n", log_lvl, reliable ? "" : "? ", (void *)address);
}

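/*
 * Dump just the hardware iret frame: CS:RIP plus SS:RSP and EFLAGS.
 * This is all that can be shown when only a partial pt_regs has been
 * saved, e.g. for an exception taken in entry code.
 */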
void show_iret_regs(struct pt_regs *regs)
{
	printk(KERN_DEFAULT "RIP: %04x:%pS\n", (int)regs->cs, (void *)regs->ip);
	printk(KERN_DEFAULT "RSP: %04x:%016lx EFLAGS: %08lx", (int)regs->ss,
		regs->sp, regs->flags);
}

static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer.  The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet.  Otherwise they'll be shown as part of
	 * the wrong stack.  Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs so
	 * they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, 0);

	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet.  In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs);
	}
}

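/*
 * The core stack dumper: starting from the given (or derived) stack
 * pointer, walk every stack the task could be on, printing both the
 * unwinder's reliable return addresses and any other text addresses
 * found by scanning.
 */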
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
	for (regs = NULL; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		const char *stack_name;

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack.  It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial);

		/*
		 * Scan the stack, printing any text addresses we find.  At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

next:
			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}

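/*
 * Convenience wrapper for dumping a task's stack at KERN_DEFAULT level.
 * The typical debugging one-liner (sketch) dumps the current task from
 * wherever we happen to be:
 *
 *	show_stack(NULL, NULL);
 */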
void show_stack(struct task_struct *task, unsigned long *sp)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, KERN_DEFAULT);
}

void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}

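/*
 * Serialize oops output across CPUs.  A raw arch_spinlock_t with a
 * manual nest count is used so that a recursive oops on the CPU that
 * already owns the lock can't deadlock against itself: see the trylock
 * dance in oops_begin().
 */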
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
EXPORT_SYMBOL_GPL(oops_begin);
NOKPROBE_SYMBOL(oops_begin);

void __noreturn rewind_stack_do_exit(int signr);

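/*
 * oops_begin() and oops_end() must be paired, with the flags returned
 * by oops_begin() handed back to oops_end().  die() below is the
 * canonical caller (sketch):
 *
 *	unsigned long flags = oops_begin();
 *	...
 *	oops_end(flags, regs, sig);
 */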
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 */
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

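/*
 * Print the body of an oops: the banner line with taint/config flags,
 * the module list, the registers, and a one-line EIP/RIP summary in
 * case the rest scrolls away.  Returns 1 if a die notifier swallowed
 * the event and the caller should not deliver a fatal signal.
 */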
int __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");

	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);
#ifdef CONFIG_X86_32
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: %pS SS:ESP: %04x:%08lx\n",
	       (void *)regs->ip, ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP: %pS RSP: %016lx\n", (void *)regs->ip, regs->sp);
#endif
	return 0;
}
NOKPROBE_SYMBOL(__die);

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

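/*
 * Parse the "code_bytes=" boot parameter (e.g. "code_bytes=128" on the
 * kernel command line), which sets how many bytes of code around the
 * faulting instruction are dumped in an oops.  The value is clamped to
 * 8192.
 */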
static int __init code_bytes_setup(char *s)
{
	ssize_t ret;
	unsigned long val;

	if (!s)
		return -EINVAL;

	ret = kstrtoul(s, 0, &val);
	if (ret)
		return ret;

	code_bytes = val;
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);