/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>
#include <linux/kasan.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
Neil Horman | 878719e | 2008-10-23 10:40:06 -0400 | [diff] [blame] | 25 | |
| 26 | int panic_on_unrecovered_nmi; |
Kurt Garloff | 5211a24 | 2009-06-24 14:32:11 -0700 | [diff] [blame] | 27 | int panic_on_io_nmi; |
Neil Horman | 878719e | 2008-10-23 10:40:06 -0400 | [diff] [blame] | 28 | static int die_counter; |
| 29 | |
Borislav Petkov | 602bd70 | 2018-04-17 18:11:23 +0200 | [diff] [blame] | 30 | static struct pt_regs exec_summary_regs; |
| 31 | |
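/* Called from get_stack_info_noinstr - so must be noinstr too */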
bool noinstr in_task_stack(unsigned long *stack, struct task_struct *task,
			   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_TASK;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

/* Called from get_stack_info_noinstr - so must be noinstr too */
bool noinstr in_entry_stack(unsigned long *stack, struct stack_info *info)
{
	struct entry_stack *ss = cpu_entry_stack(smp_processor_id());

	void *begin = ss;
	void *end = ss + 1;

	if ((void *)stack < begin || (void *)stack >= end)
		return false;

	info->type	= STACK_TYPE_ENTRY;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}

static void printk_stack_address(unsigned long address, int reliable,
				 const char *log_lvl)
{
	touch_nmi_watchdog();
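	/* %pBb is the backtrace symbol format (%pB) plus the module build ID. */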
	printk("%s %s%pBb\n", log_lvl, reliable ? "" : "? ", (void *)address);
}

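/*
 * Copy @nbytes of code around a faulting instruction for dumping.
 * Returns 0 when all bytes were copied, -EPERM when the user space of
 * a foreign task would have to be accessed, and any other non-zero
 * value when the copy failed.
 */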
static int copy_code(struct pt_regs *regs, u8 *buf, unsigned long src,
		     unsigned int nbytes)
{
	if (!user_mode(regs))
		return copy_from_kernel_nofault(buf, (u8 *)src, nbytes);

	/* The user space code of other tasks cannot be accessed. */
	if (regs != task_pt_regs(current))
		return -EPERM;
	/*
	 * Make sure userspace isn't trying to trick us into dumping kernel
	 * memory by pointing the userspace instruction pointer at it.
	 */
	if (__chk_range_not_ok(src, nbytes, TASK_SIZE_MAX))
		return -EINVAL;

	/*
	 * Even though it is named copy_from_user_nmi(), this can be invoked
	 * from other contexts as well: it will not try to resolve a page
	 * fault, which is the correct thing to do here as this code can be
	 * called from any context.
	 */
	return copy_from_user_nmi(buf, (void __user *)src, nbytes);
}

/*
 * There are a couple of reasons for the 2/3rds prologue, courtesy of Linus:
 *
 * In the case where we don't have the exact kernel image (which, if we did,
 * we could simply disassemble and navigate to the RIP), the purpose of the
 * bigger prologue is to have more context and to be able to correlate the
 * code from the different toolchains better.
 *
 * In addition, it helps in recreating the register allocation of the failing
 * kernel and thus in making sense of the register dump.
 *
 * What is more, the additional complication of a variable-length insn arch
 * like x86 warrants having a longer byte sequence before rIP so that the
 * disassembler can "sync" up properly and find instruction boundaries when
 * decoding the opcode bytes.
 *
 * Thus, the 2/3rds prologue and the 64-byte OPCODE_BUFSIZE are just a random
 * guesstimate in an attempt to achieve all of the above.
 */
void show_opcodes(struct pt_regs *regs, const char *loglvl)
{
#define PROLOGUE_SIZE 42
#define EPILOGUE_SIZE 21
#define OPCODE_BUFSIZE (PROLOGUE_SIZE + 1 + EPILOGUE_SIZE)
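	/* 42 + 1 + 21 == 64: 2/3rds of the buffer precede the faulting byte. */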
	u8 opcodes[OPCODE_BUFSIZE];
	unsigned long prologue = regs->ip - PROLOGUE_SIZE;

	switch (copy_code(regs, opcodes, prologue, sizeof(opcodes))) {
	case 0:
		printk("%sCode: %" __stringify(PROLOGUE_SIZE) "ph <%02x> %"
		       __stringify(EPILOGUE_SIZE) "ph\n", loglvl, opcodes,
		       opcodes[PROLOGUE_SIZE], opcodes + PROLOGUE_SIZE + 1);
		break;
	case -EPERM:
		/* No access to the user space stack of other tasks. Ignore. */
		break;
	default:
		printk("%sCode: Unable to access opcode bytes at RIP 0x%lx.\n",
		       loglvl, prologue);
		break;
	}
}

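/* Print the instruction pointer and dump the opcode bytes around it. */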
void show_ip(struct pt_regs *regs, const char *loglvl)
{
#ifdef CONFIG_X86_32
	printk("%sEIP: %pS\n", loglvl, (void *)regs->ip);
#else
	printk("%sRIP: %04x:%pS\n", loglvl, (int)regs->cs, (void *)regs->ip);
#endif
	show_opcodes(regs, loglvl);
}

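/* Dump the iret frame: RIP (plus opcode bytes) via show_ip(), then SS:RSP and EFLAGS. */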
void show_iret_regs(struct pt_regs *regs, const char *log_lvl)
{
	show_ip(regs, log_lvl);
	printk("%sRSP: %04x:%016lx EFLAGS: %08lx", log_lvl, (int)regs->ss,
	       regs->sp, regs->flags);
}

static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
				  bool partial, const char *log_lvl)
{
	/*
	 * These on_stack() checks aren't strictly necessary: the unwind code
	 * has already validated the 'regs' pointer. The checks are done for
	 * ordering reasons: if the registers are on the next stack, we don't
	 * want to print them out yet. Otherwise they'll be shown as part of
	 * the wrong stack. Later, when show_trace_log_lvl() switches to the
	 * next stack, this function will be called again with the same regs so
	 * they can be printed in the right context.
	 */
	if (!partial && on_stack(info, regs, sizeof(*regs))) {
		__show_regs(regs, SHOW_REGS_SHORT, log_lvl);

	} else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
				       IRET_FRAME_SIZE)) {
		/*
		 * When an interrupt or exception occurs in entry code, the
		 * full pt_regs might not have been saved yet. In that case
		 * just print the iret frame.
		 */
		show_iret_regs(regs, log_lvl);
	}
}

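/*
 * The main backtrace printer: starting from @stack (or @regs), walk every
 * stack the task has touched, switching stacks via stack_info.next_sp.
 */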
static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			       unsigned long *stack, const char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;
	bool partial = false;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);
	stack = stack ? : get_stack_pointer(task, regs);
	regs = unwind_get_entry_regs(&state, &partial);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 * - entry stack
	 *
	 * x86-32 can have up to four stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 * - entry stack
	 */
	for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
		const char *stack_name;

		if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
			/*
			 * We weren't on a valid stack. It's possible that
			 * we overflowed a valid stack into a guard page.
			 * See if the next page up is valid so that we can
			 * generate some kind of backtrace if this happens.
			 */
			stack = (unsigned long *)PAGE_ALIGN((unsigned long)stack);
			if (get_stack_info(stack, task, &stack_info, &visit_mask))
				break;
		}

		stack_name = stack_type_name(stack_info.type);
		if (stack_name)
			printk("%s <%s>\n", log_lvl, stack_name);

		if (regs)
			show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);

		/*
		 * Scan the stack, printing any text addresses we find. At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = READ_ONCE_NOCHECK(*stack);
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			/*
			 * Don't print regs->ip again if it was already printed
			 * by show_regs_if_on_stack().
			 */
			if (regs && stack == &regs->ip)
				goto next;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler). In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);
			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

next:
			/*
			 * Get the next frame from the unwinder. No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);

			/* if the frame has entry regs, print them */
			regs = unwind_get_entry_regs(&state, &partial);
			if (regs)
				show_regs_if_on_stack(&stack_info, regs, partial, log_lvl);
		}

		if (stack_name)
			printk("%s </%s>\n", log_lvl, stack_name);
	}
}

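/* Dump the stack of @task, or of the current task when @task is NULL. */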
void show_stack(struct task_struct *task, unsigned long *sp,
		const char *loglvl)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting. Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_trace_log_lvl(task, NULL, sp, loglvl);
}

void show_stack_regs(struct pt_regs *regs)
{
	show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

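/*
 * Grab the die lock (tolerating nested oopses on the same CPU), disable
 * interrupts and make the console verbose. Returns the saved IRQ flags,
 * which the caller must hand back to oops_end().
 */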
unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);

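/*
 * Provided by the entry assembly: rewinds the stack pointer to the top of
 * the task stack and then calls do_exit() with @signr.
 */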
void __noreturn rewind_stack_do_exit(int signr);

void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	/* Executive summary in case the oops scrolled away */
	__show_regs(&exec_summary_regs, SHOW_REGS_ALL, KERN_DEFAULT);

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left. Rewind the stack and kill
	 * the task.
	 * Before we rewind the stack, we have to tell KASAN that we're going to
	 * reuse the task stack and that existing poisons are invalid.
	 */
	kasan_unpoison_task_stack(current);
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);

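/*
 * Print the oops banner: the die string, error code, die counter and the
 * active debug/mitigation config flags (PREEMPT, SMP, KASAN, PTI, ...).
 */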
static void __die_header(const char *str, struct pt_regs *regs, long err)
{
	const char *pr = "";

	/* Save the regs of the first oops for the executive summary later. */
	if (!die_counter)
		exec_summary_regs = *regs;

	if (IS_ENABLED(CONFIG_PREEMPTION))
		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       pr,
	       IS_ENABLED(CONFIG_SMP)     ? " SMP" : "",
	       debug_pagealloc_enabled()  ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN)   ? " KASAN" : "",
	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
}
NOKPROBE_SYMBOL(__die_header);

static int __die_body(const char *str, struct pt_regs *regs, long err)
{
	show_regs(regs);
	print_modules();

	if (notify_die(DIE_OOPS, str, regs, err,
		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	return 0;
}
NOKPROBE_SYMBOL(__die_body);

int __die(const char *str, struct pt_regs *regs, long err)
{
	__die_header(str, regs, err);
	return __die_body(str, regs, err);
}
NOKPROBE_SYMBOL(__die);

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (__die(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

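/*
 * Like die(), but additionally passes the faulting address to KASAN so it
 * can report accesses to non-canonical addresses; used for general
 * protection faults.
 */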
void die_addr(const char *str, struct pt_regs *regs, long err, long gp_addr)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	__die_header(str, regs, err);
	if (gp_addr)
		kasan_non_canonical_hook(gp_addr);
	if (__die_body(str, regs, err))
		sig = 0;
	oops_end(flags, regs, sig);
}

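/* Print the registers, and for a kernel-mode fault also the backtrace. */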
void show_regs(struct pt_regs *regs)
{
	enum show_regs_mode print_kernel_regs;

	show_regs_print_info(KERN_DEFAULT);

	print_kernel_regs = user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL;
	__show_regs(regs, print_kernel_regs, KERN_DEFAULT);

	/*
	 * When in kernel mode, also print out the stack at the time of
	 * the fault.
	 */
	if (!user_mode(regs))
		show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT);
}