/*
 * arch/xtensa/kernel/traps.c
 *
 * Exception handling.
 *
 * Derived from code with the following copyrights:
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Modified for R3000 by Paul M. Antoine, 1995, 1996
 * Complete output from die() by Ulf Carlsson, 1998
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * Essentially rewritten for the Xtensa architecture port.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/pgtable.h>

#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/hw_breakpoint.h>

/*
 * Machine specific interrupt handlers
 */

extern void kernel_exception(void);
extern void user_exception(void);

extern void fast_illegal_instruction_user(void);
extern void fast_syscall_user(void);
extern void fast_alloca(void);
extern void fast_unaligned(void);
extern void fast_second_level_miss(void);
extern void fast_store_prohibited(void);
extern void fast_coprocessor(void);

extern void do_illegal_instruction(struct pt_regs *);
extern void do_interrupt(struct pt_regs *);
extern void do_nmi(struct pt_regs *);
extern void do_unaligned_user(struct pt_regs *);
extern void do_multihit(struct pt_regs *, unsigned long);
extern void do_page_fault(struct pt_regs *, unsigned long);
extern void do_debug(struct pt_regs *);
extern void system_call(struct pt_regs *);

/*
 * The vector table must be preceded by a save area (which
 * implies it must be in RAM, unless one places RAM immediately
 * before a ROM and puts the vector at the start of the ROM (!))
 */

#define KRNL 0x01
#define USER 0x02

#define COPROCESSOR(x) \
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }

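/*
 * One entry per exception cause that is set up at boot: 'fast' selects which
 * of the three per-CPU dispatch tables receives 'handler' (0 = default C
 * handler, USER and/or KRNL = fast handler for exceptions taken from user or
 * kernel mode); see trap_init() below.
 */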
typedef struct {
        int cause;
        int fast;
        void *handler;
} dispatch_init_table_t;

static dispatch_init_table_t __initdata dispatch_init_table[] = {

#ifdef CONFIG_USER_ABI_CALL0_PROBE
{ EXCCAUSE_ILLEGAL_INSTRUCTION, USER, fast_illegal_instruction_user },
#endif
{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0, do_illegal_instruction },
{ EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL, 0, system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* EXCCAUSE_LOAD_STORE_ERROR unhandled */
{ EXCCAUSE_LEVEL1_INTERRUPT, 0, do_interrupt },
#ifdef SUPPORT_WINDOWED
{ EXCCAUSE_ALLOCA, USER|KRNL, fast_alloca },
#endif
/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED, USER, fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user },
{ EXCCAUSE_UNALIGNED, KRNL, fast_unaligned },
#endif
#ifdef CONFIG_MMU
{ EXCCAUSE_ITLB_MISS, 0, do_page_fault },
{ EXCCAUSE_ITLB_MISS, USER|KRNL, fast_second_level_miss },
{ EXCCAUSE_ITLB_MULTIHIT, 0, do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE, 0, do_page_fault },
/* EXCCAUSE_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_DTLB_MISS, USER|KRNL, fast_second_level_miss },
{ EXCCAUSE_DTLB_MISS, 0, do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT, 0, do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE, 0, do_page_fault },
/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0, do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0, do_page_fault },
#endif /* CONFIG_MMU */
/* XCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
#endif
#if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1),
#endif
#if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2),
#endif
#if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3),
#endif
#if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4),
#endif
#if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5),
#endif
#if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6),
#endif
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
#if XTENSA_FAKE_NMI
{ EXCCAUSE_MAPPED_NMI, 0, do_nmi },
#endif
{ EXCCAUSE_MAPPED_DEBUG, 0, do_debug },
{ -1, -1, 0 }

};

/* The exception table <exc_table> serves two functions:
 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
 * 2. it is a temporary memory buffer for the exception handlers.
 */

DEFINE_PER_CPU(struct exc_table, exc_table);
DEFINE_PER_CPU(struct debug_table, debug_table);
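/* The layout of struct exc_table is shared with the low-level assembly
 * (see asm/traps.h and vectors.S); each CPU's copy is reached through
 * EXCSAVE1, which is set up in trap_init_excsave() below.
 */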

void die(const char *, struct pt_regs *, long);

static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

/*
 * Unhandled Exceptions. Kill user task or panic if in kernel space.
 */

void do_unhandled(struct pt_regs *regs, unsigned long exccause)
{
        __die_if_kernel("Caught unhandled exception - should not happen",
                        regs, SIGKILL);

        /* If in user mode, send SIGILL signal to current process */
        pr_info_ratelimited("Caught unhandled exception in '%s' "
                            "(pid = %d, pc = %#010lx) - should not happen\n"
                            "\tEXCCAUSE is %ld\n",
                            current->comm, task_pid_nr(current), regs->pc,
                            exccause);
        force_sig(SIGILL);
}
/*
 * Multi-hit exception. This is fatal!
 */

void do_multihit(struct pt_regs *regs, unsigned long exccause)
{
        die("Caught multihit exception", regs, SIGKILL);
}

/*
 * IRQ handler.
 */

extern void do_IRQ(int, struct pt_regs *);

#if XTENSA_FAKE_NMI

#define IS_POW2(v) (((v) & ((v) - 1)) == 0)

#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fire."

static inline void check_valid_nmi(void)
{
        unsigned intread = xtensa_get_sr(interrupt);
        unsigned intenable = xtensa_get_sr(intenable);

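        /*
         * With fake NMI the profiling interrupt must effectively be the only
         * IRQ at or above PROFILING_INTLEVEL: bug out if anything else at
         * that level or above is both pending and enabled.  Interrupts below
         * the profiling level and the profiling IRQ itself are the only bits
         * masked out of the check.
         */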
        BUG_ON(intread & intenable &
               ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
                 XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
                 BIT(XCHAL_PROFILING_INTERRUPT)));
}

#else

static inline void check_valid_nmi(void)
{
}

#endif

irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);

DEFINE_PER_CPU(unsigned long, nmi_count);

void do_nmi(struct pt_regs *regs)
{
        struct pt_regs *old_regs;

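        /*
         * If the interrupted context ran with PS.INTLEVEL below LOCKLEVEL,
         * IRQs were logically enabled there, so let the tracer know that
         * hard IRQs are now off.
         */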
        if ((regs->ps & PS_INTLEVEL_MASK) < LOCKLEVEL)
                trace_hardirqs_off();

        old_regs = set_irq_regs(regs);
        nmi_enter();
        ++*this_cpu_ptr(&nmi_count);
        check_valid_nmi();
        xtensa_pmu_irq_handler(0, NULL);
        nmi_exit();
        set_irq_regs(old_regs);
}
#endif

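/*
 * C interrupt dispatcher, registered above for EXCCAUSE_LEVEL1_INTERRUPT.
 * It repeatedly scans INTERRUPT & INTENABLE and services the highest pending
 * interrupt level first.  Within a level the 'unhandled' mask rotates the
 * choice, so every pending IRQ at that level is serviced once before any of
 * them is serviced again.
 */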
void do_interrupt(struct pt_regs *regs)
{
        static const unsigned int_level_mask[] = {
                0,
                XCHAL_INTLEVEL1_MASK,
                XCHAL_INTLEVEL2_MASK,
                XCHAL_INTLEVEL3_MASK,
                XCHAL_INTLEVEL4_MASK,
                XCHAL_INTLEVEL5_MASK,
                XCHAL_INTLEVEL6_MASK,
                XCHAL_INTLEVEL7_MASK,
        };
        struct pt_regs *old_regs;
        unsigned unhandled = ~0u;

        trace_hardirqs_off();

        old_regs = set_irq_regs(regs);
        irq_enter();

        for (;;) {
                unsigned intread = xtensa_get_sr(interrupt);
                unsigned intenable = xtensa_get_sr(intenable);
                unsigned int_at_level = intread & intenable;
                unsigned level;

                for (level = LOCKLEVEL; level > 0; --level) {
                        if (int_at_level & int_level_mask[level]) {
                                int_at_level &= int_level_mask[level];
                                if (int_at_level & unhandled)
                                        int_at_level &= unhandled;
                                else
                                        unhandled |= int_level_mask[level];
                                break;
                        }
                }

                if (level == 0)
                        break;

                /* clear lowest pending irq in the unhandled mask */
                unhandled ^= (int_at_level & -int_at_level);
                do_IRQ(__ffs(int_at_level), regs);
        }

        irq_exit();
        set_irq_regs(old_regs);
}

/*
 * Illegal instruction. Fatal if in kernel space.
 */

void
do_illegal_instruction(struct pt_regs *regs)
{
        __die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

        /* If in user mode, send SIGILL signal to current process. */

        pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
                            current->comm, task_pid_nr(current), regs->pc);
        force_sig(SIGILL);
}


/*
 * Handle unaligned memory accesses from user space. Kill task.
 *
 * If CONFIG_XTENSA_UNALIGNED_USER is not set, we don't allow unaligned
 * memory accesses from user space.
 */

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
void
do_unaligned_user(struct pt_regs *regs)
{
        __die_if_kernel("Unhandled unaligned exception in kernel",
                        regs, SIGKILL);

        current->thread.bad_vaddr = regs->excvaddr;
        current->thread.error_code = -3;
        pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
                            "(pid = %d, pc = %#010lx)\n",
                            regs->excvaddr, current->comm,
                            task_pid_nr(current), regs->pc);
        force_sig_fault(SIGBUS, BUS_ADRALN, (void *) regs->excvaddr);
}
#endif

/* Handle debug events.
 * When CONFIG_HAVE_HW_BREAKPOINT is on, this handler is called with
 * preemption disabled to avoid rescheduling and to keep the mapping of
 * hardware breakpoint structures to debug registers intact, so that
 * DEBUGCAUSE.DBNUM can be used in case of a data breakpoint hit.
 */
void
do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
        int ret = check_hw_breakpoint(regs);

        preempt_enable();
        if (ret == 0)
                return;
#endif
        __die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

        /* If in user mode, send SIGTRAP signal to current process */

        force_sig(SIGTRAP);
}


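/*
 * Install 'handler' into the given sub-table (fast_user_handler,
 * fast_kernel_handler or default_handler) of every CPU's exc_table.
 */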
#define set_handler(type, cause, handler)                               \
        do {                                                            \
                unsigned int cpu;                                       \
                                                                        \
                for_each_possible_cpu(cpu)                              \
                        per_cpu(exc_table, cpu).type[cause] = (handler);\
        } while (0)

/* Set exception C handler - for temporary use when probing exceptions */

void * __init trap_set_handler(int cause, void *handler)
{
        void *previous = per_cpu(exc_table, 0).default_handler[cause];

        set_handler(default_handler, cause, handler);
        return previous;
}


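/* Point EXCSAVE1 at this CPU's exc_table so that the assembly exception
 * dispatchers (see vectors.S) can find it with a single register read.
 */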
static void trap_init_excsave(void)
{
        unsigned long excsave1 = (unsigned long)this_cpu_ptr(&exc_table);
        __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
}

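/* Same idea for the debug path: the EXCSAVE register at XCHAL_DEBUGLEVEL
 * holds this CPU's debug_table, whose debug_exception member is set to the
 * default debug exception handler.
 */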
static void trap_init_debug(void)
{
        unsigned long debugsave = (unsigned long)this_cpu_ptr(&debug_table);

        this_cpu_ptr(&debug_table)->debug_exception = debug_exception;
        __asm__ __volatile__("wsr %0, excsave" __stringify(XCHAL_DEBUGLEVEL)
                             :: "a"(debugsave));
}

/*
 * Initialize dispatch tables.
 *
 * The exception vectors are stored compressed in the __init section in the
 * dispatch_init_table. This function initializes the following three tables
 * from that compressed table:
 * - fast user:         first dispatch table for user exceptions
 * - fast kernel:       first dispatch table for kernel exceptions
 * - default C-handler: C-handler called by the default fast handler.
 *
 * See vectors.S for more details.
 */

void __init trap_init(void)
{
        int i;

        /* Setup default vectors. */

        for (i = 0; i < EXCCAUSE_N; i++) {
                set_handler(fast_user_handler, i, user_exception);
                set_handler(fast_kernel_handler, i, kernel_exception);
                set_handler(default_handler, i, do_unhandled);
        }

        /* Setup specific handlers. */

        for (i = 0; dispatch_init_table[i].cause >= 0; i++) {
                int fast = dispatch_init_table[i].fast;
                int cause = dispatch_init_table[i].cause;
                void *handler = dispatch_init_table[i].handler;

                if (fast == 0)
                        set_handler(default_handler, cause, handler);
                if ((fast & USER) != 0)
                        set_handler(fast_user_handler, cause, handler);
                if ((fast & KRNL) != 0)
                        set_handler(fast_kernel_handler, cause, handler);
        }

        /* Initialize EXCSAVE_1 to hold the address of the exception table. */
        trap_init_excsave();
        trap_init_debug();
}

#ifdef CONFIG_SMP
void secondary_trap_init(void)
{
        trap_init_excsave();
        trap_init_debug();
}
#endif

/*
 * This function dumps the current valid window frame and other base registers.
 */

void show_regs(struct pt_regs *regs)
{
        int i;

        show_regs_print_info(KERN_DEFAULT);

        for (i = 0; i < 16; i++) {
                if ((i % 8) == 0)
                        pr_info("a%02d:", i);
                pr_cont(" %08lx", regs->areg[i]);
        }
        pr_cont("\n");
        pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
                regs->pc, regs->ps, regs->depc, regs->excvaddr);
        pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
                regs->lbeg, regs->lend, regs->lcount, regs->sar);
        if (user_mode(regs))
                pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
                        regs->windowbase, regs->windowstart, regs->wmask,
                        regs->syscall);
}

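/*
 * walk_stackframe() callback: print every saved return address that lies in
 * kernel text, prefixed with the requested log level.
 */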
static int show_trace_cb(struct stackframe *frame, void *data)
{
        const char *loglvl = data;

        if (kernel_text_address(frame->pc))
                printk("%s [<%08lx>] %pB\n",
                       loglvl, frame->pc, (void *)frame->pc);
        return 0;
}

static void show_trace(struct task_struct *task, unsigned long *sp,
                       const char *loglvl)
{
        if (!sp)
                sp = stack_pointer(task);

        printk("%sCall Trace:\n", loglvl);
        walk_stackframe(sp, show_trace_cb, (void *)loglvl);
}

#define STACK_DUMP_ENTRY_SIZE 4
#define STACK_DUMP_LINE_SIZE 32
static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
        size_t len;

        if (!sp)
                sp = stack_pointer(task);

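        /*
         * Dump at most kstack_depth_to_print entries, but never past the end
         * of the THREAD_SIZE-aligned stack region that sp points into.
         */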
        len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
                  kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);

        printk("%sStack:\n", loglvl);
        print_hex_dump(loglvl, " ", DUMP_PREFIX_NONE,
                       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
                       sp, len, false);
        show_trace(task, sp, loglvl);
}

DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs, long err)
{
        static int die_counter;
        const char *pr = "";

        if (IS_ENABLED(CONFIG_PREEMPTION))
                pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

        console_verbose();
        spin_lock_irq(&die_lock);

        pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
        show_regs(regs);
        if (!user_mode(regs))
                show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);

        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        spin_unlock_irq(&die_lock);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        do_exit(err);
}