blob: b24f81197a68258c76615910e0a09075fad8560c [file] [log] [blame]
Thomas Gleixnercaab2772019-06-03 07:44:50 +02001// SPDX-License-Identifier: GPL-2.0-only
Catalin Marinas60ffc302012-03-05 11:49:27 +00002/*
3 * Based on arch/arm/kernel/traps.c
4 *
5 * Copyright (C) 1995-2009 Russell King
6 * Copyright (C) 2012 ARM Ltd.
Catalin Marinas60ffc302012-03-05 11:49:27 +00007 */
8
Dave P Martin9fb74102015-07-24 16:37:48 +01009#include <linux/bug.h>
James Morse26718282019-08-20 18:45:57 +010010#include <linux/context_tracking.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000011#include <linux/signal.h>
12#include <linux/personality.h>
13#include <linux/kallsyms.h>
James Morse26718282019-08-20 18:45:57 +010014#include <linux/kprobes.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000015#include <linux/spinlock.h>
16#include <linux/uaccess.h>
17#include <linux/hardirq.h>
18#include <linux/kdebug.h>
19#include <linux/module.h>
20#include <linux/kexec.h>
21#include <linux/delay.h>
22#include <linux/init.h>
Ingo Molnar3f07c012017-02-08 18:51:30 +010023#include <linux/sched/signal.h>
Ingo Molnarb17b0152017-02-08 18:51:35 +010024#include <linux/sched/debug.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +010025#include <linux/sched/task_stack.h>
Mark Rutland872d8322017-07-14 20:30:35 +010026#include <linux/sizes.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000027#include <linux/syscalls.h>
Ingo Molnar589ee622017-02-04 00:16:44 +010028#include <linux/mm_types.h>
Andrey Konovalov41eea9c2018-12-28 00:30:54 -080029#include <linux/kasan.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000030
31#include <asm/atomic.h>
Dave P Martin9fb74102015-07-24 16:37:48 +010032#include <asm/bug.h>
Dave Martinc0cda3b2018-03-26 15:12:28 +010033#include <asm/cpufeature.h>
James Morse0fbeb312017-11-02 12:12:34 +000034#include <asm/daifflags.h>
Will Deacon1442b6e2013-03-16 08:48:13 +000035#include <asm/debug-monitors.h>
Mark Rutland60a1f022014-11-18 12:16:30 +000036#include <asm/esr.h>
Dave P Martin9fb74102015-07-24 16:37:48 +010037#include <asm/insn.h>
James Morseb6e43c02019-10-25 17:42:10 +010038#include <asm/kprobes.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000039#include <asm/traps.h>
Mark Rutland872d8322017-07-14 20:30:35 +010040#include <asm/smp.h>
Mark Rutlanda9ea0012016-11-03 20:23:05 +000041#include <asm/stack_pointer.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000042#include <asm/stacktrace.h>
43#include <asm/exception.h>
44#include <asm/system_misc.h>
Andre Przywara7dd01ae2016-06-28 18:07:32 +010045#include <asm/sysreg.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000046
/*
 * Names for the four exception vector classes.  NOTE(review): presumably
 * indexed by the vector number passed up from the entry code (bad-mode
 * reporting) — the consumer is outside this chunk; confirm before relying.
 */
static const char *handler[]= {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};

/* Gates the unhandled-signal dump in arm64_show_signal(); 0 = quiet. */
int show_unhandled_signals = 0;
Catalin Marinas60ffc302012-03-05 11:49:27 +000055
/* Print a single backtrace entry (symbol+offset) at the given log level. */
static void dump_backtrace_entry(unsigned long where, const char *loglvl)
{
	void *entry = (void *)where;

	printk("%s %pS\n", loglvl, entry);
}
60
jinho lim7b716652019-06-26 20:50:13 +090061static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
Catalin Marinas60ffc302012-03-05 11:49:27 +000062{
63 unsigned long addr = instruction_pointer(regs);
Catalin Marinas60ffc302012-03-05 11:49:27 +000064 char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
65 int i;
66
jinho lim7b716652019-06-26 20:50:13 +090067 if (user_mode(regs))
68 return;
69
Catalin Marinas60ffc302012-03-05 11:49:27 +000070 for (i = -4; i < 1; i++) {
71 unsigned int val, bad;
72
jinho lim7b716652019-06-26 20:50:13 +090073 bad = aarch64_insn_read(&((u32 *)addr)[i], &val);
Catalin Marinas60ffc302012-03-05 11:49:27 +000074
75 if (!bad)
76 p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
77 else {
78 p += sprintf(p, "bad PC value");
79 break;
80 }
81 }
Catalin Marinas60ffc302012-03-05 11:49:27 +000082
jinho lim7b716652019-06-26 20:50:13 +090083 printk("%sCode: %s\n", lvl, str);
Catalin Marinas60ffc302012-03-05 11:49:27 +000084}
85
/*
 * Print a kernel backtrace for @tsk at log level @loglvl.
 *
 * @regs: exception register state, or NULL.  When non-NULL, entries are
 *        skipped until the unwinder reaches the exception frame (matched
 *        by fp == regs->regs[29]); regs->pc is printed there because the
 *        handler's frame does not record the faulting pc itself.
 * @tsk:  task to unwind; NULL means current.
 *
 * User-mode register state is never unwound.  The task's stack is pinned
 * with try_get_task_stack() for the duration of the walk.
 */
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	struct stackframe frame;
	int skip = 0;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (regs) {
		/* Nothing useful to unwind for a user-mode exception. */
		if (user_mode(regs))
			return;
		skip = 1;
	}

	if (!tsk)
		tsk = current;

	/* Pin the stack so it cannot be freed while we walk it. */
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current) {
		/* Start from our own frame, anchored at this function. */
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)dump_backtrace);
	} else {
		/*
		 * task blocked in __switch_to
		 */
		start_backtrace(&frame,
				thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	}

	printk("%sCall trace:\n", loglvl);
	do {
		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(frame.pc, loglvl);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc, loglvl);
		}
	} while (!unwind_frame(tsk, &frame));

	put_task_stack(tsk);
}
139
/*
 * Arch implementation of show_stack(): dump the kernel backtrace of @tsk
 * (or current when NULL) at @loglvl.  @sp is ignored here; the unwinder
 * derives its starting point itself.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	/*
	 * NOTE(review): barrier() presumably prevents the dump_backtrace()
	 * call from being tail-call-optimised so this frame stays walkable
	 * in traces — confirm.
	 */
	barrier();
}
145
/* Build-configuration tags appended to the "Internal error" oops banner. */
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"
Catalin Marinas60ffc302012-03-05 11:49:27 +0000155
/*
 * Emit the oops banner, module list, registers and code bytes for a
 * fatal kernel exception.  Returns the notify_die() verdict; when a
 * notifier returns NOTIFY_STOP the register/module dump is skipped.
 * Called with die_lock held, from die().
 */
static int __die(const char *str, int err, struct pt_regs *regs)
{
	static int die_counter;		/* numbers successive oopses [#N] */
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}
176
/* Serialises concurrent oopses so their output does not interleave. */
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	/* Take the lock with interrupts masked for the whole dump. */
	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	/* Jump into a loaded crash kernel, if configured to do so. */
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	/* Both panic() calls never return; the lock dies with the system. */
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	raw_spin_unlock_irqrestore(&die_lock, flags);

	/* Unless a notifier claimed the oops, kill the offending task. */
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
212
/*
 * Rate-limited diagnostic printed before delivering an unhandled fault
 * signal to current: comm/pid, the ESR saved in thread.fault_code (if
 * non-zero), the caller-supplied description, the faulting VMA and the
 * user registers.  Gated by show_unhandled_signals and by whether the
 * task actually leaves the signal unhandled.
 */
static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned int esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}
Will Deacona1ece822018-02-20 13:46:05 +0000236
Eric W. Biedermanfeca3552018-09-22 10:26:57 +0200237void arm64_force_sig_fault(int signo, int code, void __user *addr,
238 const char *str)
239{
240 arm64_show_signal(signo, str);
Eric W. Biedermand76cac62019-05-23 11:11:19 -0500241 if (signo == SIGKILL)
Eric W. Biederman3cf5d072019-05-23 10:17:27 -0500242 force_sig(SIGKILL);
Eric W. Biedermand76cac62019-05-23 11:11:19 -0500243 else
Eric W. Biederman2e1661d22019-05-23 11:04:24 -0500244 force_sig_fault(signo, code, addr);
Eric W. Biedermanfeca3552018-09-22 10:26:57 +0200245}
246
/*
 * Print the (rate-limited) unhandled-signal diagnostic, then deliver a
 * SIGBUS with memory-failure siginfo; @code/@addr/@lsb are passed
 * straight through to force_sig_mceerr().
 */
void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, addr, lsb);
}
253
/*
 * Print the (rate-limited) unhandled-signal diagnostic, then deliver a
 * SIGTRAP with ptrace errno-trap siginfo via
 * force_sig_ptrace_errno_trap().
 */
void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, addr);
}
260
Catalin Marinas60ffc302012-03-05 11:49:27 +0000261void arm64_notify_die(const char *str, struct pt_regs *regs,
Eric W. Biederman6fa998e2018-09-21 17:24:40 +0200262 int signo, int sicode, void __user *addr,
263 int err)
Catalin Marinas60ffc302012-03-05 11:49:27 +0000264{
Catalin Marinas91413002014-04-06 23:04:12 +0100265 if (user_mode(regs)) {
Will Deacona1ece822018-02-20 13:46:05 +0000266 WARN_ON(regs != current_pt_regs());
Catalin Marinas91413002014-04-06 23:04:12 +0100267 current->thread.fault_address = 0;
268 current->thread.fault_code = err;
Eric W. Biederman6fa998e2018-09-21 17:24:40 +0200269
Eric W. Biedermanfeca3552018-09-22 10:26:57 +0200270 arm64_force_sig_fault(signo, sicode, addr, str);
Catalin Marinas91413002014-04-06 23:04:12 +0100271 } else {
Catalin Marinas60ffc302012-03-05 11:49:27 +0000272 die(str, regs, err);
Catalin Marinas91413002014-04-06 23:04:12 +0100273 }
Catalin Marinas60ffc302012-03-05 11:49:27 +0000274}
275
#ifdef CONFIG_COMPAT
/*
 * The AArch32 ITSTATE value is split across two PSTATE fields:
 * IT[1:0] at bit 25 and IT[7:2] at bit 10.
 */
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)

/* Reassemble the 8-bit IT state from its two PSTATE fields. */
static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

/* Scatter the 8-bit IT state back into the two PSTATE fields. */
static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}

/*
 * After skipping/emulating one instruction inside a T32 IT block,
 * advance the IT state as the hardware would have.  No-op in ARM mode
 * or when there is no active IT block.
 */
static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
/* !CONFIG_COMPAT: no AArch32 tasks, so IT state never needs advancing. */
static void advance_itstate(struct pt_regs *regs)
{
}
#endif
Dave Martin172a7972020-03-16 16:50:49 +0000330
/*
 * Advance the PC past a trapped instruction of @size bytes, keeping the
 * surrounding execution state consistent: user single-step is
 * fast-forwarded, AArch32 IT state is advanced, and for AArch64 the
 * PSTATE.BTYPE field is cleared as it would be after the instruction
 * had executed.
 */
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}
347
/* Hooks for emulating/claiming undefined instructions; newest first. */
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

/* Add @hook to the head of the undef-instruction hook list. */
void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

/* Remove a hook previously added with register_undef_hook(). */
void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
368
/*
 * Read the faulting instruction and run the matching registered hook.
 *
 * Kernel mode reads the 32-bit word at the PC with get_kernel_nofault();
 * compat Thumb mode reads one halfword and fetches a second if it is a
 * wide (32-bit T32) encoding; otherwise a 32-bit ARM word is read from
 * user space.  When several hooks match, the one latest in list order
 * wins (fn keeps being overwritten).
 *
 * Returns the hook's result (0 = handled) or 1 when no hook matched or
 * the instruction could not be read.
 */
static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!user_mode(regs)) {
		__le32 instr_le;
		if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	} else if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr_le);
			/* First halfword is the most significant. */
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	}

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}
414
Amit Daniel Kachhap4ef333b2020-09-14 14:06:52 +0530415void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
Catalin Marinas60ffc302012-03-05 11:49:27 +0000416{
Andre Przywara390bf172016-06-28 18:07:31 +0100417 const char *desc;
Will Deacon2c9120f32018-02-20 14:16:29 +0000418 struct pt_regs *regs = current_pt_regs();
419
Will Deacon8a604192018-08-14 16:24:54 +0100420 if (WARN_ON(!user_mode(regs)))
421 return;
422
Andre Przywara390bf172016-06-28 18:07:31 +0100423 switch (signal) {
424 case SIGILL:
425 desc = "undefined instruction";
426 break;
427 case SIGSEGV:
428 desc = "illegal memory access";
429 break;
430 default:
Dave Martinbc0ee472017-10-31 15:51:05 +0000431 desc = "unknown or unrecoverable error";
Andre Przywara390bf172016-06-28 18:07:31 +0100432 break;
433 }
434
Will Deacona7e6f1c2018-02-20 18:08:40 +0000435 /* Force signals we don't understand to SIGKILL */
Mark Rutlandb2d71b32018-04-16 16:45:01 +0100436 if (WARN_ON(signal != SIGKILL &&
Will Deacona7e6f1c2018-02-20 18:08:40 +0000437 siginfo_layout(signal, code) != SIL_FAULT)) {
438 signal = SIGKILL;
439 }
440
Amit Daniel Kachhap4ef333b2020-09-14 14:06:52 +0530441 arm64_notify_die(desc, regs, signal, code, (void __user *)address, err);
Andre Przywara390bf172016-06-28 18:07:31 +0100442}
443
444/*
445 * Set up process info to signal segmentation fault - called on access error.
446 */
Will Deacon2c9120f32018-02-20 14:16:29 +0000447void arm64_notify_segfault(unsigned long addr)
Andre Przywara390bf172016-06-28 18:07:31 +0100448{
449 int code;
450
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700451 mmap_read_lock(current->mm);
Andre Przywara390bf172016-06-28 18:07:31 +0100452 if (find_vma(current->mm, addr) == NULL)
453 code = SEGV_MAPERR;
454 else
455 code = SEGV_ACCERR;
Michel Lespinassed8ed45c2020-06-08 21:33:25 -0700456 mmap_read_unlock(current->mm);
Andre Przywara390bf172016-06-28 18:07:31 +0100457
Amit Daniel Kachhap4ef333b2020-09-14 14:06:52 +0530458 force_signal_inject(SIGSEGV, code, addr, 0);
Andre Przywara390bf172016-06-28 18:07:31 +0100459}
460
/*
 * Undefined-instruction trap handler.  Tries, in order: the AArch32
 * breakpoint handler, then the registered undef hooks (instruction
 * emulation).  Anything unclaimed from user space gets SIGILL; an
 * unclaimed kernel-mode undef is a fatal bug.
 */
void do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_undefinstr);
Catalin Marinas60ffc302012-03-05 11:49:27 +0000474
/*
 * Branch Target Identification exception from user space: deliver
 * SIGILL.  A BTI fault taken from the kernel is unexpected here and
 * treated as a bug.
 */
void do_bti(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_bti);
481
/*
 * FPAC (faulting pointer-authentication) exception handler: delivers
 * SIGILL to the user task, carrying the ESR through as the error code.
 */
void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
{
	/*
	 * Unexpected FPAC exception or pointer authentication failure in
	 * the kernel: kill the task before it does any more harm.
	 */
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}
NOKPROBE_SYMBOL(do_ptrauth_fault);
492
/*
 * Run cache-maintenance instruction @insn on user @address with uaccess
 * (TTBR0) enabled, storing 0 or -EFAULT in @res.  A fault inside the
 * asm is redirected by the exception-table fixup to label 3, which sets
 * @res to -EFAULT; addresses beyond user_addr_max() fail up front
 * without being attempted.
 */
#define __user_cache_maint(insn, address, res)			\
	if (address >= user_addr_max()) {			\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			"	.pushsection .fixup,\"ax\"\n"	\
			"	.align	2\n"			\
			"3:	mov	%w0, %w2\n"		\
			"	b	2b\n"			\
			"	.popsection\n"			\
			_ASM_EXTABLE(1b, 3b)			\
			: "=r" (res)				\
			: "r" (address), "i" (-EFAULT));	\
		uaccess_ttbr0_disable();			\
	}
Andre Przywara7dd01ae2016-06-28 18:07:32 +0100512
/*
 * Perform a trapped EL0 cache maintenance operation on the task's
 * behalf.  DC CVAU/CVAC are promoted to DC CIVAC.  A faulting user
 * address raises a segfault; on success the trapped instruction is
 * skipped.  Unrecognised CRm values get SIGILL.
 */
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
	unsigned long address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	/* Maintenance operates on the untagged virtual address. */
	address = untagged_addr(pt_regs_read_reg(regs, rt));

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
551
/*
 * Emulate a trapped EL0 read of CTR_EL0, returning the sanitised
 * system-wide safe value, with adjustments when erratum workaround
 * 1542419 is in effect (see inline comments).
 */
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance...*/
		val &= ~BIT(CTR_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_IMINLINE_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
570
Marc Zyngier6126ce02017-02-01 11:48:58 +0000571static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
572{
Anshuman Khandual1c839142018-09-20 09:36:19 +0530573 int rt = ESR_ELx_SYS64_ISS_RT(esr);
Marc Zyngier6126ce02017-02-01 11:48:58 +0000574
Marc Zyngierdea86a82019-04-08 16:49:03 +0100575 pt_regs_write_reg(regs, rt, arch_timer_read_counter());
Julien Thierry6436bee2017-10-25 10:04:33 +0100576 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
Marc Zyngier6126ce02017-02-01 11:48:58 +0000577}
578
Marc Zyngier98421192017-04-24 09:04:03 +0100579static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
580{
Anshuman Khandual1c839142018-09-20 09:36:19 +0530581 int rt = ESR_ELx_SYS64_ISS_RT(esr);
Marc Zyngier98421192017-04-24 09:04:03 +0100582
Marc Zyngierc6f97ad2017-07-21 18:15:27 +0100583 pt_regs_write_reg(regs, rt, arch_timer_get_rate());
Julien Thierry6436bee2017-10-25 10:04:33 +0100584 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
Marc Zyngier98421192017-04-24 09:04:03 +0100585}
586
Anshuman Khandual21f84792018-09-20 09:36:21 +0530587static void mrs_handler(unsigned int esr, struct pt_regs *regs)
588{
589 u32 sysreg, rt;
590
591 rt = ESR_ELx_SYS64_ISS_RT(esr);
592 sysreg = esr_sys64_to_sysreg(esr);
593
594 if (do_emulate_mrs(regs, sysreg, rt) != 0)
Amit Daniel Kachhap4ef333b2020-09-14 14:06:52 +0530595 force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
Anshuman Khandual21f84792018-09-20 09:36:21 +0530596}
597
/* Trapped WFI from user space: treated as a NOP, just skip it. */
static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
602
/*
 * One entry of an ESR-matched emulation table: handler runs when
 * (esr & esr_mask) == esr_val.
 */
struct sys64_hook {
	unsigned int esr_mask;
	unsigned int esr_val;
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};
608
/*
 * Emulation hooks for trapped AArch64 system instructions, scanned in
 * order by do_sysinstr(); terminated by the empty entry.
 */
static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};
647
#ifdef CONFIG_COMPAT
/*
 * Decide whether a trapped conditional AArch32 CP access would have
 * passed its condition check.  When ESR_ELx.CV is clear the condition
 * must come from the IT state (only possible for T32); an empty IT
 * state means the instruction is unconditional.
 */
static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		/* The top nibble of ITSTATE is the base condition. */
		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}
668
Marc Zyngier32a3e632018-09-27 17:15:33 +0100669static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
670{
671 int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
672
673 pt_regs_write_reg(regs, reg, arch_timer_get_rate());
Dave Martin172a7972020-03-16 16:50:49 +0000674 arm64_skip_faulting_instruction(regs, 4);
Marc Zyngier32a3e632018-09-27 17:15:33 +0100675}
676
/* Emulation hooks for trapped 32-bit CP15 (MCR/MRC) accesses. */
static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};
685
Marc Zyngier50de0132018-09-27 17:15:32 +0100686static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
687{
688 int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
689 int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
Marc Zyngierdea86a82019-04-08 16:49:03 +0100690 u64 val = arch_timer_read_counter();
Marc Zyngier50de0132018-09-27 17:15:32 +0100691
692 pt_regs_write_reg(regs, rt, lower_32_bits(val));
693 pt_regs_write_reg(regs, rt2, upper_32_bits(val));
Dave Martin172a7972020-03-16 16:50:49 +0000694 arm64_skip_faulting_instruction(regs, 4);
Marc Zyngier50de0132018-09-27 17:15:32 +0100695}
696
/* Emulation hooks for trapped 64-bit CP15 (MCRR/MRRC) accesses. */
static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{},
};
705
/*
 * Dispatch a trapped AArch32 CP15 access (MCR/MRC or MCRR/MRRC).  A
 * failed condition check just skips the instruction; otherwise the
 * first matching emulation hook for the access class runs, and anything
 * unrecognised falls back to the undef-instruction handler.
 */
void do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_undefinstr(regs);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_cp15instr);
Marc Zyngier70c63cd2018-09-27 17:15:29 +0100745#endif
746
James Morseafa7c0e2019-10-25 17:42:15 +0100747void do_sysinstr(unsigned int esr, struct pt_regs *regs)
Suzuki K Poulose9dbd5bb2016-09-09 14:07:15 +0100748{
Mark Rutland37143dc2019-08-13 15:16:39 +0100749 const struct sys64_hook *hook;
Suzuki K Poulose9dbd5bb2016-09-09 14:07:15 +0100750
751 for (hook = sys64_hooks; hook->handler; hook++)
752 if ((hook->esr_mask & esr) == hook->esr_val) {
753 hook->handler(esr, regs);
754 return;
755 }
756
Mark Rutland49f6cba2017-01-27 16:15:38 +0000757 /*
758 * New SYS instructions may previously have been undefined at EL0. Fall
759 * back to our usual undefined instruction handler so that we handle
760 * these consistently.
761 */
762 do_undefinstr(regs);
Suzuki K Poulose9dbd5bb2016-09-09 14:07:15 +0100763}
James Morseb6e43c02019-10-25 17:42:10 +0100764NOKPROBE_SYMBOL(do_sysinstr);
Suzuki K Poulose9dbd5bb2016-09-09 14:07:15 +0100765
/*
 * Human-readable names for the ESR_ELx exception class (EC) field,
 * indexed by EC value. Any EC without an explicit entry falls back to
 * "UNRECOGNIZED EC" via the GCC range initializer.
 */
static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]		= "PAC",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI]		= "BTI",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC]		= "FPAC",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};
810
/* Return the human-readable name for the exception class encoded in @esr. */
const char *esr_get_class_string(u32 esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}
815
/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
	console_verbose();

	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
		handler[reason], smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	/* Mask all DAIF exceptions before panicking so nothing interrupts us. */
	local_daif_mask();
	panic("bad mode");
}
832
/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
	void __user *pc = (void __user *)instruction_pointer(regs);

	/* Stash the ESR in the thread struct and clear any stale address. */
	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	/* Deliver SIGILL/ILL_ILLOPC to current, faulting at @pc. */
	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}
847
Mark Rutland872d8322017-07-14 20:30:35 +0100848#ifdef CONFIG_VMAP_STACK
849
/* Per-CPU backup stack used when the regular kernel stack overflows. */
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);
852
853asmlinkage void handle_bad_stack(struct pt_regs *regs)
854{
855 unsigned long tsk_stk = (unsigned long)current->stack;
856 unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
857 unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
858 unsigned int esr = read_sysreg(esr_el1);
859 unsigned long far = read_sysreg(far_el1);
860
861 console_verbose();
862 pr_emerg("Insufficient stack space to handle exception!");
863
864 pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
865 pr_emerg("FAR: 0x%016lx\n", far);
866
867 pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
868 tsk_stk, tsk_stk + THREAD_SIZE);
869 pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n",
Maninder Singh338c11e2020-07-31 17:19:50 +0530870 irq_stk, irq_stk + IRQ_STACK_SIZE);
Mark Rutland872d8322017-07-14 20:30:35 +0100871 pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
872 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
873
874 __show_regs(regs);
875
876 /*
877 * We use nmi_panic to limit the potential for recusive overflows, and
878 * to get a better stack trace.
879 */
880 nmi_panic(NULL, "kernel stack overflow");
881 cpu_park_loop();
882}
883#endif
884
/*
 * Log an SError and panic. @regs may be NULL when no register context is
 * available; __noreturn — ends in nmi_panic()/cpu_park_loop().
 */
void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	/* If nmi_panic() returns (another CPU owns the panic), park here. */
	cpu_park_loop();
	unreachable();
}
899
/*
 * Classify a RAS SError by its Asynchronous Error Type (AET) severity.
 * Returns false if the CPU can make progress, true if the error is
 * fatal. Uncontainable/uncategorized errors do not return: they panic
 * here directly via arm64_serror_panic().
 */
bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
	u32 aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}
931
/*
 * SError entry point, called from assembly. Treated as NMI context:
 * panic unless this is a RAS error that is classified as survivable.
 */
asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
	nmi_enter();

	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);

	nmi_exit();
}
942
/*
 * Invoked from the EL0 entry path (asmlinkage) to inform context
 * tracking that we have left user mode, with interrupts still disabled.
 */
asmlinkage void enter_from_user_mode(void)
{
	/* We should only ever arrive here while tracked as CONTEXT_USER. */
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
NOKPROBE_SYMBOL(enter_from_user_mode);
949
/* Report a corrupt/unexpected PTE value with its source location. */
void __pte_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}
954
/* Report a corrupt/unexpected PMD value with its source location. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}
959
/* Report a corrupt/unexpected PUD value with its source location. */
void __pud_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}
964
/* Report a corrupt/unexpected PGD value with its source location. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}
969
/* GENERIC_BUG traps */

/*
 * Called by report_bug() to ask whether a trapping address can be a
 * genuine BUG() site. On arm64, bug_handler() only fires for
 * BRK #BUG_BRK_IMM, so the answer is trivially yes: any spurious
 * instance with no bug table entry is rejected by report_bug() and
 * handed back to the debug-monitors code as a fatal unexpected debug
 * exception.
 */
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
983
984static int bug_handler(struct pt_regs *regs, unsigned int esr)
985{
Dave P Martin9fb74102015-07-24 16:37:48 +0100986 switch (report_bug(regs->pc, regs)) {
987 case BUG_TRAP_TYPE_BUG:
988 die("Oops - BUG", regs, 0);
989 break;
990
991 case BUG_TRAP_TYPE_WARN:
992 break;
993
994 default:
995 /* unknown/unrecognised bug trap type */
996 return DBG_HOOK_ERROR;
997 }
998
999 /* If thread survives, skip over the BUG instruction and continue: */
Julien Thierry6436bee2017-10-25 10:04:33 +01001000 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
Dave P Martin9fb74102015-07-24 16:37:48 +01001001 return DBG_HOOK_HANDLED;
1002}
1003
/* Kernel break hook: dispatch BRK #BUG_BRK_IMM traps to bug_handler(). */
static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};
1008
Andrey Konovalov41eea9c2018-12-28 00:30:54 -08001009#ifdef CONFIG_KASAN_SW_TAGS
1010
/*
 * Layout of the KASAN software-tags BRK immediate as seen in the ESR
 * comment field: bit 5 = report is recoverable, bit 4 = write access,
 * bits 3:0 = log2 of the access size.
 */
#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE	0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))
1015
1016static int kasan_handler(struct pt_regs *regs, unsigned int esr)
1017{
1018 bool recover = esr & KASAN_ESR_RECOVER;
1019 bool write = esr & KASAN_ESR_WRITE;
1020 size_t size = KASAN_ESR_SIZE(esr);
1021 u64 addr = regs->regs[0];
1022 u64 pc = regs->pc;
1023
Andrey Konovalov41eea9c2018-12-28 00:30:54 -08001024 kasan_report(addr, size, write, pc);
1025
1026 /*
1027 * The instrumentation allows to control whether we can proceed after
1028 * a crash was detected. This is done by passing the -recover flag to
1029 * the compiler. Disabling recovery allows to generate more compact
1030 * code.
1031 *
1032 * Unfortunately disabling recovery doesn't work for the kernel right
1033 * now. KASAN reporting is disabled in some contexts (for example when
1034 * the allocator accesses slab object metadata; this is controlled by
1035 * current->kasan_depth). All these accesses are detected by the tool,
1036 * even though the reports for them are not printed.
1037 *
1038 * This is something that might be fixed at some point in the future.
1039 */
1040 if (!recover)
1041 die("Oops - KASAN", regs, 0);
1042
1043 /* If thread survives, skip over the brk instruction and continue: */
1044 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
1045 return DBG_HOOK_HANDLED;
1046}
1047
/* Kernel break hook: dispatch KASAN BRK traps (matched under mask) to kasan_handler(). */
static struct break_hook kasan_break_hook = {
	.fn	= kasan_handler,
	.imm	= KASAN_BRK_IMM,
	.mask	= KASAN_BRK_MASK,
};
1053#endif
1054
/*
 * Initial handler for AArch64 BRK exceptions
 * This handler only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
	unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

	/* KASAN BRKs carry extra bits under KASAN_BRK_MASK; compare the rest. */
	if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	/* Anything else this early is assumed to be a BUG()/WARN() BRK. */
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}
1070
/*
 * Register the kernel BRK hooks (BUG, and KASAN when software tags are
 * enabled), then hand BRK handling over to the debug-monitors code —
 * after debug_traps_init(), early_brk64() is no longer used.
 */
void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
	debug_traps_init();
}