blob: 21e73954762ca30c97063b9f36bfdbc387762d05 [file] [log] [blame]
Catalin Marinas60ffc302012-03-05 11:49:27 +00001/*
2 * Based on arch/arm/kernel/traps.c
3 *
4 * Copyright (C) 1995-2009 Russell King
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
Dave P Martin9fb74102015-07-24 16:37:48 +010020#include <linux/bug.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000021#include <linux/signal.h>
22#include <linux/personality.h>
23#include <linux/kallsyms.h>
24#include <linux/spinlock.h>
25#include <linux/uaccess.h>
26#include <linux/hardirq.h>
27#include <linux/kdebug.h>
28#include <linux/module.h>
29#include <linux/kexec.h>
30#include <linux/delay.h>
31#include <linux/init.h>
Ingo Molnar3f07c012017-02-08 18:51:30 +010032#include <linux/sched/signal.h>
Ingo Molnarb17b0152017-02-08 18:51:35 +010033#include <linux/sched/debug.h>
Ingo Molnar68db0cf2017-02-08 18:51:37 +010034#include <linux/sched/task_stack.h>
Mark Rutland872d8322017-07-14 20:30:35 +010035#include <linux/sizes.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000036#include <linux/syscalls.h>
Ingo Molnar589ee622017-02-04 00:16:44 +010037#include <linux/mm_types.h>
Andrey Konovalov41eea9c2018-12-28 00:30:54 -080038#include <linux/kasan.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000039
40#include <asm/atomic.h>
Dave P Martin9fb74102015-07-24 16:37:48 +010041#include <asm/bug.h>
Dave Martinc0cda3b2018-03-26 15:12:28 +010042#include <asm/cpufeature.h>
James Morse0fbeb312017-11-02 12:12:34 +000043#include <asm/daifflags.h>
Will Deacon1442b6e2013-03-16 08:48:13 +000044#include <asm/debug-monitors.h>
Mark Rutland60a1f022014-11-18 12:16:30 +000045#include <asm/esr.h>
Dave P Martin9fb74102015-07-24 16:37:48 +010046#include <asm/insn.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000047#include <asm/traps.h>
Mark Rutland872d8322017-07-14 20:30:35 +010048#include <asm/smp.h>
Mark Rutlanda9ea0012016-11-03 20:23:05 +000049#include <asm/stack_pointer.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000050#include <asm/stacktrace.h>
51#include <asm/exception.h>
52#include <asm/system_misc.h>
Andre Przywara7dd01ae2016-06-28 18:07:32 +010053#include <asm/sysreg.h>
Catalin Marinas60ffc302012-03-05 11:49:27 +000054
/*
 * Human-readable names for the four EL1 exception vector classes, indexed by
 * the "reason" value passed to bad_mode().
 */
static const char *handler[]= {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};

/* Sysctl-visible switch: when non-zero, log unhandled user-space faults. */
int show_unhandled_signals = 0;
Catalin Marinas60ffc302012-03-05 11:49:27 +000063
/* Print one backtrace frame; %pS resolves the address to symbol+offset. */
static void dump_backtrace_entry(unsigned long where)
{
	void *addr = (void *)where;

	printk(" %pS\n", addr);
}
68
/*
 * Dump the five 32-bit words around the faulting PC (four before it, plus
 * the faulting word itself, which is printed in parentheses).
 *
 * Reads are done with get_user(), so the caller must have arranged the
 * correct addressing context (see dump_instr(), which sets KERNEL_DS for
 * kernel-mode regs). A failed read terminates the dump with "bad PC value".
 */
static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	/* Room for 5 "%08x " words, the two parens around the PC word, NUL. */
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk("%sCode: %s\n", lvl, str);
}
Catalin Marinas60ffc302012-03-05 11:49:27 +000089
Mark Rutlandc5cea062016-06-13 11:15:14 +010090static void dump_instr(const char *lvl, struct pt_regs *regs)
91{
92 if (!user_mode(regs)) {
93 mm_segment_t fs = get_fs();
94 set_fs(KERNEL_DS);
95 __dump_instr(lvl, regs);
96 set_fs(fs);
97 } else {
98 __dump_instr(lvl, regs);
99 }
Catalin Marinas60ffc302012-03-05 11:49:27 +0000100}
101
/*
 * Walk and print the call stack of @tsk (current if NULL).
 *
 * If @regs is non-NULL, frames are skipped until the unwinder reaches the
 * frame whose fp matches regs->regs[29], i.e. the exception frame; printing
 * then starts from regs->pc. The task's stack is pinned with
 * try_get_task_stack() for the duration of the walk.
 */
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	int skip;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	/* Bail out if the stack has already been freed (dead task). */
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current) {
		/* Start unwinding from our own frame. */
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.pc = (unsigned long)dump_backtrace;
	} else {
		/*
		 * task blocked in __switch_to
		 */
		frame.fp = thread_saved_fp(tsk);
		frame.pc = thread_saved_pc(tsk);
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = 0;
#endif

	/* Only skip leading frames when an exception frame was supplied. */
	skip = !!regs;
	printk("Call trace:\n");
	do {
		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(frame.pc);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc);
		}
	} while (!unwind_frame(tsk, &frame));

	put_task_stack(tsk);
}
150
/*
 * Arch hook for the generic show_stack(): print @tsk's backtrace. @sp is
 * ignored here; dump_backtrace() derives the starting frame itself. The
 * barrier() keeps this frame alive while the unwind is in progress.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	barrier();
}
156
/* Config-dependent tags appended to the "Internal error" banner in __die(). */
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#define S_SMP " SMP"
Catalin Marinas60ffc302012-03-05 11:49:27 +0000163
/*
 * Core oops reporting, called with die_lock held by die(). Prints the error
 * banner, notifies the die notifier chain, and dumps regs/modules/backtrace.
 * Returns the notifier verdict; NOTIFY_STOP suppresses the detailed dump.
 */
static int __die(const char *str, int err, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	static int die_counter;	/* distinguishes successive oopses: [#1], [#2]... */
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	__show_regs(regs);
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
		 end_of_stack(tsk));

	/* Backtrace/code dump only makes sense for a kernel-mode fault. */
	if (!user_mode(regs)) {
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

	return ret;
}
191
/* Serialises concurrent oopses so their output does not interleave. */
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	/* Jump into the crash kernel (no return) if one is loaded. */
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	/*
	 * Both panic() calls never return, so die_lock is intentionally left
	 * held on those paths; the unlock below only runs when we survive.
	 */
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
227
/*
 * Rate-limited logging of an unhandled fault about to be signalled to the
 * current task. Reads the ESR stashed in thread.fault_code by the fault
 * path; prints nothing unless show_unhandled_signals is set, the signal is
 * genuinely unhandled, and the ratelimit allows it.
 */
static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned int esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}
Will Deacona1ece822018-02-20 13:46:05 +0000251
/*
 * Log (rate-limited) then deliver a fault signal to current. The show must
 * precede the force, since forcing the signal may terminate the task.
 */
void arm64_force_sig_fault(int signo, int code, void __user *addr,
			   const char *str)
{
	arm64_show_signal(signo, str);
	force_sig_fault(signo, code, addr, current);
}
258
/*
 * Log then deliver a SIGBUS memory-error signal (BUS_MCEERR_*) to current.
 * @lsb encodes the least significant bit of the corrupted address range.
 */
void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, addr, lsb, current);
}
265
/*
 * Log then deliver a SIGTRAP with an errno payload to current, as used by
 * the ptrace machinery.
 */
void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, addr);
}
272
/*
 * Central fatal-exception dispatch: a fault taken from user mode becomes a
 * signal to current (recording the ESR in thread.fault_code for
 * arm64_show_signal()); a fault from kernel mode is an oops via die().
 */
void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, void __user *addr,
		      int err)
{
	if (user_mode(regs)) {
		/* Signals can only be sent for the current exception frame. */
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, addr, str);
	} else {
		die(str, regs, err);
	}
}
287
/*
 * Advance the PC past an emulated/faulting instruction of @size bytes.
 * Exception handlers call this after emulating an instruction so that it
 * is not re-executed on return.
 */
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);
}
299
/* List of registered undefined-instruction emulation hooks. */
static LIST_HEAD(undef_hook);
/* Protects undef_hook; raw since it is taken from exception context. */
static DEFINE_RAW_SPINLOCK(undef_lock);

/* Add @hook to the undefined-instruction hook list. */
void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

/* Remove @hook, previously added with register_undef_hook(). */
void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
320
/*
 * Fetch the undefined instruction at the faulting PC and run the matching
 * registered hook, if any.
 *
 * The fetch handles three encodings: a kernel-mode A64 word (read safely via
 * probe_kernel_address()), a compat Thumb instruction (16-bit, widened to
 * 32-bit when it is the first half of a wide encoding), or a compat A32 word.
 * All fetches are little-endian and converted to CPU order.
 *
 * Returns the hook's return value (0 means handled), or 1 if no hook matched
 * or the instruction could not be read.
 */
static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!user_mode(regs)) {
		__le32 instr_le;
		if (probe_kernel_address((__force __le32 *)pc, instr_le))
			goto exit;
		instr = le32_to_cpu(instr_le);
	} else if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			/* First halfword of a 32-bit Thumb-2 encoding. */
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	}

	/* Last matching hook wins; fn is called after dropping the lock. */
	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
			(regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}
366
/*
 * Inject a fault signal into current for the instruction at @address.
 * Only valid for faults taken from user mode (WARNs and bails otherwise).
 * Signals whose (signal, code) pair does not describe a plain memory fault
 * are downgraded to SIGKILL so no bogus siginfo reaches userspace.
 */
void force_signal_inject(int signal, int code, unsigned long address)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	/* Pick the log string matching the signal being raised. */
	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
}
395
396/*
397 * Set up process info to signal segmentation fault - called on access error.
398 */
Will Deacon2c9120f32018-02-20 14:16:29 +0000399void arm64_notify_segfault(unsigned long addr)
Andre Przywara390bf172016-06-28 18:07:31 +0100400{
401 int code;
402
403 down_read(&current->mm->mmap_sem);
404 if (find_vma(current->mm, addr) == NULL)
405 code = SEGV_MAPERR;
406 else
407 code = SEGV_ACCERR;
408 up_read(&current->mm->mmap_sem);
409
Will Deacon2c9120f32018-02-20 14:16:29 +0000410 force_signal_inject(SIGSEGV, code, addr);
Andre Przywara390bf172016-06-28 18:07:31 +0100411}
412
/*
 * Entry point for undefined-instruction exceptions. Tries the AArch32
 * breakpoint handler, then any registered emulation hook; if neither claims
 * the instruction, a user task gets SIGILL. A kernel-mode undef that reaches
 * this point is unrecoverable, hence the BUG_ON.
 */
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}
425
/*
 * Issue one cache-maintenance instruction on a user-supplied address.
 * Rejects addresses beyond the user limit with -EFAULT, otherwise enables
 * user access (uaccess_ttbr0_enable) around the instruction. A faulting
 * access is caught by the exception-table fixup at label 3, which stores
 * -EFAULT into @res instead of the 0 written on the success path.
 */
#define __user_cache_maint(insn, address, res)			\
	if (address >= user_addr_max()) {			\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			"	.pushsection .fixup,\"ax\"\n"	\
			"	.align	2\n"			\
			"3:	mov	%w0, %w2\n"		\
			"	b	2b\n"			\
			"	.popsection\n"			\
			_ASM_EXTABLE(1b, 3b)			\
			: "=r" (res)				\
			: "r" (address), "i" (-EFAULT));	\
		uaccess_ttbr0_disable();			\
	}
Andre Przywara7dd01ae2016-06-28 18:07:32 +0100445
/*
 * Emulate an EL0 cache-maintenance instruction that trapped to EL1.
 * The target register and CRm field are decoded from the ESR; the address is
 * untagged before use. DC CVAU/CVAC are promoted to DC CIVAC. On an access
 * fault the task gets a segfault, otherwise the instruction is skipped.
 */
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
	unsigned long address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	address = untagged_addr(pt_regs_read_reg(regs, rt));

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		/* Unknown CRm: not a cache op we emulate -> SIGILL. */
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
		return;
	}

	if (ret)
		arm64_notify_segfault(address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
484
Suzuki K Poulose116c81f2016-09-09 14:07:16 +0100485static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
486{
Anshuman Khandual1c839142018-09-20 09:36:19 +0530487 int rt = ESR_ELx_SYS64_ISS_RT(esr);
Mark Rutland8b6e70f2017-02-09 15:19:19 +0000488 unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);
Suzuki K Poulose116c81f2016-09-09 14:07:16 +0100489
Mark Rutland8b6e70f2017-02-09 15:19:19 +0000490 pt_regs_write_reg(regs, rt, val);
491
Julien Thierry6436bee2017-10-25 10:04:33 +0100492 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
Suzuki K Poulose116c81f2016-09-09 14:07:16 +0100493}
494
Marc Zyngier6126ce02017-02-01 11:48:58 +0000495static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
496{
Anshuman Khandual1c839142018-09-20 09:36:19 +0530497 int rt = ESR_ELx_SYS64_ISS_RT(esr);
Marc Zyngier6126ce02017-02-01 11:48:58 +0000498
499 pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
Julien Thierry6436bee2017-10-25 10:04:33 +0100500 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
Marc Zyngier6126ce02017-02-01 11:48:58 +0000501}
502
Marc Zyngier98421192017-04-24 09:04:03 +0100503static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
504{
Anshuman Khandual1c839142018-09-20 09:36:19 +0530505 int rt = ESR_ELx_SYS64_ISS_RT(esr);
Marc Zyngier98421192017-04-24 09:04:03 +0100506
Marc Zyngierc6f97ad2017-07-21 18:15:27 +0100507 pt_regs_write_reg(regs, rt, arch_timer_get_rate());
Julien Thierry6436bee2017-10-25 10:04:33 +0100508 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
Marc Zyngier98421192017-04-24 09:04:03 +0100509}
510
Anshuman Khandual21f84792018-09-20 09:36:21 +0530511static void mrs_handler(unsigned int esr, struct pt_regs *regs)
512{
513 u32 sysreg, rt;
514
515 rt = ESR_ELx_SYS64_ISS_RT(esr);
516 sysreg = esr_sys64_to_sysreg(esr);
517
518 if (do_emulate_mrs(regs, sysreg, rt) != 0)
519 force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
520}
521
/* A trapped EL0 WFI is simply retired: skip the instruction (NOP). */
static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
526
/*
 * One entry per emulated system instruction: the handler runs when
 * (esr & esr_mask) == esr_val. Tables are terminated by a NULL handler.
 */
struct sys64_hook {
	unsigned int esr_mask;
	unsigned int esr_val;
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};

/* Emulation hooks for trapped AArch64 (EL0) system instructions. */
static struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};
571
Marc Zyngier70c63cd2018-09-27 17:15:29 +0100572
#ifdef CONFIG_COMPAT
/*
 * The AArch32 IT state is split across PSTATE: bits IT[1:0] live at
 * PSTATE[26:25] and IT[7:2] at PSTATE[15:10].
 */
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)
578
579static u32 compat_get_it_state(struct pt_regs *regs)
580{
581 u32 it, pstate = regs->pstate;
582
583 it = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
584 it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;
585
586 return it;
587}
588
589static void compat_set_it_state(struct pt_regs *regs, u32 it)
590{
591 u32 pstate_it;
592
593 pstate_it = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
594 pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;
595
596 regs->pstate &= ~PSR_AA32_IT_MASK;
597 regs->pstate |= pstate_it;
598}
599
/*
 * Evaluate whether a trapped CP15 access passed its condition code.
 * For A32 (and conditional T32) traps, ESR.CV is set and ESR.COND holds the
 * condition; for unconditional T32 traps the condition is recovered from the
 * IT state instead.
 */
static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		/* Outside an IT block: unconditional, so always executes. */
		if (!it)
			return true;

		/* IT[7:4] is the base condition for the block. */
		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}
619
/*
 * Advance the AArch32 IT state machine by one instruction, as the hardware
 * would when the instruction retires. No-op in ARM mode or outside an IT
 * block.
 */
static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
642
/*
 * Skip a trapped compat (AArch32) instruction: advance the IT state first,
 * then the PC, mirroring what retiring the instruction would have done.
 */
static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
						   unsigned int sz)
{
	advance_itstate(regs);
	arm64_skip_faulting_instruction(regs, sz);
}
649
Marc Zyngier32a3e632018-09-27 17:15:33 +0100650static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
651{
652 int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;
653
654 pt_regs_write_reg(regs, reg, arch_timer_get_rate());
655 arm64_compat_skip_faulting_instruction(regs, 4);
656}
657
/* Emulation hooks for trapped 32-bit CP15 (MRC/MCR) accesses. */
static struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};
666
Marc Zyngier50de0132018-09-27 17:15:32 +0100667static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
668{
669 int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
670 int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
671 u64 val = arch_counter_get_cntvct();
672
673 pt_regs_write_reg(regs, rt, lower_32_bits(val));
674 pt_regs_write_reg(regs, rt2, upper_32_bits(val));
675 arm64_compat_skip_faulting_instruction(regs, 4);
676}
677
/* Emulation hooks for trapped 64-bit CP15 (MRRC/MCRR) accesses. */
static struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{},
};
686
/*
 * Entry point for trapped AArch32 CP15 accesses. A failed condition check
 * makes the instruction a NOP (skip and return). Otherwise the matching
 * 32- or 64-bit hook table is searched; anything unhandled falls through to
 * do_undefinstr() for consistent undef semantics.
 */
asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
	struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_compat_skip_faulting_instruction(regs, 4);
		return;
	}

	/* Dispatch on the exception class: 32-bit vs 64-bit CP15 access. */
	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_undefinstr(regs);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_undefinstr(regs);
}
725#endif
726
Suzuki K Poulose9dbd5bb2016-09-09 14:07:15 +0100727asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
728{
729 struct sys64_hook *hook;
730
731 for (hook = sys64_hooks; hook->handler; hook++)
732 if ((hook->esr_mask & esr) == hook->esr_val) {
733 hook->handler(esr, regs);
734 return;
735 }
736
Mark Rutland49f6cba2017-01-27 16:15:38 +0000737 /*
738 * New SYS instructions may previously have been undefined at EL0. Fall
739 * back to our usual undefined instruction handler so that we handle
740 * these consistently.
741 */
742 do_undefinstr(regs);
Suzuki K Poulose9dbd5bb2016-09-09 14:07:15 +0100743}
744
/*
 * Human-readable names for ESR_ELx exception classes, indexed by EC value.
 * Unlisted entries default to "UNRECOGNIZED EC" via the range designator.
 */
static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};
785
786const char *esr_get_class_string(u32 esr)
787{
Mark Rutland275f3442016-05-31 12:33:01 +0100788 return esr_class_str[ESR_ELx_EC(esr)];
Mark Rutland60a1f022014-11-18 12:16:30 +0000789}
790
Catalin Marinas60ffc302012-03-05 11:49:27 +0000791/*
Mark Rutland7d9e8f72017-01-18 17:23:41 +0000792 * bad_mode handles the impossible case in the exception vector. This is always
793 * fatal.
Catalin Marinas60ffc302012-03-05 11:49:27 +0000794 */
795asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
796{
797 console_verbose();
798
Mark Rutland8051f4d2016-05-31 12:07:47 +0100799 pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
800 handler[reason], smp_processor_id(), esr,
801 esr_get_class_string(esr));
Mark Rutland7d9e8f72017-01-18 17:23:41 +0000802
James Morse0fbeb312017-11-02 12:12:34 +0000803 local_daif_mask();
Mark Rutland7d9e8f72017-01-18 17:23:41 +0000804 panic("bad mode");
805}
806
807/*
808 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
809 * exceptions taken from EL0. Unlike bad_mode, this returns.
810 */
811asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
812{
Mark Rutland7d9e8f72017-01-18 17:23:41 +0000813 void __user *pc = (void __user *)instruction_pointer(regs);
Catalin Marinas60ffc302012-03-05 11:49:27 +0000814
Mark Rutland7d9e8f72017-01-18 17:23:41 +0000815 current->thread.fault_address = 0;
Will Deacon4e829b62018-02-20 15:18:13 +0000816 current->thread.fault_code = esr;
Mark Rutland7d9e8f72017-01-18 17:23:41 +0000817
Eric W. Biedermanfeca3552018-09-22 10:26:57 +0200818 arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
819 "Bad EL0 synchronous exception");
Catalin Marinas60ffc302012-03-05 11:49:27 +0000820}
821
#ifdef CONFIG_VMAP_STACK

/*
 * Per-CPU emergency stack for kernel stack overflow handling; its bounds
 * are reported by handle_bad_stack() below.
 */
DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);
826
827asmlinkage void handle_bad_stack(struct pt_regs *regs)
828{
829 unsigned long tsk_stk = (unsigned long)current->stack;
830 unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
831 unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
832 unsigned int esr = read_sysreg(esr_el1);
833 unsigned long far = read_sysreg(far_el1);
834
835 console_verbose();
836 pr_emerg("Insufficient stack space to handle exception!");
837
838 pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
839 pr_emerg("FAR: 0x%016lx\n", far);
840
841 pr_emerg("Task stack: [0x%016lx..0x%016lx]\n",
842 tsk_stk, tsk_stk + THREAD_SIZE);
843 pr_emerg("IRQ stack: [0x%016lx..0x%016lx]\n",
844 irq_stk, irq_stk + THREAD_SIZE);
845 pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
846 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);
847
848 __show_regs(regs);
849
850 /*
851 * We use nmi_panic to limit the potential for recusive overflows, and
852 * to get a better stack trace.
853 */
854 nmi_panic(NULL, "kernel stack overflow");
855 cpu_park_loop();
856}
857#endif
858
James Morse6bf0dcf2018-01-15 19:38:57 +0000859void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
Xie XiuQia92d4d12017-11-02 12:12:42 +0000860{
Xie XiuQia92d4d12017-11-02 12:12:42 +0000861 console_verbose();
862
863 pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
864 smp_processor_id(), esr, esr_get_class_string(esr));
James Morse6bf0dcf2018-01-15 19:38:57 +0000865 if (regs)
866 __show_regs(regs);
Xie XiuQia92d4d12017-11-02 12:12:42 +0000867
James Morse6bf0dcf2018-01-15 19:38:57 +0000868 nmi_panic(regs, "Asynchronous SError Interrupt");
869
870 cpu_park_loop();
871 unreachable();
872}
873
874bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
875{
876 u32 aet = arm64_ras_serror_get_severity(esr);
877
878 switch (aet) {
879 case ESR_ELx_AET_CE: /* corrected error */
880 case ESR_ELx_AET_UEO: /* restartable, not yet consumed */
881 /*
882 * The CPU can make progress. We may take UEO again as
883 * a more severe error.
884 */
885 return false;
886
887 case ESR_ELx_AET_UEU: /* Uncorrected Unrecoverable */
888 case ESR_ELx_AET_UER: /* Uncorrected Recoverable */
889 /*
890 * The CPU can't make progress. The exception may have
891 * been imprecise.
892 */
893 return true;
894
895 case ESR_ELx_AET_UC: /* Uncontainable or Uncategorized error */
896 default:
897 /* Error has been silently propagated */
898 arm64_serror_panic(regs, esr);
899 }
900}
901
902asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
903{
Julien Thierry7d314642019-01-31 14:59:00 +0000904 const bool was_in_nmi = in_nmi();
905
906 if (!was_in_nmi)
907 nmi_enter();
James Morse6bf0dcf2018-01-15 19:38:57 +0000908
909 /* non-RAS errors are not containable */
910 if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
911 arm64_serror_panic(regs, esr);
912
Julien Thierry7d314642019-01-31 14:59:00 +0000913 if (!was_in_nmi)
914 nmi_exit();
Xie XiuQia92d4d12017-11-02 12:12:42 +0000915}
916
/* Log the source location and raw value of a corrupt PTE. */
void __pte_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}
921
/* Log the source location and raw value of a corrupt PMD. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}
926
/* Log the source location and raw value of a corrupt PUD. */
void __pud_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}
931
/* Log the source location and raw value of a corrupt PGD. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}
936
Dave P Martin9fb74102015-07-24 16:37:48 +0100937/* GENERIC_BUG traps */
938
int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * Every address is accepted here, because bug_handler() is only
	 * reached for BRK #BUG_BRK_IMM: any spurious instance without a bug
	 * table entry is rejected by report_bug(), handed back to the
	 * debug-monitors code, and treated as a fatal unexpected debug
	 * exception.
	 */
	return 1;
}
950
951static int bug_handler(struct pt_regs *regs, unsigned int esr)
952{
Dave P Martin9fb74102015-07-24 16:37:48 +0100953 switch (report_bug(regs->pc, regs)) {
954 case BUG_TRAP_TYPE_BUG:
955 die("Oops - BUG", regs, 0);
956 break;
957
958 case BUG_TRAP_TYPE_WARN:
959 break;
960
961 default:
962 /* unknown/unrecognised bug trap type */
963 return DBG_HOOK_ERROR;
964 }
965
966 /* If thread survives, skip over the BUG instruction and continue: */
Julien Thierry6436bee2017-10-25 10:04:33 +0100967 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
Dave P Martin9fb74102015-07-24 16:37:48 +0100968 return DBG_HOOK_HANDLED;
969}
970
/* Registered from trap_init() to claim BRK #BUG_BRK_IMM exceptions. */
static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};
975
#ifdef CONFIG_KASAN_SW_TAGS

/*
 * Layout of the KASAN-specific bits in the BRK comment field (as seen in
 * the ESR): bit 5 set means the compiler allows recovery after the report,
 * bit 4 set means the faulting access was a write, and bits 3:0 encode
 * log2 of the access size.
 */
#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE	0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))
982
983static int kasan_handler(struct pt_regs *regs, unsigned int esr)
984{
985 bool recover = esr & KASAN_ESR_RECOVER;
986 bool write = esr & KASAN_ESR_WRITE;
987 size_t size = KASAN_ESR_SIZE(esr);
988 u64 addr = regs->regs[0];
989 u64 pc = regs->pc;
990
Andrey Konovalov41eea9c2018-12-28 00:30:54 -0800991 kasan_report(addr, size, write, pc);
992
993 /*
994 * The instrumentation allows to control whether we can proceed after
995 * a crash was detected. This is done by passing the -recover flag to
996 * the compiler. Disabling recovery allows to generate more compact
997 * code.
998 *
999 * Unfortunately disabling recovery doesn't work for the kernel right
1000 * now. KASAN reporting is disabled in some contexts (for example when
1001 * the allocator accesses slab object metadata; this is controlled by
1002 * current->kasan_depth). All these accesses are detected by the tool,
1003 * even though the reports for them are not printed.
1004 *
1005 * This is something that might be fixed at some point in the future.
1006 */
1007 if (!recover)
1008 die("Oops - KASAN", regs, 0);
1009
1010 /* If thread survives, skip over the brk instruction and continue: */
1011 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
1012 return DBG_HOOK_HANDLED;
1013}
1014
/*
 * Registered from trap_init(); matches KASAN BRK immediates via imm plus
 * mask, since the low bits carry per-access information.
 */
static struct break_hook kasan_break_hook = {
	.fn = kasan_handler,
	.imm = KASAN_BRK_IMM,
	.mask = KASAN_BRK_MASK,
};
1020#endif
1021
/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 *
 * Returns 0 when the exception was handled, non-zero otherwise.
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
	unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

	/* Route KASAN BRKs (imm matched under the mask) to kasan_handler(). */
	if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}
1037
/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
	/* Hook BUG()/WARN() BRK handling into the kernel break-hook list. */
	register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	/* KASAN software-tag reports also arrive via BRK. */
	register_kernel_break_hook(&kasan_break_hook);
#endif
}