blob: dba2197c05c30e5c8191541b360f7189afb05282 [file] [log] [blame]
Thomas Gleixner457c8992019-05-19 13:08:55 +01001// SPDX-License-Identifier: GPL-2.0-only
Paul Gortmaker744c1932016-09-19 17:04:18 -04002#include <linux/extable.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -08003#include <linux/uaccess.h>
Ingo Molnarb17b0152017-02-08 18:51:35 +01004#include <linux/sched/debug.h>
Peter Zijlstra4b5305d2021-11-10 11:01:09 +01005#include <linux/bitfield.h>
Juergen Gross42b3a4c2017-11-24 09:42:21 +01006#include <xen/xen.h>
Ingo Molnarb17b0152017-02-08 18:51:35 +01007
Thomas Gleixner079ec412021-10-15 03:16:41 +02008#include <asm/fpu/api.h>
Brijesh Singhe7599592021-04-27 06:16:34 -05009#include <asm/sev.h>
Andy Lutomirski0d0efc02016-04-02 07:01:33 -070010#include <asm/traps.h>
Borislav Petkov81c29492016-07-05 00:31:27 +020011#include <asm/kdebug.h>
Peter Zijlstra4b5305d2021-11-10 11:01:09 +010012#include <asm/insn-eval.h>
Peter Zijlstra5ce8e392021-11-10 11:01:20 +010013#include <asm/sgx.h>
Peter Zijlstra4b5305d2021-11-10 11:01:09 +010014
/*
 * Translate an extable-encoded register number into a pointer into the
 * saved register set.  An out-of-range number triggers a one-shot warning
 * and yields a harmless static scratch slot instead of a wild pointer.
 */
static inline unsigned long *pt_regs_nr(struct pt_regs *regs, int nr)
{
	static unsigned long __dummy;
	int offset = pt_regs_offset(regs, nr);

	if (WARN_ON_ONCE(offset < 0))
		return &__dummy;

	return (unsigned long *)((unsigned long)regs + offset);
}
Harvey Harrison6d485832008-01-30 13:31:41 +010025
H. Peter Anvin70627652012-04-20 17:12:48 -070026static inline unsigned long
H. Peter Anvin70627652012-04-20 17:12:48 -070027ex_fixup_addr(const struct exception_table_entry *x)
28{
29 return (unsigned long)&x->fixup + x->fixup;
30}
Tony Luck548acf12016-02-17 10:20:12 -080031
Peter Zijlstra4b5305d2021-11-10 11:01:09 +010032static bool ex_handler_default(const struct exception_table_entry *e,
Thomas Gleixner46d28942021-09-08 15:29:18 +020033 struct pt_regs *regs)
Tony Luck548acf12016-02-17 10:20:12 -080034{
Peter Zijlstra4b5305d2021-11-10 11:01:09 +010035 if (e->data & EX_FLAG_CLEAR_AX)
36 regs->ax = 0;
37 if (e->data & EX_FLAG_CLEAR_DX)
38 regs->dx = 0;
39
40 regs->ip = ex_fixup_addr(e);
Tony Luck548acf12016-02-17 10:20:12 -080041 return true;
42}
Tony Luck548acf12016-02-17 10:20:12 -080043
Thomas Gleixner46d28942021-09-08 15:29:18 +020044static bool ex_handler_fault(const struct exception_table_entry *fixup,
45 struct pt_regs *regs, int trapnr)
Tony Luck548acf12016-02-17 10:20:12 -080046{
Tony Luck548acf12016-02-17 10:20:12 -080047 regs->ax = trapnr;
Thomas Gleixner46d28942021-09-08 15:29:18 +020048 return ex_handler_default(fixup, regs);
Tony Luck548acf12016-02-17 10:20:12 -080049}
Tony Luck548acf12016-02-17 10:20:12 -080050
Peter Zijlstra5ce8e392021-11-10 11:01:20 +010051static bool ex_handler_sgx(const struct exception_table_entry *fixup,
52 struct pt_regs *regs, int trapnr)
53{
54 regs->ax = trapnr | SGX_ENCLS_FAULT_FLAG;
55 return ex_handler_default(fixup, regs);
56}
57
Kees Cook7a46ec02017-08-15 09:19:24 -070058/*
Eric Biggersd5c80282017-09-23 15:00:09 +020059 * Handler for when we fail to restore a task's FPU state. We should never get
60 * here because the FPU state of a task using the FPU (task->thread.fpu.state)
61 * should always be valid. However, past bugs have allowed userspace to set
62 * reserved bits in the XSAVE area using PTRACE_SETREGSET or sys_rt_sigreturn().
63 * These caused XRSTOR to fail when switching to the task, leaking the FPU
64 * registers of the task previously executing on the CPU. Mitigate this class
65 * of vulnerability by restoring from the initial state (essentially, zeroing
66 * out all the FPU registers) if we can't restore from the task's FPU state.
67 */
Thomas Gleixner46d28942021-09-08 15:29:18 +020068static bool ex_handler_fprestore(const struct exception_table_entry *fixup,
69 struct pt_regs *regs)
Eric Biggersd5c80282017-09-23 15:00:09 +020070{
71 regs->ip = ex_fixup_addr(fixup);
72
73 WARN_ONCE(1, "Bad FPU state detected at %pB, reinitializing FPU registers.",
74 (void *)instruction_pointer(regs));
75
Thomas Gleixner079ec412021-10-15 03:16:41 +020076 fpu_reset_from_exception_fixup();
Eric Biggersd5c80282017-09-23 15:00:09 +020077 return true;
78}
Eric Biggersd5c80282017-09-23 15:00:09 +020079
Thomas Gleixner46d28942021-09-08 15:29:18 +020080static bool ex_handler_uaccess(const struct exception_table_entry *fixup,
81 struct pt_regs *regs, int trapnr)
Jann Horn75045f72018-08-28 22:14:18 +020082{
Linus Torvalds00c42372019-02-26 09:16:04 -080083 WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
Thomas Gleixner46d28942021-09-08 15:29:18 +020084 return ex_handler_default(fixup, regs);
Jann Horn75045f72018-08-28 22:14:18 +020085}
Jann Horn75045f72018-08-28 22:14:18 +020086
Thomas Gleixner46d28942021-09-08 15:29:18 +020087static bool ex_handler_copy(const struct exception_table_entry *fixup,
88 struct pt_regs *regs, int trapnr)
Youquan Song278b9172020-10-06 14:09:07 -070089{
90 WARN_ONCE(trapnr == X86_TRAP_GP, "General protection fault in user access. Non-canonical address?");
Thomas Gleixner46d28942021-09-08 15:29:18 +020091 return ex_handler_fault(fixup, regs, trapnr);
Youquan Song278b9172020-10-06 14:09:07 -070092}
Youquan Song278b9172020-10-06 14:09:07 -070093
Peter Zijlstrad52a7342021-11-10 11:01:10 +010094static bool ex_handler_msr(const struct exception_table_entry *fixup,
95 struct pt_regs *regs, bool wrmsr, bool safe, int reg)
Andy Lutomirskifbd70432016-04-02 07:01:37 -070096{
Peter Zijlstrad52a7342021-11-10 11:01:10 +010097 if (!safe && wrmsr &&
98 pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
Borislav Petkov81c29492016-07-05 00:31:27 +020099 (unsigned int)regs->cx, (unsigned int)regs->dx,
100 (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
101 show_stack_regs(regs);
Andy Lutomirskifbd70432016-04-02 07:01:37 -0700102
Peter Zijlstrad52a7342021-11-10 11:01:10 +0100103 if (!safe && !wrmsr &&
104 pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
105 (unsigned int)regs->cx, regs->ip, (void *)regs->ip))
106 show_stack_regs(regs);
107
108 if (!wrmsr) {
109 /* Pretend that the read succeeded and returned 0. */
110 regs->ax = 0;
111 regs->dx = 0;
112 }
113
114 if (safe)
115 *pt_regs_nr(regs, reg) = -EIO;
116
Thomas Gleixner46d28942021-09-08 15:29:18 +0200117 return ex_handler_default(fixup, regs);
Andy Lutomirskifbd70432016-04-02 07:01:37 -0700118}
Andy Lutomirskifbd70432016-04-02 07:01:37 -0700119
Thomas Gleixner46d28942021-09-08 15:29:18 +0200120static bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
121 struct pt_regs *regs)
Andy Lutomirski45e876f2016-04-26 12:23:26 -0700122{
123 if (static_cpu_has(X86_BUG_NULL_SEG))
124 asm volatile ("mov %0, %%fs" : : "rm" (__USER_DS));
125 asm volatile ("mov %0, %%fs" : : "rm" (0));
Thomas Gleixner46d28942021-09-08 15:29:18 +0200126 return ex_handler_default(fixup, regs);
Andy Lutomirski45e876f2016-04-26 12:23:26 -0700127}
Andy Lutomirski45e876f2016-04-26 12:23:26 -0700128
Peter Zijlstra4b5305d2021-11-10 11:01:09 +0100129static bool ex_handler_imm_reg(const struct exception_table_entry *fixup,
130 struct pt_regs *regs, int reg, int imm)
131{
132 *pt_regs_nr(regs, reg) = (long)imm;
133 return ex_handler_default(fixup, regs);
134}
135
Peter Zijlstrad5d797d2021-11-10 11:01:22 +0100136static bool ex_handler_ucopy_len(const struct exception_table_entry *fixup,
137 struct pt_regs *regs, int trapnr, int reg, int imm)
138{
139 regs->cx = imm * regs->cx + *pt_regs_nr(regs, reg);
140 return ex_handler_uaccess(fixup, regs, trapnr);
141}
142
Thomas Gleixner46d28942021-09-08 15:29:18 +0200143int ex_get_fixup_type(unsigned long ip)
Tony Luck548acf12016-02-17 10:20:12 -0800144{
Thomas Gleixner46d28942021-09-08 15:29:18 +0200145 const struct exception_table_entry *e = search_exception_tables(ip);
Tony Luck548acf12016-02-17 10:20:12 -0800146
Peter Zijlstra4b5305d2021-11-10 11:01:09 +0100147 return e ? FIELD_GET(EX_DATA_TYPE_MASK, e->data) : EX_TYPE_NONE;
Tony Luck548acf12016-02-17 10:20:12 -0800148}
149
Jann Horn81fd9c12018-08-28 22:14:19 +0200150int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
151 unsigned long fault_addr)
Tony Luck548acf12016-02-17 10:20:12 -0800152{
153 const struct exception_table_entry *e;
Peter Zijlstra4b5305d2021-11-10 11:01:09 +0100154 int type, reg, imm;
Harvey Harrison6d485832008-01-30 13:31:41 +0100155
156#ifdef CONFIG_PNPBIOS
157 if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
158 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
159 extern u32 pnp_bios_is_utter_crap;
160 pnp_bios_is_utter_crap = 1;
161 printk(KERN_CRIT "PNPBIOS fault.. attempting recovery.\n");
162 __asm__ volatile(
163 "movl %0, %%esp\n\t"
164 "jmp *%1\n\t"
165 : : "g" (pnp_bios_fault_esp), "g" (pnp_bios_fault_eip));
166 panic("do_trap: can't hit this");
167 }
168#endif
169
Tony Luck548acf12016-02-17 10:20:12 -0800170 e = search_exception_tables(regs->ip);
171 if (!e)
172 return 0;
H. Peter Anvin70627652012-04-20 17:12:48 -0700173
Peter Zijlstra4b5305d2021-11-10 11:01:09 +0100174 type = FIELD_GET(EX_DATA_TYPE_MASK, e->data);
175 reg = FIELD_GET(EX_DATA_REG_MASK, e->data);
176 imm = FIELD_GET(EX_DATA_IMM_MASK, e->data);
177
178 switch (type) {
Thomas Gleixner46d28942021-09-08 15:29:18 +0200179 case EX_TYPE_DEFAULT:
Thomas Gleixner2cadf522021-09-08 15:29:19 +0200180 case EX_TYPE_DEFAULT_MCE_SAFE:
Thomas Gleixner46d28942021-09-08 15:29:18 +0200181 return ex_handler_default(e, regs);
182 case EX_TYPE_FAULT:
Thomas Gleixner2cadf522021-09-08 15:29:19 +0200183 case EX_TYPE_FAULT_MCE_SAFE:
Thomas Gleixner46d28942021-09-08 15:29:18 +0200184 return ex_handler_fault(e, regs, trapnr);
185 case EX_TYPE_UACCESS:
186 return ex_handler_uaccess(e, regs, trapnr);
187 case EX_TYPE_COPY:
188 return ex_handler_copy(e, regs, trapnr);
189 case EX_TYPE_CLEAR_FS:
190 return ex_handler_clear_fs(e, regs);
191 case EX_TYPE_FPU_RESTORE:
192 return ex_handler_fprestore(e, regs);
Thomas Gleixner46d28942021-09-08 15:29:18 +0200193 case EX_TYPE_BPF:
194 return ex_handler_bpf(e, regs);
Peter Zijlstrad52a7342021-11-10 11:01:10 +0100195 case EX_TYPE_WRMSR:
196 return ex_handler_msr(e, regs, true, false, reg);
197 case EX_TYPE_RDMSR:
198 return ex_handler_msr(e, regs, false, false, reg);
199 case EX_TYPE_WRMSR_SAFE:
200 return ex_handler_msr(e, regs, true, true, reg);
201 case EX_TYPE_RDMSR_SAFE:
202 return ex_handler_msr(e, regs, false, true, reg);
Thomas Gleixner46d28942021-09-08 15:29:18 +0200203 case EX_TYPE_WRMSR_IN_MCE:
204 ex_handler_msr_mce(regs, true);
205 break;
Peter Zijlstrad52a7342021-11-10 11:01:10 +0100206 case EX_TYPE_RDMSR_IN_MCE:
207 ex_handler_msr_mce(regs, false);
208 break;
Peter Zijlstra9cdbeec2022-01-11 12:11:14 +0100209 case EX_TYPE_POP_REG:
210 regs->sp += sizeof(long);
211 fallthrough;
Peter Zijlstra4b5305d2021-11-10 11:01:09 +0100212 case EX_TYPE_IMM_REG:
213 return ex_handler_imm_reg(e, regs, reg, imm);
Peter Zijlstra5ce8e392021-11-10 11:01:20 +0100214 case EX_TYPE_FAULT_SGX:
215 return ex_handler_sgx(e, regs, trapnr);
Peter Zijlstrad5d797d2021-11-10 11:01:22 +0100216 case EX_TYPE_UCOPY_LEN:
217 return ex_handler_ucopy_len(e, regs, trapnr, reg, imm);
Thomas Gleixner46d28942021-09-08 15:29:18 +0200218 }
219 BUG();
Harvey Harrison6d485832008-01-30 13:31:41 +0100220}
H. Peter Anvin6a1ea272012-04-19 15:24:20 -0700221
Andy Lutomirski0e861fb2016-04-02 07:01:34 -0700222extern unsigned int early_recursion_flag;
223
H. Peter Anvin6a1ea272012-04-19 15:24:20 -0700224/* Restricted version used during very early boot */
Andy Lutomirski0e861fb2016-04-02 07:01:34 -0700225void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
H. Peter Anvin6a1ea272012-04-19 15:24:20 -0700226{
Andy Lutomirski0d0efc02016-04-02 07:01:33 -0700227 /* Ignore early NMIs. */
228 if (trapnr == X86_TRAP_NMI)
Andy Lutomirski0e861fb2016-04-02 07:01:34 -0700229 return;
230
231 if (early_recursion_flag > 2)
232 goto halt_loop;
233
Andy Lutomirskifc0e81b2016-11-19 18:42:40 -0800234 /*
235 * Old CPUs leave the high bits of CS on the stack
236 * undefined. I'm not sure which CPUs do this, but at least
237 * the 486 DX works this way.
Juergen Gross42b3a4c2017-11-24 09:42:21 +0100238 * Xen pv domains are not using the default __KERNEL_CS.
Andy Lutomirskifc0e81b2016-11-19 18:42:40 -0800239 */
Juergen Gross42b3a4c2017-11-24 09:42:21 +0100240 if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
Andy Lutomirski0e861fb2016-04-02 07:01:34 -0700241 goto fail;
Andy Lutomirski0d0efc02016-04-02 07:01:33 -0700242
Andy Lutomirski60a0e202016-04-04 08:46:22 -0700243 /*
244 * The full exception fixup machinery is available as soon as
245 * the early IDT is loaded. This means that it is the
246 * responsibility of extable users to either function correctly
247 * when handlers are invoked early or to simply avoid causing
248 * exceptions before they're ready to handle them.
249 *
250 * This is better than filtering which handlers can be used,
251 * because refusing to call a handler here is guaranteed to
252 * result in a hard-to-debug panic.
253 *
254 * Keep in mind that not all vectors actually get here. Early
Jann Horn81fd9c12018-08-28 22:14:19 +0200255 * page faults, for example, are special.
Andy Lutomirski60a0e202016-04-04 08:46:22 -0700256 */
Jann Horn81fd9c12018-08-28 22:14:19 +0200257 if (fixup_exception(regs, trapnr, regs->orig_ax, 0))
Andy Lutomirskiae7ef452016-04-02 07:01:35 -0700258 return;
Andy Lutomirski0e861fb2016-04-02 07:01:34 -0700259
Andy Lutomirski15a416e2020-06-11 20:26:38 -0700260 if (trapnr == X86_TRAP_UD) {
261 if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
262 /* Skip the ud2. */
263 regs->ip += LEN_UD2;
264 return;
265 }
266
267 /*
268 * If this was a BUG and report_bug returns or if this
269 * was just a normal #UD, we want to continue onward and
270 * crash.
271 */
272 }
Peter Zijlstra8a524f82017-06-12 13:52:46 +0200273
Andy Lutomirski0e861fb2016-04-02 07:01:34 -0700274fail:
275 early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
276 (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,
277 regs->orig_ax, read_cr2());
278
279 show_regs(regs);
280
281halt_loop:
282 while (true)
283 halt();
H. Peter Anvin6a1ea272012-04-19 15:24:20 -0700284}