// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *);

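/*
 * Inject a virtual abort into the guest unless the ESR describes a
 * non-fatal RAS SError, in which case there is nothing left to do here.
 */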
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

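/*
 * Guest HVCs are hypercalls (PSCI and other SMCCC services). If
 * kvm_hvc_call_handler() rejects the call (ret < 0), report -1 in x0 to
 * the guest, as the SMCCC convention requires, and resume it.
 */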
static int handle_hvc(struct kvm_vcpu *vcpu)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	return ret;
}

static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
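	/* Tell the guest the SMC is not supported: return -1 in x0. */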
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_incr_pc(vcpu);
	return 1;
}

/*
 * Guest access to FP/ASIMD registers is routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_halt(), which stops world-switching this
 * vcpu and schedules other host processes until there is an incoming
 * IRQ or FIQ for the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_wfi(vcpu);
	}

	kvm_incr_pc(vcpu);

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception
 *
 * @vcpu:	the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * Return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 esr = kvm_vcpu_get_esr(vcpu);

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = esr;

	if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
		run->debug.arch.far = vcpu->arch.fault.far_el2;

	return 0;
}

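/*
 * Catch-all for exception classes that have no dedicated handler: log the
 * unexpected ESR and inject an UNDEF into the guest.
 */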
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest access to SVE registers should be routed to this handler only
 * when the system doesn't support SVE.
 */
static int handle_sve(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
 * a NOP). If we get here, it means we didn't fix up ptrauth on exit, and all
 * we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

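/*
 * Exit handlers, indexed by the exception class (ESR_ELx.EC) that caused
 * the trap. Anything without a dedicated entry falls back to
 * kvm_handle_unknown_ec().
 */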
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};

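/* Look up the handler for the exception class encoded in the guest's ESR. */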
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	if (ARM_SERROR_PENDING(exception_index)) {
		/*
		 * The SError is handled by handle_exit_early(). If the guest
		 * survives it will re-execute the original instruction.
		 */
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
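		/* The SError has already been handled in handle_exit_early(). */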
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return. Guest state must
		 * have been corrupted somehow. Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

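/*
 * Called when the nVHE hypervisor panics: the register state captured at EL2
 * is passed in so we can decode the reason (bad host exception, hyp BUG() or
 * plain hyp panic) and report it before panicking the host kernel.
 */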
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;

	/*
	 * The nVHE hyp symbols are not included by kallsyms to avoid issues
	 * with aliasing. That means that the symbols cannot be printed with the
	 * "%pS" format specifier, so fall back to the vmlinux address if
	 * there's no better option.
	 */
	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			kvm_err("nVHE hyp BUG at: %016llx!\n", elr_virt + hyp_offset);
	} else {
		kvm_err("nVHE hyp panic at: %016llx!\n", elr_virt + hyp_offset);
	}

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for translating
	 * hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
}