// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *);

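/*
 * Forward an SError to the guest as a virtual abort, unless the ESR
 * carries a non-fatal RAS syndrome, in which case the error is
 * contained and nothing needs to be injected.
 */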
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

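/*
 * A guest HVC exit lands here: the hypercall (PSCI, SMCCC, ...) is
 * dispatched by kvm_hvc_call_handler(). Unhandled calls place the
 * SMCCC "not supported" value (-1) in x0 and resume the guest. Unlike
 * a trapped SMC, the PC was already advanced past the HVC instruction
 * when the exception was taken, so there is no kvm_incr_pc() here.
 */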
static int handle_hvc(struct kvm_vcpu *vcpu)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_hvc_call_handler(vcpu);
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	return ret;
}

static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_incr_pc(vcpu);
	return 1;
}

/*
 * Guest access to FP/ASIMD registers is routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu:	the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_get_esr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}

	kvm_incr_pc(vcpu);

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu:	the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * @return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 esr = kvm_vcpu_get_esr(vcpu);

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = esr;

	if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
		run->debug.arch.far = vcpu->arch.fault.far_el2;

	return 0;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest access to SVE registers should be routed to this handler only
 * when the system doesn't support SVE.
 */
static int handle_sve(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not turn into
 * a NOP). If we get here, it means we didn't fix up ptrauth on exit, and all
 * we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

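/*
 * Exit handlers, indexed by the exception class (ESR_ELx.EC) of the
 * trapping instruction. Any class without a dedicated handler falls
 * back to kvm_handle_unknown_ec() through the range initialiser.
 */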
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
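		/*
		 * The SError was already consumed by handle_exit_early();
		 * all that is left to do is resume the guest.
		 */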
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return. Guest state must
		 * have been corrupted somehow. Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
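	/*
	 * A pending SError is handled before anything else: with the RAS
	 * extension the deferred syndrome is read back via DISR_EL1,
	 * otherwise injecting a virtual abort is all we can do.
	 */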
	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

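/*
 * Called when the nVHE hypervisor panics: translate the hypervisor ELR
 * back to a kernel image address so the BUG()/panic location can be
 * reported, then bring the host down with the saved hyp register state.
 */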
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;

	/*
	 * The nVHE hyp symbols are not included by kallsyms to avoid issues
	 * with aliasing. That means that the symbols cannot be printed with the
	 * "%pS" format specifier, so fall back to the vmlinux address if
	 * there's no better option.
	 */
	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			kvm_err("nVHE hyp BUG at: %016llx!\n", elr_virt + hyp_offset);
	} else {
		kvm_err("nVHE hyp panic at: %016llx!\n", elr_virt + hyp_offset);
	}

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for translating
	 * hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
}
Andrew Scullaec0fae2021-03-18 14:33:11 +0000332}