// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
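	/*
	 * _TIF_FOREIGN_FPSTATE set on the host thread means the FP regs
	 * were clobbered while in the run loop (e.g. by kernel-mode NEON
	 * in a softirq), so neither the guest's nor the host's register
	 * contents can be considered live any more.
	 */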
	if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
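		/* Bit 30 is FPEXC.EN: FP stays enabled at EL1, so accesses trap to EL2 */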
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

static void __hyp_text __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
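	/*
	 * Under VHE (HCR_EL2.E2H == 1), CPACR_EL1 accesses from EL2 map
	 * onto CPTR_EL2: clearing ZEN/FPEN traps guest SVE/FP accesses
	 * to EL2, setting them grants direct access.
	 */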
	val &= ~CPACR_EL1_ZEN;
	if (update_fp_enabled(vcpu)) {
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN;
	} else {
		val &= ~CPACR_EL1_FPEN;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
NOKPROBE_SYMBOL(activate_traps_vhe);

static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
}

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

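	/*
	 * Cavium ThunderX2 erratum 219 workaround: trap guest writes to
	 * the virtual-memory control registers (HCR_EL2.TVM) so that
	 * handle_tx2_tvm() below can emulate them.
	 */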
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	if (has_vhe())
		activate_traps_vhe(vcpu);
	else
		__activate_traps_nvhe(vcpu);
}

static void deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

	/*
	 * ARM erratum 1165522 requires the actual execution of the above
	 * before we can switch to the EL2/EL0 translation regime used by
	 * the host.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));

	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}
NOKPROBE_SYMBOL(deactivate_traps_vhe);

static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	__deactivate_traps_common();

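	/*
	 * Setting E2PB to all-ones hands the SPE profiling buffer back
	 * to EL1 and leaves its accesses untrapped; the host may enable
	 * SPE again once its sysregs are restored.
	 */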
	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}

	if (has_vhe())
		deactivate_traps_vhe();
	else
		__deactivate_traps_nvhe();
}

void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}

static void __hyp_text __activate_vm(struct kvm *kvm)
{
	__load_guest_stage2(kvm);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
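	/* AT S1E1R: stage-1 EL1 read translation; the result lands in PAR_EL1 */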
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries erratum 834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
	bool vhe, sve_guest, sve_host;
	u8 hsr_ec;

	if (!system_supports_fpsimd())
		return false;

	if (system_supports_sve()) {
		sve_guest = vcpu_has_sve(vcpu);
		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
		vhe = true;
	} else {
		sve_guest = false;
		sve_host = false;
		vhe = has_vhe();
	}

	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
	    hsr_ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest)
		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
			return false;

	/* Valid trap. Switch the context: */

	if (vhe) {
		u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		write_sysreg(reg, cpacr_el1);
	} else {
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);
	}

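	/* Make the trap disable visible before the FP/SVE accesses below */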
	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		/*
		 * In the SVE case, VHE is assumed: it is enforced by
		 * Kconfig and kvm_arch_init().
		 */
		if (sve_host) {
			struct thread_struct *thread = container_of(
				vcpu->arch.host_fpsimd_state,
				struct thread_struct, uw.fpsimd_state);

			sve_save_state(sve_pffr(thread),
				       &vcpu->arch.host_fpsimd_state->fpsr);
		} else {
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
		}

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest) {
		sve_load_state(vcpu_sve_pffr(vcpu),
			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
		write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
	} else {
		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
	}

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
			     fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}

static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
	    handle_tx2_tvm(vcpu))
		return true;

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 * Similarly for trapped SVE accesses.
	 */
	if (__hyp_handle_fpsimd(vcpu))
		return true;

	if (!__populate_fault_info(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1)
			return true;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;
}

static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_const_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
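	/*
	 * The second argument to ARM_SMCCC_ARCH_WORKAROUND_2 is 1 to
	 * enable the firmware SSBD mitigation on this CPU, 0 to disable
	 * it.
	 */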
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

/**
 * Disable host events, enable guest events
 */
static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

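	/* PMCNTENCLR_EL0 stops the counters in the mask; PMCNTENSET_EL0 starts them */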
	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/**
 * Disable guest events, enable host events
 */
static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	/*
	 * ARM erratum 1165522 requires us to configure both stage 1 and
	 * stage 2 translation for the guest context before we clear
	 * HCR_EL2.TGE.
	 *
	 * We have already configured the guest's stage 1 translation in
	 * kvm_vcpu_load_sysregs above. We must now call __activate_vm
	 * before __activate_traps, because __activate_vm configures
	 * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
	 * (among other things).
	 */
	__activate_vm(vcpu->kvm);
	__activate_traps(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

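	/*
	 * Enter the guest; fixup_guest_exit() resolves exits that can be
	 * handled entirely in hyp (lazy FP/SVE switching, GIC cpuif
	 * traps, ...) and returns true to re-enter without bouncing back
	 * to the host run loop.
	 */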
	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);

/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		dsb(sy);
	}

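	/* Host pointers are only usable at EL2 once translated with kern_hyp_va() */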
	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);

	__activate_vm(kern_hyp_va(vcpu->kvm));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu;
	unsigned long str_va;

	vcpu = __host_ctxt->__hyp_running_vcpu;

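	/* A non-zero VTTBR_EL2 means we panicked with the guest context still live */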
	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(__host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
		       read_sysreg(hpfar_el2), par, vcpu);
}

static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
				 struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu;

	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);

void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg(par_el1);

	if (!has_vhe())
		__hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
	else
		__hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

	unreachable();
}