// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/kvm_host.h>
#include <linux/swab.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

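/*
 * Work out the endianness of the faulting guest access: a 32bit guest
 * signals big-endian through the E bit of its SPSR (read back from
 * SPSR_EL2), a 64bit guest through SCTLR_EL1.EE.
 */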
static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(read_sysreg_el2(SYS_SPSR) & PSR_AA32_E_BIT);

	return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
}

/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *				      guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access: the faulting instruction was skipped, but the
 *     access itself was not performed
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t fault_ipa;
	void __iomem *addr;
	int rd;

	/*
	 * Build the full address: the reported fault IPA only has page
	 * granularity, the offset within the page comes from the HFAR.
	 */
	fault_ipa  = kvm_vcpu_get_fault_ipa(vcpu);
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	/* If not for GICV, move on */
	if (fault_ipa <  vgic->vgic_cpu_base ||
	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
		return 0;

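	/*
	 * An access we cannot proxy is still consumed: the faulting
	 * instruction is skipped and -1 is returned, so that the caller
	 * can report the failure back to the guest (the hyp exit path is
	 * expected to escalate it, for example as an SError).
	 */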
	/* Reject anything but a 32bit access */
	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32)) {
		__kvm_skip_instr(vcpu);
		return -1;
	}

	/* Not aligned? Don't bother */
	if (fault_ipa & 3) {
		__kvm_skip_instr(vcpu);
		return -1;
	}

	rd = kvm_vcpu_dabt_get_rd(vcpu);
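
	/*
	 * Translate the faulting IPA into the EL2 mapping of the GICV
	 * interface: vcpu_hyp_va is where the GICV range is mapped at
	 * EL2, to which the offset of the fault within the guest's GICV
	 * frame is added.
	 */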
	addr  = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
	addr += fault_ipa - vgic->vgic_cpu_base;

	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		u32 data = vcpu_get_reg(vcpu, rd);
		if (__is_be(vcpu)) {
			/* guest pre-swabbed data, undo this for writel() */
			data = __kvm_swab32(data);
		}
		writel_relaxed(data, addr);
	} else {
		u32 data = readl_relaxed(addr);
		if (__is_be(vcpu)) {
			/* guest expects swabbed data */
			data = __kvm_swab32(data);
		}
		vcpu_set_reg(vcpu, rd, data);
	}

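	/*
	 * The access has been emulated here at EL2, so the trapped
	 * instruction has to be skipped by hand before re-entering the
	 * guest.
	 */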
	__kvm_skip_instr(vcpu);

	return 1;
}
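
/*
 * Usage sketch (not part of this file, shown here for illustration): the
 * EL2 exit handler that traps the guest's data abort is expected to
 * dispatch on the return value roughly as below. The exit_code variable
 * and the ARM_EXCEPTION_EL1_SERROR escalation mirror the hyp switch
 * code, but the exact shape of the caller is an assumption, not a
 * definition.
 *
 *	int ret = __vgic_v2_perform_cpuif_access(vcpu);
 *
 *	if (ret == 1)		// access proxied, re-enter the guest
 *		return true;
 *	if (ret == -1)		// illegal access, promote it to an SError
 *		*exit_code = ARM_EXCEPTION_EL1_SERROR;
 *	return false;		// ret == 0: not GICV, take the slow path
 */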