// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv3 MMIO handling functions
 */

#include <linux/bitfield.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

/* extract @num bytes at byte offset @offset in @data */
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}
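
/* e.g. extract_bytes(0x1122334455667788, 4, 2) == 0x3344 */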

/* allows updates of any half of a 64-bit register (or the whole thing) */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val)
{
	int lower = (offset & 4) * 8;
	int upper = lower + 8 * len - 1;

	reg &= ~GENMASK_ULL(upper, lower);
	val &= GENMASK_ULL(len * 8 - 1, 0);

	return reg | ((u64)val << lower);
}
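
/*
 * e.g. a 4-byte write of 0xdeadbeef at offset 4 only replaces bits [63:32]:
 * update_64bit_reg(0x1111111122222222, 4, 4, 0xdeadbeef) == 0xdeadbeef22222222
 */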

bool vgic_has_its(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	return dist->has_its;
}

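/*
 * Direct injection of MSIs (GICv4 vLPIs) needs both GICv4 support on the
 * host and an ITS in the guest to provide the translation.
 */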
bool vgic_supports_direct_msis(struct kvm *kvm)
{
	return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
}

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */

static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value = 0;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vgic->enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		if (vgic->nassgireq)
			value |= GICD_CTLR_nASSGIreq;
		break;
	case GICD_TYPER:
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		if (vgic_has_its(vcpu->kvm)) {
			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
			value |= GICD_TYPER_LPIS;
		} else {
			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		}
		break;
	case GICD_TYPER2:
		if (kvm_vgic_global_state.has_gicv4_1)
			value = GICD_TYPER2_nASSGIcap;
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	switch (addr & 0x0c) {
	case GICD_CTLR: {
		bool was_enabled, is_hwsgi;

		mutex_lock(&vcpu->kvm->lock);

		was_enabled = dist->enabled;
		is_hwsgi = dist->nassgireq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

		/* Not a GICv4.1? No HW SGIs */
		if (!kvm_vgic_global_state.has_gicv4_1)
			val &= ~GICD_CTLR_nASSGIreq;

		/* Dist stays enabled? nASSGIreq is RO */
		if (was_enabled && dist->enabled) {
			val &= ~GICD_CTLR_nASSGIreq;
			val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
		}

		/* Switching HW SGIs? */
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		if (is_hwsgi != dist->nassgireq)
			vgic_v4_configure_vsgis(vcpu->kvm);

		if (kvm_vgic_global_state.has_gicv4_1 &&
		    was_enabled != dist->enabled)
			kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
		else if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);

		mutex_unlock(&vcpu->kvm->lock);
		break;
	}
	case GICD_TYPER:
	case GICD_TYPER2:
	case GICD_IIDR:
		/* This is at best for documentation purposes... */
		return;
	}
}

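/*
 * Userspace (save/restore) path: GICD_TYPER2 and GICD_IIDR writes must
 * match what the guest would read back; GICD_CTLR state is latched
 * directly, without the side effects (vSGI reconfiguration, VCPU kicks)
 * that a guest write triggers.
 */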
static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	switch (addr & 0x0c) {
	case GICD_TYPER2:
	case GICD_IIDR:
		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
			return -EINVAL;
		return 0;
	case GICD_CTLR:
		/* Not a GICv4.1? No HW SGIs */
		if (!kvm_vgic_global_state.has_gicv4_1)
			val &= ~GICD_CTLR_nASSGIreq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		return 0;
	}

	vgic_mmio_write_v3_misc(vcpu, addr, len, val);
	return 0;
}

static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
	unsigned long ret = 0;

	if (!irq)
		return 0;

	/* The upper word is RAZ for us. */
	if (!(addr & 4))
		ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}

static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq;
	unsigned long flags;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	irq = vgic_get_irq(vcpu->kvm, NULL, intid);

	if (!irq)
		return;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);
}

static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
}


static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	bool was_enabled = vgic_cpu->lpis_enabled;

	if (!vgic_has_its(vcpu->kvm))
		return;

	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;

	if (was_enabled && !vgic_cpu->lpis_enabled) {
		vgic_flush_pending_lpis(vcpu);
		vgic_its_invalidate_cache(vcpu->kvm);
	}

	if (!was_enabled && vgic_cpu->lpis_enabled)
		vgic_enable_lpis(vcpu);
}

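/*
 * GICR_TYPER layout, as built below: the VCPU's MPIDR Aff0-2 in the
 * Affinity value field (bits [63:32]), the vcpu_id as Processor_Number
 * (bits [23:8]), Last in bit 4 and PLPIS in bit 0.
 */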
static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_redist_region *rdreg = vgic_cpu->rdreg;
	int target_vcpu_id = vcpu->vcpu_id;
	gpa_t last_rdist_typer = rdreg->base + GICR_TYPER +
			(rdreg->free_index - 1) * KVM_VGIC_V3_REDIST_SIZE;
	u64 value;

	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
	value |= ((target_vcpu_id & 0xffff) << 8);

	if (addr == last_rdist_typer)
		value |= GICR_TYPER_LAST;
	if (vgic_has_its(vcpu->kvm))
		value |= GICR_TYPER_PLPIS;

	return extract_bytes(value, addr & 7, len);
}

static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}

static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GICD_PIDR2:
		/* report a GICv3 compliant implementation */
		return 0x3b;
	}

	return 0;
}

static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
						  gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/*
	 * The pending state of an interrupt is latched in the pending_latch
	 * variable. Userspace saves and restores the pending state and
	 * line_level separately.
	 * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
	 * for the handling of ISPENDR and ICPENDR.
	 */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		bool state = irq->pending_latch;

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &state);
			WARN_ON(err);
		}

		if (state)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
					 gpa_t addr, unsigned int len,
					 unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (test_bit(i, &val)) {
			/*
			 * pending_latch is set irrespective of irq type
			 * (level or edge) to avoid a dependency on the VM
			 * restoring the irq config before the pending info.
			 */
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			irq->pending_latch = false;
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
	switch (field) {
	case GIC_BASER_OuterShareable:
		return GIC_BASER_InnerShareable;
	default:
		return field;
	}
}

/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_nCnB:
	case GIC_BASER_CACHE_nC:
		return GIC_BASER_CACHE_RaWb;
	default:
		return field;
	}
}

/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_SameAsInner:
	case GIC_BASER_CACHE_nC:
		return field;
	default:
		return GIC_BASER_CACHE_nC;
	}
}

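/*
 * Extract a field from @reg, run it through @sanitise_fn and insert the
 * sanitised value back into the register.
 */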
u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64))
{
	u64 field = (reg & field_mask) >> field_shift;

	field = sanitise_fn(field) << field_shift;
	return (reg & ~field_mask) | field;
}

#define PROPBASER_RES0_MASK						\
	(GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK						\
	(BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |	\
	 GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))

static u64 vgic_sanitise_pendbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
				  GICR_PENDBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
				  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PENDBASER_RES0_MASK;

	return reg;
}

static u64 vgic_sanitise_propbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
				  GICR_PROPBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
				  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PROPBASER_RES0_MASK;
	return reg;
}

static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return extract_bytes(dist->propbaser, addr & 7, len);
}

static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_propbaser, propbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

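	/*
	 * Update the register atomically: other VCPUs may write PROPBASER
	 * concurrently, so retry with cmpxchg64() rather than take a lock.
	 */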
	do {
		old_propbaser = READ_ONCE(dist->propbaser);
		propbaser = old_propbaser;
		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
		propbaser = vgic_sanitise_propbaser(propbaser);
	} while (cmpxchg64(&dist->propbaser, old_propbaser,
			   propbaser) != old_propbaser);
}

static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 value = vgic_cpu->pendbaser;

	value &= ~GICR_PENDBASER_PTZ;

	return extract_bytes(value, addr & 7, len);
}

static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_pendbaser, pendbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

	do {
		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
		pendbaser = old_pendbaser;
		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
		pendbaser = vgic_sanitise_pendbaser(pendbaser);
	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
			   pendbaser) != old_pendbaser);
}

/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
		.access_flags = acc,					\
		.read = vgic_mmio_read_raz,				\
		.write = vgic_mmio_write_wi,				\
	}, {								\
		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
		.bits_per_irq = bpi,					\
		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}

static const struct vgic_register_region vgic_v3_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICD_CTLR,
		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc,
		NULL, vgic_mmio_uaccess_write_v3_misc,
		16, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
		vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_senable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive,
		1, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
		vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_rd_registers[] = {
	/* RD_base registers */
	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
		vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
	/* SGI_base registers */
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
		vgic_mmio_read_group, vgic_mmio_write_group, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ISENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_senable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_cenable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		NULL, vgic_mmio_uaccess_write_sactive,
		4, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		NULL, vgic_mmio_uaccess_write_cactive,
		4, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICFGR0,
		vgic_mmio_read_config, vgic_mmio_write_config, 8,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGRPMODR0,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_NSACR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v3_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_64K;
}

/**
 * vgic_register_redist_iodev - register a single redist iodev
 * @vcpu: The VCPU to which the redistributor belongs
 *
 * Register a KVM iodev for this VCPU's redistributor using the address
 * provided.
 *
 * Return 0 on success, -ERRNO otherwise.
 */
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_redist_region *rdreg;
	gpa_t rd_base;
	int ret;

	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
		return 0;

	/*
	 * We may be creating VCPUs before having set the base address for the
	 * redistributor region, in which case we will come back to this
	 * function for all VCPUs when the base address is set. Just return
	 * without doing any work for now.
	 */
	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
	if (!rdreg)
		return 0;

	if (!vgic_v3_check_base(kvm))
		return -EINVAL;

	vgic_cpu->rdreg = rdreg;

	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;

	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
	rd_dev->base_addr = rd_base;
	rd_dev->iodev_type = IODEV_REDIST;
	rd_dev->regions = vgic_v3_rd_registers;
	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
	rd_dev->redist_vcpu = vcpu;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
				      2 * SZ_64K, &rd_dev->dev);
	mutex_unlock(&kvm->slots_lock);

	if (ret)
		return ret;

	rdreg->free_index++;
	return 0;
}

static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;

	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
}

static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c, ret = 0;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		ret = vgic_register_redist_iodev(vcpu);
		if (ret)
			break;
	}

	if (ret) {
		/* The current c failed, so we start with the previous one. */
		mutex_lock(&kvm->slots_lock);
		for (c--; c >= 0; c--) {
			vcpu = kvm_get_vcpu(kvm, c);
			vgic_unregister_redist_iodev(vcpu);
		}
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}

/**
 * vgic_v3_insert_redist_region - Insert a new redistributor region
 *
 * Performs various checks before inserting the rdist region in the list.
 * Those tests depend on whether the size of the rdist region is known
 * (i.e. count != 0). The list is sorted by rdist region index.
 *
 * @kvm: kvm handle
 * @index: redist region index
 * @base: base of the new rdist region
 * @count: number of redistributors the region is made of (0 in the old style
 * single region, whose size is induced from the number of vcpus)
 *
 * Return 0 on success, < 0 otherwise
 */
static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
					gpa_t base, uint32_t count)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;
	struct list_head *rd_regions = &d->rd_regions;
	size_t size = count * KVM_VGIC_V3_REDIST_SIZE;
	int ret;

	/* single rdist region already set? */
	if (!count && !list_empty(rd_regions))
		return -EINVAL;

	/* cross the end of memory? */
	if (base + size < base)
		return -EINVAL;

	if (list_empty(rd_regions)) {
		if (index != 0)
			return -EINVAL;
	} else {
		rdreg = list_last_entry(rd_regions,
					struct vgic_redist_region, list);
		if (index != rdreg->index + 1)
			return -EINVAL;

		/* Cannot add an explicitly sized region after a legacy region */
		if (!rdreg->count)
			return -EINVAL;
	}

	/*
	 * For legacy single-region redistributor regions (!count),
	 * check that the redistributor region does not overlap with the
	 * distributor's address space.
	 */
	if (!count && !IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
		vgic_dist_overlap(kvm, base, size))
		return -EINVAL;

	/* collision with any other rdist region? */
	if (vgic_v3_rdist_overlap(kvm, base, size))
		return -EINVAL;

	rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL);
	if (!rdreg)
		return -ENOMEM;

	rdreg->base = VGIC_ADDR_UNDEF;

	ret = vgic_check_ioaddr(kvm, &rdreg->base, base, SZ_64K);
	if (ret)
		goto free;

	rdreg->base = base;
	rdreg->count = count;
	rdreg->free_index = 0;
	rdreg->index = index;

	list_add_tail(&rdreg->list, rd_regions);
	return 0;
free:
	kfree(rdreg);
	return ret;
}

int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
	int ret;

	ret = vgic_v3_insert_redist_region(kvm, index, addr, count);
	if (ret)
		return ret;

	/*
	 * Register iodevs for each existing VCPU. Adding more VCPUs
	 * afterwards will register the iodevs when needed.
	 */
	ret = vgic_register_all_redist_iodevs(kvm);
	if (ret)
		return ret;

	return 0;
}

int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v3_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		iodev.regions = vgic_v3_rd_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
		iodev.base_addr = 0;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 reg, id;

		id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
		return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
	}
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}

/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
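
/*
 * e.g. SGI_AFFINITY_LEVEL(reg, 1) moves the Aff1 field of ICC_SGI1R_EL1
 * into the Aff1 slot of an MPIDR value (bits [15:8]).
 */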

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
 * @allow_group1: Does the sysreg access allow generation of G1 SGIs
 *
 * With GICv3 (and ARE=1), CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;
	unsigned long flags;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * if we are already finished. This avoids iterating through all
	 * VCPUs when most of the time we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {
		struct vgic_irq *irq;

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			int level0;

			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * An access targeting Group0 SGIs can only generate
		 * those, while an access targeting Group1 SGIs can
		 * generate interrupts of either group.
		 */
		if (!irq->group || allow_group1) {
			if (!irq->hw) {
				irq->pending_latch = true;
				vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
			} else {
				/* HW SGI? Ask the GIC to inject it */
				int err;

				err = irq_set_irqchip_state(irq->host_irq,
							    IRQCHIP_STATE_PENDING,
							    true);
				WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
				raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			}
		} else {
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v3_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			   int offset, u32 *val)
{
	struct vgic_io_device rd_dev = {
		.regions = vgic_v3_rd_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers),
	};

	return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}

int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u64 *val)
{
	if (intid % 32)
		return -EINVAL;

	if (is_write)
		vgic_write_irq_line_level_info(vcpu, intid, *val);
	else
		*val = vgic_read_irq_line_level_info(vcpu, intid);

	return 0;
}