// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGICv3 MMIO handling functions
 */

#include <linux/bitfield.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

/* extract @num bytes at @offset bytes offset in data */
unsigned long extract_bytes(u64 data, unsigned int offset,
			    unsigned int num)
{
	return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
}

/* allows updates of any half of a 64-bit register (or the whole thing) */
u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
		     unsigned long val)
{
	int lower = (offset & 4) * 8;
	int upper = lower + 8 * len - 1;

	reg &= ~GENMASK_ULL(upper, lower);
	val &= GENMASK_ULL(len * 8 - 1, 0);

	return reg | ((u64)val << lower);
}

bool vgic_has_its(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
		return false;

	return dist->has_its;
}

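/*
 * Direct injection needs GICv4 support in the host: GICv4.1 always
 * qualifies, while plain GICv4 only does once the guest has an ITS to
 * route its MSIs through.
 */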
bool vgic_supports_direct_msis(struct kvm *kvm)
{
	return (kvm_vgic_global_state.has_gicv4_1 ||
		(kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
}

/*
 * The Revision field in the IIDR has the following meanings:
 *
 * Revision 2: Interrupt groups are guest-configurable and signaled using
 *	       their configured groups.
 */

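/*
 * Emulate reads of the "misc" block at the start of the distributor:
 * GICD_CTLR, GICD_TYPER, GICD_TYPER2 and GICD_IIDR.
 */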
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	u32 value = 0;

	switch (addr & 0x0c) {
	case GICD_CTLR:
		if (vgic->enabled)
			value |= GICD_CTLR_ENABLE_SS_G1;
		value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
		if (vgic->nassgireq)
			value |= GICD_CTLR_nASSGIreq;
		break;
	case GICD_TYPER:
		value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
		value = (value >> 5) - 1;
		if (vgic_has_its(vcpu->kvm)) {
			value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
			value |= GICD_TYPER_LPIS;
		} else {
			value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
		}
		break;
	case GICD_TYPER2:
		if (kvm_vgic_global_state.has_gicv4_1 && gic_cpuif_has_vsgi())
			value = GICD_TYPER2_nASSGIcap;
		break;
	case GICD_IIDR:
		value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
			(vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
			(IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
		break;
	default:
		return 0;
	}

	return value;
}

static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	switch (addr & 0x0c) {
	case GICD_CTLR: {
		bool was_enabled, is_hwsgi;

		mutex_lock(&vcpu->kvm->lock);

		was_enabled = dist->enabled;
		is_hwsgi = dist->nassgireq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;

		/* Not a GICv4.1? No HW SGIs */
		if (!kvm_vgic_global_state.has_gicv4_1 || !gic_cpuif_has_vsgi())
			val &= ~GICD_CTLR_nASSGIreq;

		/* Dist stays enabled? nASSGIreq is RO */
		if (was_enabled && dist->enabled) {
			val &= ~GICD_CTLR_nASSGIreq;
			val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
		}

		/* Switching HW SGIs? */
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		if (is_hwsgi != dist->nassgireq)
			vgic_v4_configure_vsgis(vcpu->kvm);

		if (kvm_vgic_global_state.has_gicv4_1 &&
		    was_enabled != dist->enabled)
			kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
		else if (!was_enabled && dist->enabled)
			vgic_kick_vcpus(vcpu->kvm);

		mutex_unlock(&vcpu->kvm->lock);
		break;
	}
	case GICD_TYPER:
	case GICD_TYPER2:
	case GICD_IIDR:
		/* This is at best for documentation purposes... */
		return;
	}
}

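/*
 * Userspace (KVM device attribute) writes to the misc block: GICD_TYPER2
 * and GICD_IIDR may only be written with the values we would read back,
 * while GICD_CTLR is restored directly, without the side effects of a
 * guest-initiated write.
 */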
static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
					   gpa_t addr, unsigned int len,
					   unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	switch (addr & 0x0c) {
	case GICD_TYPER2:
	case GICD_IIDR:
		if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
			return -EINVAL;
		return 0;
	case GICD_CTLR:
		/* Not a GICv4.1? No HW SGIs */
		if (!kvm_vgic_global_state.has_gicv4_1)
			val &= ~GICD_CTLR_nASSGIreq;

		dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
		dist->nassgireq = val & GICD_CTLR_nASSGIreq;
		return 0;
	}

	vgic_mmio_write_v3_misc(vcpu, addr, len, val);
	return 0;
}

static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
					    gpa_t addr, unsigned int len)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
	unsigned long ret = 0;

	if (!irq)
		return 0;

	/* The upper word is RAZ for us. */
	if (!(addr & 4))
		ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}

static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	int intid = VGIC_ADDR_TO_INTID(addr, 64);
	struct vgic_irq *irq;
	unsigned long flags;

	/* The upper word is WI for us since we don't implement Aff3. */
	if (addr & 4)
		return;

	irq = vgic_get_irq(vcpu->kvm, NULL, intid);

	if (!irq)
		return;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	/* We only care about and preserve Aff0, Aff1 and Aff2. */
	irq->mpidr = val & GENMASK(23, 0);
	irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);
}

static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
}

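/*
 * Only the EnableLPIs bit of GICR_CTLR is writable by the guest, and only
 * when an ITS is present. Disabling LPIs flushes any pending LPIs and
 * invalidates the ITS translation cache; enabling them calls
 * vgic_enable_lpis() to set the redistributor up.
 */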
static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	bool was_enabled = vgic_cpu->lpis_enabled;

	if (!vgic_has_its(vcpu->kvm))
		return;

	vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;

	if (was_enabled && !vgic_cpu->lpis_enabled) {
		vgic_flush_pending_lpis(vcpu);
		vgic_its_invalidate_cache(vcpu->kvm);
	}

	if (!was_enabled && vgic_cpu->lpis_enabled)
		vgic_enable_lpis(vcpu);
}

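/*
 * Determine whether this VCPU's redistributor is the last one in a series
 * of contiguous redistributor pages, which is what GICR_TYPER.Last
 * advertises to the guest.
 */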
static bool vgic_mmio_vcpu_rdist_is_last(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_redist_region *iter, *rdreg = vgic_cpu->rdreg;

	if (!rdreg)
		return false;

	if (vgic_cpu->rdreg_index < rdreg->free_index - 1) {
		return false;
	} else if (rdreg->count && vgic_cpu->rdreg_index == (rdreg->count - 1)) {
		struct list_head *rd_regions = &vgic->rd_regions;
		gpa_t end = rdreg->base + rdreg->count * KVM_VGIC_V3_REDIST_SIZE;

		/*
		 * This rdist is the last one of its redist region:
		 * check whether another redist region starts right after it.
		 */
		list_for_each_entry(iter, rd_regions, list) {
			if (iter->base == end && iter->free_index > 0)
				return false;
		}
	}
	return true;
}

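/*
 * GICR_TYPER is assembled from the VCPU's MPIDR affinity (Aff0-Aff2), the
 * linear processor number, PLPIS when an ITS exists, and the Last bit
 * computed above.
 */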
static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
	int target_vcpu_id = vcpu->vcpu_id;
	u64 value;

	value = (u64)(mpidr & GENMASK(23, 0)) << 32;
	value |= ((target_vcpu_id & 0xffff) << 8);

	if (vgic_has_its(vcpu->kvm))
		value |= GICR_TYPER_PLPIS;

	if (vgic_mmio_vcpu_rdist_is_last(vcpu))
		value |= GICR_TYPER_LAST;

	return extract_bytes(value, addr & 7, len);
}

static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
}

static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
					      gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GICD_PIDR2:
		/* report a GICv3 compliant implementation */
		return 0x3b;
	}

	return 0;
}

static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
						  gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/*
	 * The pending state of an interrupt is latched in the pending_latch
	 * variable. Userspace saves and restores the pending state and the
	 * line_level separately.
	 * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
	 * for the handling of ISPENDR and ICPENDR.
	 */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		bool state = irq->pending_latch;

		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
			int err;

			err = irq_get_irqchip_state(irq->host_irq,
						    IRQCHIP_STATE_PENDING,
						    &state);
			WARN_ON(err);
		}

		if (state)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

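/*
 * Userspace restore of the pending bits: a set bit latches the interrupt
 * pending and queues it, a clear bit simply clears the latch.
 */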
static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
					 gpa_t addr, unsigned int len,
					 unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		if (test_bit(i, &val)) {
			/*
			 * pending_latch is set irrespective of irq type
			 * (level or edge) to avoid a dependency on the VM
			 * having restored the irq config before the pending
			 * info.
			 */
			irq->pending_latch = true;
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		} else {
			irq->pending_latch = false;
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}

	return 0;
}

/* We want to avoid outer shareable. */
u64 vgic_sanitise_shareability(u64 field)
{
	switch (field) {
	case GIC_BASER_OuterShareable:
		return GIC_BASER_InnerShareable;
	default:
		return field;
	}
}

/* Avoid any inner non-cacheable mapping. */
u64 vgic_sanitise_inner_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_nCnB:
	case GIC_BASER_CACHE_nC:
		return GIC_BASER_CACHE_RaWb;
	default:
		return field;
	}
}

/* Non-cacheable or same-as-inner are OK. */
u64 vgic_sanitise_outer_cacheability(u64 field)
{
	switch (field) {
	case GIC_BASER_CACHE_SameAsInner:
	case GIC_BASER_CACHE_nC:
		return field;
	default:
		return GIC_BASER_CACHE_SameAsInner;
	}
}

u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
			u64 (*sanitise_fn)(u64))
{
	u64 field = (reg & field_mask) >> field_shift;

	field = sanitise_fn(field) << field_shift;
	return (reg & ~field_mask) | field;
}

#define PROPBASER_RES0_MASK						\
	(GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
#define PENDBASER_RES0_MASK						\
	(BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |	\
	 GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))

static u64 vgic_sanitise_pendbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
				  GICR_PENDBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
				  GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PENDBASER_RES0_MASK;

	return reg;
}

static u64 vgic_sanitise_propbaser(u64 reg)
{
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
				  GICR_PROPBASER_SHAREABILITY_SHIFT,
				  vgic_sanitise_shareability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
				  GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
				  vgic_sanitise_inner_cacheability);
	reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
				  GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
				  vgic_sanitise_outer_cacheability);

	reg &= ~PROPBASER_RES0_MASK;
	return reg;
}

static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return extract_bytes(dist->propbaser, addr & 7, len);
}

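/*
 * PROPBASER/PENDBASER writes sanitise the memory attributes and update the
 * 64-bit register with a cmpxchg loop, so that concurrent writes (e.g. to
 * the two 32-bit halves) cannot tear the value.
 */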
static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_propbaser, propbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

	do {
		old_propbaser = READ_ONCE(dist->propbaser);
		propbaser = old_propbaser;
		propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
		propbaser = vgic_sanitise_propbaser(propbaser);
	} while (cmpxchg64(&dist->propbaser, old_propbaser,
			   propbaser) != old_propbaser);
}

static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
					     gpa_t addr, unsigned int len)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 value = vgic_cpu->pendbaser;

	value &= ~GICR_PENDBASER_PTZ;

	return extract_bytes(value, addr & 7, len);
}

static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u64 old_pendbaser, pendbaser;

	/* Storing a value with LPIs already enabled is undefined */
	if (vgic_cpu->lpis_enabled)
		return;

	do {
		old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
		pendbaser = old_pendbaser;
		pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
		pendbaser = vgic_sanitise_pendbaser(pendbaser);
	} while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
			   pendbaser) != old_pendbaser);
}

/*
 * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
 * redistributors, while SPIs are covered by registers in the distributor
 * block. Trying to set private IRQs in this block gets ignored.
 * We take some special care here to fix the calculation of the register
 * offset.
 */
#define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
	{								\
		.reg_offset = off,					\
		.bits_per_irq = bpi,					\
		.len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,		\
		.access_flags = acc,					\
		.read = vgic_mmio_read_raz,				\
		.write = vgic_mmio_write_wi,				\
	}, {								\
		.reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,	\
		.bits_per_irq = bpi,					\
		.len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,	\
		.access_flags = acc,					\
		.read = rd,						\
		.write = wr,						\
		.uaccess_read = ur,					\
		.uaccess_write = uw,					\
	}

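/*
 * The distributor register table: each entry maps an offset range onto its
 * MMIO handlers and, where userspace needs a different view, onto dedicated
 * uaccess handlers.
 */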
static const struct vgic_register_region vgic_v3_dist_registers[] = {
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICD_CTLR,
		vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc,
		NULL, vgic_mmio_uaccess_write_v3_misc,
		16, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
		vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
		vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
		1, VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
		vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
		8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
		vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
		vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
};

static const struct vgic_register_region vgic_v3_rd_registers[] = {
	/* RD_base registers */
	REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
		vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
		vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(GICR_TYPER,
		vgic_mmio_read_v3r_typer, vgic_mmio_write_wi,
		NULL, vgic_mmio_uaccess_write_wi, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
		vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
		vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
		vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
		VGIC_ACCESS_32bit),
	/* SGI_base registers */
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
		vgic_mmio_read_group, vgic_mmio_write_group, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_senable,
		NULL, vgic_uaccess_write_senable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICENABLER0,
		vgic_mmio_read_enable, vgic_mmio_write_cenable,
		NULL, vgic_uaccess_write_cenable, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_spending,
		vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0,
		vgic_mmio_read_pending, vgic_mmio_write_cpending,
		vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_sactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
		vgic_mmio_read_active, vgic_mmio_write_cactive,
		vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
		vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
		VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICFGR0,
		vgic_mmio_read_config, vgic_mmio_write_config, 8,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGRPMODR0,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
	REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_NSACR,
		vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
		VGIC_ACCESS_32bit),
};

unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
{
	dev->regions = vgic_v3_dist_registers;
	dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);

	kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);

	return SZ_64K;
}

/**
 * vgic_register_redist_iodev - register a single redist iodev
 * @vcpu: The VCPU to which the redistributor belongs
 *
 * Register a KVM iodev for this VCPU's redistributor using the address
 * provided.
 *
 * Return 0 on success, -ERRNO otherwise.
 */
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_redist_region *rdreg;
	gpa_t rd_base;
	int ret;

	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
		return 0;

	/*
	 * We may be creating VCPUs before having set the base address for the
	 * redistributor region, in which case we will come back to this
	 * function for all VCPUs when the base address is set. Just return
	 * without doing any work for now.
	 */
	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
	if (!rdreg)
		return 0;

	if (!vgic_v3_check_base(kvm))
		return -EINVAL;

	vgic_cpu->rdreg = rdreg;
	vgic_cpu->rdreg_index = rdreg->free_index;

	rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;

	kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
	rd_dev->base_addr = rd_base;
	rd_dev->iodev_type = IODEV_REDIST;
	rd_dev->regions = vgic_v3_rd_registers;
	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
	rd_dev->redist_vcpu = vcpu;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
				      2 * SZ_64K, &rd_dev->dev);
	mutex_unlock(&kvm->slots_lock);

	if (ret)
		return ret;

	rdreg->free_index++;
	return 0;
}

static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;

	kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
}

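/*
 * Register redistributor iodevs for every VCPU created so far; on failure,
 * unwind the registrations already performed.
 */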
static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c, ret = 0;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		ret = vgic_register_redist_iodev(vcpu);
		if (ret)
			break;
	}

	if (ret) {
		/* The current c failed, so we start with the previous one. */
		mutex_lock(&kvm->slots_lock);
		for (c--; c >= 0; c--) {
			vcpu = kvm_get_vcpu(kvm, c);
			vgic_unregister_redist_iodev(vcpu);
		}
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}

/**
 * vgic_v3_alloc_redist_region - Allocate a new redistributor region
 *
 * Performs various checks before inserting the rdist region in the list.
 * Those tests depend on whether the size of the rdist region is known
 * (i.e. count != 0). The list is sorted by rdist region index.
 *
 * @kvm: kvm handle
 * @index: redist region index
 * @base: base of the new rdist region
 * @count: number of redistributors the region is made of (0 in the old style
 * single region, whose size is induced from the number of vcpus)
 *
 * Return 0 on success, < 0 otherwise
 */
static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
				       gpa_t base, uint32_t count)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;
	struct list_head *rd_regions = &d->rd_regions;
	int nr_vcpus = atomic_read(&kvm->online_vcpus);
	size_t size = count ? count * KVM_VGIC_V3_REDIST_SIZE
			    : nr_vcpus * KVM_VGIC_V3_REDIST_SIZE;
	int ret;

	/* Does the region wrap around the end of the address space? */
	if (base + size < base)
		return -EINVAL;

	if (list_empty(rd_regions)) {
		if (index != 0)
			return -EINVAL;
	} else {
		rdreg = list_last_entry(rd_regions,
					struct vgic_redist_region, list);

		/* Don't mix single region and discrete redist regions */
		if (!count && rdreg->count)
			return -EINVAL;

		if (!count)
			return -EEXIST;

		if (index != rdreg->index + 1)
			return -EINVAL;
	}

	/*
	 * For legacy single-region redistributor regions (!count),
	 * check that the redistributor region does not overlap with the
	 * distributor's address space.
	 */
	if (!count && !IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    vgic_dist_overlap(kvm, base, size))
		return -EINVAL;

	/* collision with any other rdist region? */
	if (vgic_v3_rdist_overlap(kvm, base, size))
		return -EINVAL;

	rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL_ACCOUNT);
	if (!rdreg)
		return -ENOMEM;

	rdreg->base = VGIC_ADDR_UNDEF;

	ret = vgic_check_iorange(kvm, rdreg->base, base, SZ_64K, size);
	if (ret)
		goto free;

	rdreg->base = base;
	rdreg->count = count;
	rdreg->free_index = 0;
	rdreg->index = index;

	list_add_tail(&rdreg->list, rd_regions);
	return 0;
free:
	kfree(rdreg);
	return ret;
}

void vgic_v3_free_redist_region(struct vgic_redist_region *rdreg)
{
	list_del(&rdreg->list);
	kfree(rdreg);
}

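/*
 * Userspace entry point (the KVM_VGIC_V3_ADDR_TYPE_REDIST* attributes):
 * allocate the region, then register an iodev for every existing VCPU.
 * On failure the freshly allocated region is freed again.
 */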
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
{
	int ret;

	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
	if (ret)
		return ret;

	/*
	 * Register iodevs for each existing VCPU. Adding more VCPUs
	 * afterwards will register the iodevs when needed.
	 */
	ret = vgic_register_all_redist_iodevs(kvm);
	if (ret) {
		struct vgic_redist_region *rdreg;

		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		vgic_v3_free_redist_region(rdreg);
		return ret;
	}

	return 0;
}

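/*
 * Check whether a given device attribute targets an MMIO register (or a
 * CPU sysreg) that this emulation actually implements, without performing
 * the access itself.
 */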
int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	const struct vgic_register_region *region;
	struct vgic_io_device iodev;
	struct vgic_reg_attr reg_attr;
	struct kvm_vcpu *vcpu;
	gpa_t addr;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		iodev.regions = vgic_v3_dist_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
		iodev.base_addr = 0;
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS: {
		iodev.regions = vgic_v3_rd_registers;
		iodev.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
		iodev.base_addr = 0;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
		u64 reg, id;

		id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
		return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
	}
	default:
		return -ENXIO;
	}

	/* We only support aligned 32-bit accesses. */
	if (addr & 3)
		return -ENXIO;

	region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
	if (!region)
		return -ENXIO;

	return 0;
}

/*
 * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
 * generation register ICC_SGI1R_EL1) with a given VCPU.
 * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
 * return -1.
 */
static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
{
	unsigned long affinity;
	int level0;

	/*
	 * Split the current VCPU's MPIDR into affinity level 0 and the
	 * rest as this is what we have to compare against.
	 */
	affinity = kvm_vcpu_get_mpidr_aff(vcpu);
	level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
	affinity &= ~MPIDR_LEVEL_MASK;

	/* bail out if the upper three levels don't match */
	if (sgi_aff != affinity)
		return -1;

	/* Is this VCPU's bit set in the mask? */
	if (!(sgi_cpu_mask & BIT(level0)))
		return -1;

	return level0;
}

/*
 * The ICC_SGI* registers encode the affinity differently from the MPIDR,
 * so provide a wrapper to use the existing defines to isolate a certain
 * affinity level.
 */
#define SGI_AFFINITY_LEVEL(reg, level) \
	((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
	>> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))

/**
 * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
 * @vcpu: The VCPU requesting a SGI
 * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
 * @allow_group1: Does the sysreg access allow generation of G1 SGIs
 *
 * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
 * This will trap in sys_regs.c and call this function.
 * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
 * target processors as well as a bitmask of 16 Aff0 CPUs.
 * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
 * check for matching ones. If this bit is set, we signal all, but not the
 * calling VCPU.
 */
void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *c_vcpu;
	u16 target_cpus;
	u64 mpidr;
	int sgi, c;
	int vcpu_id = vcpu->vcpu_id;
	bool broadcast;
	unsigned long flags;

	sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
	broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
	target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
	mpidr = SGI_AFFINITY_LEVEL(reg, 3);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
	mpidr |= SGI_AFFINITY_LEVEL(reg, 1);

	/*
	 * We iterate over all VCPUs to find the MPIDRs matching the request.
	 * If we have handled one CPU, we clear its bit to detect early
	 * if we are already finished. This avoids iterating through all
	 * VCPUs when most of the time we just signal a single VCPU.
	 */
	kvm_for_each_vcpu(c, c_vcpu, kvm) {
		struct vgic_irq *irq;

		/* Exit early if we have dealt with all requested CPUs */
		if (!broadcast && target_cpus == 0)
			break;

		/* Don't signal the calling VCPU */
		if (broadcast && c == vcpu_id)
			continue;

		if (!broadcast) {
			int level0;

			level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
			if (level0 == -1)
				continue;

			/* remove this matching VCPU from the mask */
			target_cpus &= ~BIT(level0);
		}

		irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);

		/*
		 * An access targeting Group0 SGIs can only generate
		 * those, while an access targeting Group1 SGIs can
		 * generate interrupts of either group.
		 */
		if (!irq->group || allow_group1) {
			if (!irq->hw) {
				irq->pending_latch = true;
				vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
			} else {
				/* HW SGI? Ask the GIC to inject it */
				int err;

				err = irq_set_irqchip_state(irq->host_irq,
							    IRQCHIP_STATE_PENDING,
							    true);
				WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
				raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
			}
		} else {
			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		}

		vgic_put_irq(vcpu->kvm, irq);
	}
}

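/*
 * Userspace accessors: wrap the distributor/redistributor register tables
 * in a temporary vgic_io_device and go through the common vgic_uaccess()
 * path.
 */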
int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			 int offset, u32 *val)
{
	struct vgic_io_device dev = {
		.regions = vgic_v3_dist_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
	};

	return vgic_uaccess(vcpu, &dev, is_write, offset, val);
}

int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
			   int offset, u32 *val)
{
	struct vgic_io_device rd_dev = {
		.regions = vgic_v3_rd_registers,
		.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers),
	};

	return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
}

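/*
 * Save/restore the line_level state for a block of 32 interrupts at a time
 * (KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO); @intid must therefore be a multiple
 * of 32.
 */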
int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
				    u32 intid, u64 *val)
{
	if (intid % 32)
		return -EINVAL;

	if (is_write)
		vgic_write_irq_line_level_info(vcpu, intid, *val);
	else
		*val = vgic_read_irq_line_level_info(vcpu, intid);

	return 0;
}