// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_asm.h>

#include "vgic.h"

static bool group0_trap;
static bool group1_trap;
static bool common_trap;
static bool gicv4_enable;

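/*
 * Request a maintenance interrupt once the List Registers run out:
 * with ICH_HCR_EL2.UIE set, the GIC signals the hypervisor when none,
 * or only one, of the LRs holds a valid interrupt, so that they can
 * be refilled from the software ap_list.
 */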
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;

	cpuif->vgic_hcr |= ICH_HCR_UIE;
}

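/*
 * An LR signalled an EOI maintenance interrupt if it is back to the
 * invalid state (the guest completed the interrupt), has the EOI
 * request bit set, and is not tied to a hardware interrupt.
 */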
static bool lr_signals_eoi_mi(u64 lr_val)
{
	return !(lr_val & ICH_LR_STATE) && (lr_val & ICH_LR_EOI) &&
	       !(lr_val & ICH_LR_HW);
}

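/*
 * Fold the LR state back into the software model of each interrupt
 * after running the vCPU, so that the emulated distributor operates
 * on up-to-date pending/active state. Runs with interrupts disabled
 * on the guest exit path.
 */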
void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	int lr;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	cpuif->vgic_hcr &= ~ICH_HCR_UIE;

	for (lr = 0; lr < cpuif->used_lrs; lr++) {
		u64 val = cpuif->vgic_lr[lr];
		u32 intid, cpuid;
		struct vgic_irq *irq;
		bool is_v2_sgi = false;

		cpuid = val & GICH_LR_PHYSID_CPUID;
		cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			intid = val & ICH_LR_VIRTUAL_ID_MASK;
		} else {
			intid = val & GICH_LR_VIRTUALID;
			is_v2_sgi = vgic_irq_is_sgi(intid);
		}

		/* Notify fds when the guest EOI'ed a level-triggered IRQ */
		if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
			kvm_notify_acked_irq(vcpu->kvm, 0,
					     intid - VGIC_NR_PRIVATE_IRQS);

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
		if (!irq)	/* An LPI could have been unmapped. */
			continue;

		raw_spin_lock(&irq->irq_lock);

		/* Always preserve the active bit */
		irq->active = !!(val & ICH_LR_ACTIVE_BIT);

		if (irq->active && is_v2_sgi)
			irq->active_source = cpuid;

		/* Edge is the only case where we preserve the pending bit */
		if (irq->config == VGIC_CONFIG_EDGE &&
		    (val & ICH_LR_PENDING_BIT)) {
			irq->pending_latch = true;

			if (is_v2_sgi)
				irq->source |= (1 << cpuid);
		}

		/*
		 * Clear soft pending state when level irqs have been acked.
		 */
		if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
			irq->pending_latch = false;

		/*
		 * Level-triggered mapped IRQs are special because we only
		 * observe rising edges as input to the VGIC.
		 *
		 * If the guest never acked the interrupt we have to sample
		 * the physical line and set the line level, because the
		 * device state could have changed or we simply need to
		 * process the still pending interrupt later.
		 *
		 * If this causes us to lower the level, we have to also clear
		 * the physical active state, since we will otherwise never be
		 * told when the interrupt becomes asserted again.
		 */
		if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT)) {
			irq->line_level = vgic_get_phys_line_level(irq);

			if (!irq->line_level)
				vgic_irq_set_phys_active(irq, false);
		}

		raw_spin_unlock(&irq->irq_lock);
		vgic_put_irq(vcpu->kvm, irq);
	}

	cpuif->used_lrs = 0;
}

/* Requires the irq to be locked already */
void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
{
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u64 val = irq->intid;
	bool allow_pending = true, is_v2_sgi;

	is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
		     model == KVM_DEV_TYPE_ARM_VGIC_V2);

	if (irq->active) {
		val |= ICH_LR_ACTIVE_BIT;
		if (is_v2_sgi)
			val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
		if (vgic_irq_is_multi_sgi(irq)) {
			allow_pending = false;
			val |= ICH_LR_EOI;
		}
	}

	if (irq->hw) {
		val |= ICH_LR_HW;
		val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
		/*
		 * Never set pending+active on a HW interrupt, as the
		 * pending state is kept at the physical distributor
		 * level.
		 */
		if (irq->active)
			allow_pending = false;
	} else {
		if (irq->config == VGIC_CONFIG_LEVEL) {
			val |= ICH_LR_EOI;

			/*
			 * Software resampling doesn't work very well
			 * if we allow P+A, so let's not do that.
			 */
			if (irq->active)
				allow_pending = false;
		}
	}

	if (allow_pending && irq_is_pending(irq)) {
		val |= ICH_LR_PENDING_BIT;

		if (irq->config == VGIC_CONFIG_EDGE)
			irq->pending_latch = false;

		if (vgic_irq_is_sgi(irq->intid) &&
		    model == KVM_DEV_TYPE_ARM_VGIC_V2) {
			u32 src = ffs(irq->source);

			if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
					   irq->intid))
				return;

			val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
			irq->source &= ~(1 << (src - 1));
			if (irq->source) {
				irq->pending_latch = true;
				val |= ICH_LR_EOI;
			}
		}
	}

	/*
	 * Level-triggered mapped IRQs are special because we only observe
	 * rising edges as input to the VGIC. We therefore lower the line
	 * level here, so that we can take new virtual IRQs. See
	 * vgic_v3_fold_lr_state for more info.
	 */
	if (vgic_irq_is_mapped_level(irq) && (val & ICH_LR_PENDING_BIT))
		irq->line_level = false;

	if (irq->group)
		val |= ICH_LR_GROUP;

	val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;

	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
}

void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
}

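/*
 * Pack the emulated VMCR fields into the hardware ICH_VMCR_EL2 layout
 * (including the GICv2-only AckCtl/FIQEn bits when emulating a GICv2);
 * vgic_v3_get_vmcr() below performs the inverse unpacking.
 */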
void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u32 model = vcpu->kvm->arch.vgic.vgic_model;
	u32 vmcr;

	if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
		vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
			ICH_VMCR_ACK_CTL_MASK;
		vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
			ICH_VMCR_FIQ_EN_MASK;
	} else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1,
		 * the VFIQEn bit is RES1 and the VAckCtl bit is RES0.
		 */
223 vmcr = ICH_VMCR_FIQ_EN_MASK;
224 }
225
226 vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
227 vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
Andre Przywarae4823a72015-12-03 11:47:37 +0000228 vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
229 vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
230 vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
Vijaya Kumar K5fb247d2017-01-26 19:50:50 +0530231 vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
232 vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;
Andre Przywarae4823a72015-12-03 11:47:37 +0000233
Christoffer Dall328e5662016-03-24 11:21:04 +0100234 cpu_if->vgic_vmcr = vmcr;
Andre Przywarae4823a72015-12-03 11:47:37 +0000235}
236
237void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
238{
Christoffer Dall328e5662016-03-24 11:21:04 +0100239 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
Christoffer Dall28232a42017-05-20 14:12:34 +0200240 u32 model = vcpu->kvm->arch.vgic.vgic_model;
Christoffer Dall328e5662016-03-24 11:21:04 +0100241 u32 vmcr;
242
243 vmcr = cpu_if->vgic_vmcr;
Andre Przywarae4823a72015-12-03 11:47:37 +0000244
Christoffer Dall28232a42017-05-20 14:12:34 +0200245 if (model == KVM_DEV_TYPE_ARM_VGIC_V2) {
246 vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
247 ICH_VMCR_ACK_CTL_SHIFT;
248 vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
249 ICH_VMCR_FIQ_EN_SHIFT;
250 } else {
		/*
		 * When emulating GICv3 on GICv3 with SRE=1,
		 * the VFIQEn bit is RES1 and the VAckCtl bit is RES0.
		 */
		vmcrp->fiqen = 1;
		vmcrp->ackctl = 0;
	}

	vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
	vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
	vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
	vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
	vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
}

#define INITIAL_PENDBASER_VALUE						  \
	(GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)		| \
	 GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, SameAsInner)	| \
	 GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable))

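/*
 * Bring the virtual CPU interface to its reset state and enable it,
 * applying any system register trapping that has been requested.
 */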
void vgic_v3_enable(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vgic_v3->vgic_vmcr = 0;

	/*
	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
	 * way, so we force SRE to 1 to demonstrate this to the guest.
	 * Also, we don't support any form of IRQ/FIQ bypass.
	 * This goes with the spec allowing the value to be RAO/WI.
	 */
	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
		vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
				     ICC_SRE_EL1_DFB |
				     ICC_SRE_EL1_SRE);
		vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
	} else {
		vgic_v3->vgic_sre = 0;
	}

	vcpu->arch.vgic_cpu.num_id_bits = (kvm_vgic_global_state.ich_vtr_el2 &
					   ICH_VTR_ID_BITS_MASK) >>
					   ICH_VTR_ID_BITS_SHIFT;
	vcpu->arch.vgic_cpu.num_pri_bits = ((kvm_vgic_global_state.ich_vtr_el2 &
					    ICH_VTR_PRI_BITS_MASK) >>
					    ICH_VTR_PRI_BITS_SHIFT) + 1;

	/* Get the show on the road... */
	vgic_v3->vgic_hcr = ICH_HCR_EN;
	if (group0_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL0;
	if (group1_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
	if (common_trap)
		vgic_v3->vgic_hcr |= ICH_HCR_TC;
}

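/*
 * Sync the pending state of an LPI with the pending table in guest
 * RAM: read the bit for this INTID, latch it into the vgic_irq and
 * queue the interrupt if needed, clearing the bit in guest memory
 * once it has been consumed.
 */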
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
{
	struct kvm_vcpu *vcpu;
	int byte_offset, bit_nr;
	gpa_t pendbase, ptr;
	bool status;
	u8 val;
	int ret;
	unsigned long flags;

retry:
	vcpu = irq->target_vcpu;
	if (!vcpu)
		return 0;

	pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

	byte_offset = irq->intid / BITS_PER_BYTE;
	bit_nr = irq->intid % BITS_PER_BYTE;
	ptr = pendbase + byte_offset;

	ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
	if (ret)
		return ret;

	status = val & (1 << bit_nr);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->target_vcpu != vcpu) {
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		goto retry;
	}
	irq->pending_latch = status;
	vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

	if (status) {
		/* clear consumed data */
		val &= ~(1 << bit_nr);
		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			return ret;
	}
	return 0;
}

/*
 * The deactivation of the doorbell interrupt will trigger the
 * unmapping of the associated vPE.
 */
static void unmap_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
	}
}

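/* Re-establish the vPE mappings torn down by unmap_all_vpes(). */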
static void map_all_vpes(struct vgic_dist *dist)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < dist->its_vm.nr_vpes; i++) {
		desc = irq_to_desc(dist->its_vm.vpes[i]->irq);
		irq_domain_activate_irq(irq_desc_get_irq_data(desc), false);
	}
}

/**
 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
 * @kvm: kvm handle
 *
 * The kvm lock and all vcpu locks must be held.
 */
int vgic_v3_save_pending_tables(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	gpa_t last_ptr = ~(gpa_t)0;
	bool vlpi_avail = false;
	int ret = 0;
	u8 val;

	if (unlikely(!vgic_initialized(kvm)))
		return -ENXIO;

	/*
	 * In preparation for reading any VLPI state, unmap the vPEs. The
	 * vgic_initialized() check above also ensures that the allocation
	 * and enabling of the doorbells have already been done.
	 */
	if (kvm_vgic_global_state.has_gicv4_1) {
		unmap_all_vpes(dist);
		vlpi_avail = true;
	}

	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		int byte_offset, bit_nr;
		struct kvm_vcpu *vcpu;
		gpa_t pendbase, ptr;
		bool is_pending;
		bool stored;

		vcpu = irq->target_vcpu;
		if (!vcpu)
			continue;

		pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);

		byte_offset = irq->intid / BITS_PER_BYTE;
		bit_nr = irq->intid % BITS_PER_BYTE;
		ptr = pendbase + byte_offset;

		if (ptr != last_ptr) {
			ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
			if (ret)
				goto out;
			last_ptr = ptr;
		}

		stored = val & (1U << bit_nr);

		is_pending = irq->pending_latch;

		if (irq->hw && vlpi_avail)
			vgic_v4_get_vlpi_state(irq, &is_pending);

		if (stored == is_pending)
			continue;

		if (is_pending)
			val |= 1 << bit_nr;
		else
			val &= ~(1 << bit_nr);

		ret = kvm_write_guest_lock(kvm, ptr, &val, 1);
		if (ret)
			goto out;
	}

out:
	if (vlpi_avail)
		map_all_vpes(dist);

	return ret;
}

/**
 * vgic_v3_rdist_overlap - check if a region overlaps with any
 * existing redistributor region
 *
 * @kvm: kvm handle
 * @base: base of the region
 * @size: size of region
 *
 * Return: true if there is an overlap
 */
bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if ((base + size > rdreg->base) &&
		    (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
			return true;
	}
	return false;
}

/*
 * Check for overlapping regions and for regions crossing the end of memory
 * for base addresses which have already been set.
 */
bool vgic_v3_check_base(struct kvm *kvm)
{
	struct vgic_dist *d = &kvm->arch.vgic;
	struct vgic_redist_region *rdreg;

	if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
	    d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
		return false;

	list_for_each_entry(rdreg, &d->rd_regions, list) {
		if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
			rdreg->base)
			return false;
	}

	if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
		return true;

	return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
				      KVM_VGIC_V3_DIST_SIZE);
}

/**
 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
 * which has free space to map a new redistributor.
 *
 * @rd_regions: redistributor region list head
 *
 * A redistributor region maps n redistributors, n = region size / (2 x 64kB).
 * The stride between redistributors is 0 and regions are filled in index order.
 *
 * Return: the redist region handle, if any, that has space to map a new
 * redistributor.
 */
struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rd_regions)
{
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (!vgic_v3_redist_region_full(rdreg))
			return rdreg;
	}
	return NULL;
}

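/* Look up the redistributor region that carries a given index, if any. */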
struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
							   u32 index)
{
	struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
	struct vgic_redist_region *rdreg;

	list_for_each_entry(rdreg, rd_regions, list) {
		if (rdreg->index == index)
			return rdreg;
	}
	return NULL;
}

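/*
 * Final checks and MMIO registration before the guest first runs: the
 * distributor and all redistributor bases must have been set and must
 * not overlap, and userspace must already have initialized the VGIC.
 */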
int vgic_v3_map_resources(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int ret = 0;
	int c;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

		if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
			kvm_debug("vcpu %d redistributor base not set\n", c);
			return -ENXIO;
		}
	}

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		return -ENXIO;
	}

	if (!vgic_v3_check_base(kvm)) {
		kvm_err("VGIC redist and dist frames overlap\n");
		return -EINVAL;
	}

	/*
	 * For a VGICv3 we require the userland to explicitly initialize
	 * the VGIC before we need to use it.
	 */
	if (!vgic_initialized(kvm))
		return -EBUSY;

	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
	if (ret) {
		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
		return ret;
	}

	if (kvm_vgic_global_state.has_gicv4_1)
		vgic_v4_configure_vsgis(kvm);

	return 0;
}

DEFINE_STATIC_KEY_FALSE(vgic_v3_cpuif_trap);

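/*
 * Command-line options forcing traps of group-0, group-1 or common
 * GIC system register accesses, and enabling GICv4 support.
 */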
static int __init early_group0_trap_cfg(char *buf)
{
	return strtobool(buf, &group0_trap);
}
early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);

static int __init early_group1_trap_cfg(char *buf)
{
	return strtobool(buf, &group1_trap);
}
early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);

static int __init early_common_trap_cfg(char *buf)
{
	return strtobool(buf, &common_trap);
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);

static int __init early_gicv4_enable(char *buf)
{
	return strtobool(buf, &gicv4_enable);
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);

/**
 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
 * @info: pointer to the GIC description
 *
 * Return: 0 if the VGICv3 has been probed successfully, an error code
 * otherwise
 */
int vgic_v3_probe(const struct gic_kvm_info *info)
{
	u64 ich_vtr_el2 = kvm_call_hyp_ret(__vgic_v3_get_gic_config);
	bool has_v2;
	int ret;

	has_v2 = ich_vtr_el2 >> 63;
	ich_vtr_el2 = (u32)ich_vtr_el2;

	/*
	 * The ListRegs field is 5 bits, but there is an architectural
	 * maximum of 16 list registers. Just ignore bit 4...
	 */
	kvm_vgic_global_state.nr_lr = (ich_vtr_el2 & 0xf) + 1;
	kvm_vgic_global_state.can_emulate_gicv2 = false;
	kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;

	/* GICv4 support? */
	if (info->has_v4) {
		kvm_vgic_global_state.has_gicv4 = gicv4_enable;
		kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
		kvm_info("GICv4%s support %sabled\n",
			 kvm_vgic_global_state.has_gicv4_1 ? ".1" : "",
			 gicv4_enable ? "en" : "dis");
	}

	kvm_vgic_global_state.vcpu_base = 0;

	if (!info->vcpu.start) {
		kvm_info("GICv3: no GICV resource entry\n");
	} else if (!has_v2) {
		pr_warn(FW_BUG "CPU interface incapable of MMIO access\n");
	} else if (!PAGE_ALIGNED(info->vcpu.start)) {
		pr_warn("GICV physical address 0x%llx not page aligned\n",
			(unsigned long long)info->vcpu.start);
	} else {
		kvm_vgic_global_state.vcpu_base = info->vcpu.start;
		kvm_vgic_global_state.can_emulate_gicv2 = true;
		ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
		if (ret) {
			kvm_err("Cannot register GICv2 KVM device.\n");
			return ret;
		}
		kvm_info("vgic-v2@%llx\n", info->vcpu.start);
	}
	ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V3);
	if (ret) {
		kvm_err("Cannot register GICv3 KVM device.\n");
		kvm_unregister_device_ops(KVM_DEV_TYPE_ARM_VGIC_V2);
		return ret;
	}

	if (kvm_vgic_global_state.vcpu_base == 0)
		kvm_info("disabling GICv2 emulation\n");

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_30115)) {
		group0_trap = true;
		group1_trap = true;
	}

	if (group0_trap || group1_trap || common_trap) {
		kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
			 group0_trap ? "G0" : "",
			 group1_trap ? "G1" : "",
			 common_trap ? "C" : "");
		static_branch_enable(&vgic_v3_cpuif_trap);
	}

	kvm_vgic_global_state.vctrl_base = NULL;
	kvm_vgic_global_state.type = VGIC_V3;
	kvm_vgic_global_state.max_gic_vcpus = VGIC_V3_MAX_CPUS;

	return 0;
}

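/*
 * Restore the per-vCPU GIC CPU interface state (VMCR and the active
 * priority registers) when the vCPU is loaded onto a physical CPU;
 * with VHE, the trap configuration is also applied eagerly here.
 */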
void vgic_v3_load(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * If dealing with a GICv2 emulation on GICv3, VMCR_EL2.VFIQen
	 * is dependent on ICC_SRE_EL1.SRE, and we have to perform the
	 * VMCR_EL2 save/restore in the world switch.
	 */
	if (likely(cpu_if->vgic_sre))
		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);

	kvm_call_hyp(__vgic_v3_restore_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_activate_traps(cpu_if);

	WARN_ON(vgic_v4_load(vcpu));
}

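/*
 * Snapshot ICH_VMCR_EL2 back into the shadow state. This is only
 * needed when the guest uses the system register interface (SRE=1);
 * for a GICv2 guest, the world switch saves/restores VMCR itself.
 */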
void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	if (likely(cpu_if->vgic_sre))
		cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
}

void vgic_v3_put(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	WARN_ON(vgic_v4_put(vcpu, false));

	vgic_v3_vmcr_sync(vcpu);

	kvm_call_hyp(__vgic_v3_save_aprs, cpu_if);

	if (has_vhe())
		__vgic_v3_deactivate_traps(cpu_if);
}