/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

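/*
 * Generic handlers for registers (or register ranges) that are
 * Read-As-Zero, Read-As-One or Write-Ignore.
 */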
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

int vgic_mmio_uaccess_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			       unsigned int len, unsigned long val)
{
	/* Ignore */
	return 0;
}

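/*
 * The group bit lives in GICD_IGROUPR, one bit per interrupt. Reads collect
 * the irq->group flag for each interrupt covered by the access; writes
 * update it and requeue the interrupt so a group change takes effect
 * immediately.
 */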
unsigned long vgic_mmio_read_group(struct kvm_vcpu *vcpu,
				   gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->group)
			value |= BIT(i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr,
			   unsigned int len, unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->group = !!(val & BIT(i));
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

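/*
 * The pending state of a level interrupt is the logical OR of its line level
 * and its pending latch, which is what irq_is_pending() folds together, so
 * take the irq_lock to get a consistent snapshot of both.
 */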
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		unsigned long flags;

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq_is_pending(irq))
			value |= (1U << i);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = kvm_arm_get_running_vcpu();
	preempt_enable();
	return vcpu;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = true;
	vgic_irq_set_phys_active(irq, true);
}

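/*
 * GICD_ISPENDR: a write of 1 sets the pending state of the interrupt and
 * queues it to a VCPU. HW mapped interrupts go through
 * vgic_hw_irq_spending(), which also updates the physical interrupt for
 * guest-originated accesses.
 */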
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to SPENDR followed by a write to
	 * CPENDR for HW interrupts, so we clear the active state on
	 * the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

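/*
 * GICD_ISACTIVER and GICD_ICACTIVER reads both return the current active
 * state, one bit per interrupt.
 */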
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

	spin_lock_irqsave(&irq->irq_lock, flags);

	/*
	 * If this virtual IRQ was written into a list register, we
	 * have to make sure the CPU that runs the VCPU thread has
	 * synced back the LR state to the struct vgic_irq.
	 *
	 * As long as the conditions below are true, we know the VCPU thread
	 * may be on its way back from the guest (we kicked the VCPU thread in
	 * vgic_change_active_prepare) and still has to sync back this IRQ,
	 * so we release and re-acquire the spin_lock to let the other thread
	 * sync back the IRQ.
	 *
	 * When accessing VGIC state from user space, requester_vcpu is
	 * NULL, which is fine, because we guarantee that no VCPUs are running
	 * when accessing VGIC state from user space so irq->vcpu->cpu is
	 * always -1.
	 */
	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
	       irq->vcpu->cpu != -1) /* VCPU thread is running */
		cond_resched_lock(&irq->irq_lock);

	if (irq->hw) {
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	} else {
		u32 model = vcpu->kvm->arch.vgic.vgic_model;
		u8 active_source;

		irq->active = active;

		/*
		 * The GICv2 architecture indicates that the source CPUID for
		 * an SGI should be provided during an EOI which implies that
		 * the active state is stored somewhere, but at the same time
		 * this state is not architecturally exposed anywhere and we
		 * have no way of knowing the right source.
		 *
		 * This may lead to a VCPU not being able to receive
		 * additional instances of a particular SGI after migration
		 * for a GICv2 VM on some GIC implementations. Oh well.
		 */
		active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;

		if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
		    active && vgic_irq_is_sgi(irq->intid))
			irq->active_source = active_source;
	}

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid > VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
	return 0;
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

int vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len,
				    unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
	return 0;
}

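/*
 * GICD_IPRIORITYR holds one byte of priority per interrupt, so an access of
 * 'len' bytes covers 'len' interrupts.
 */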
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

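/*
 * GICD_ICFGR uses two bits per interrupt; we only look at the upper bit of
 * each pair, which selects edge (1) or level (0) triggering.
 */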
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

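/*
 * Accessors for the per-interrupt line level state, used by the userspace
 * save/restore interface. Each call covers a block of 32 interrupts
 * starting at 'intid'.
 */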
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * Line level is set irrespective of irq type
		 * (level or edge) to avoid dependency that VM should
		 * restore irq config before line level.
		 */
		new_level = !!(val & (1U << i));
		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPUs native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPUs native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

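/*
 * Check whether an access of the given width and alignment is allowed for
 * this register region and, for per-IRQ registers, whether it stays within
 * the range of allocated interrupts.
 */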
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);

	region->write(r_vcpu, addr, sizeof(u32), *val);
	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

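/*
 * Register the distributor's MMIO region with KVM's MMIO bus, using the
 * register table appropriate for the emulated GIC type.
 */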
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}