/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

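/*
 * Illustrative usage (a sketch, assuming the register-table helpers
 * declared in vgic-mmio.h): the RAZ/WI and RAO stubs above back
 * registers this emulation does not implement, e.g. the group register
 * in a GICv2 distributor table:
 *
 *	REGISTER_DESC_WITH_BITS_PER_IRQ(GIC_DIST_IGROUP,
 *		vgic_mmio_read_rao, vgic_mmio_write_wi, 1,
 *		VGIC_ACCESS_32bit),
 */
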
/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

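/*
 * Worked example (hypothetical access): with 1 bit per interrupt, a
 * 32-bit guest read at byte offset 0x4 of the GICD_ISENABLER range
 * gives VGIC_ADDR_TO_INTID(0x4, 1) == 32, so the loop above collects
 * the enable bits of INTIDs 32..63, one interrupt per register bit.
 */
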
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

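/*
 * Note on the locking pattern above, used throughout this file:
 * vgic_queue_irq_unlock() takes ownership of irq->irq_lock and drops it
 * itself after possibly queueing the now-enabled/pending interrupt to a
 * VCPU. That is why the "set" paths have no matching
 * spin_unlock_irqrestore(), while the "clear" paths unlock explicitly.
 */
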
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq_is_pending(irq))
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

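/*
 * irq_is_pending() lives in vgic.h; conceptually (a sketch, not the
 * verbatim helper) it folds the two pending sources together:
 *
 *	if (irq->config == VGIC_CONFIG_EDGE)
 *		return irq->pending_latch;
 *	return irq->pending_latch || irq->line_level;
 *
 * i.e. a level-triggered interrupt reads as pending when either the
 * latch was set via GICD_ISPENDR or the emulated input line is high.
 */
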
/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again,
 * because even if the current thread is migrated to another CPU, reading
 * the per-CPU value later will give us the same value as we update the
 * per-CPU variable in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = kvm_arm_get_running_vcpu();
	preempt_enable();
	return vcpu;
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->pending_latch = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool new_active_state)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

	spin_lock_irqsave(&irq->irq_lock, flags);

	/*
	 * If this virtual IRQ was written into a list register, we
	 * have to make sure the CPU that runs the VCPU thread has
	 * synced back the LR state to the struct vgic_irq.
	 *
	 * As long as the conditions below are true, we know the VCPU thread
	 * may be on its way back from the guest (we kicked the VCPU thread in
	 * vgic_change_active_prepare) and still has to sync back this IRQ,
	 * so we release and re-acquire the spin_lock to let the other thread
	 * sync back the IRQ.
	 *
	 * When accessing VGIC state from user space, requester_vcpu is
	 * NULL, which is fine, because we guarantee that no VCPUs are running
	 * when accessing VGIC state from user space so irq->vcpu->cpu is
	 * always -1.
	 */
	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
	       irq->vcpu->cpu != -1) /* VCPU thread is running */
		cond_resched_lock(&irq->irq_lock);

	irq->active = new_active_state;
	if (new_active_state)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		spin_unlock_irqrestore(&irq->irq_lock, flags);
}

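/*
 * Note: cond_resched_lock() above may transparently drop and re-take
 * irq->irq_lock, which is what gives the VCPU thread a window to grab
 * the lock and fold the in-flight LR state back into the struct
 * vgic_irq before we overwrite irq->active.
 */
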
/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts' active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

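/*
 * Example flow (hypothetical guest access): a 32-bit write to
 * GICD_ICACTIVER at byte offset 0x8 targets SPIs 64..95, so
 * vgic_mmio_write_cactive() below halts all VCPUs, clears the active
 * state with every VCPU parked, and then resumes the guest. A write to
 * the first bank (private interrupts, intid < 32) skips the halt.
 */
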
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

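/*
 * Worked example (hypothetical values, assuming VGIC_PRI_BITS == 5):
 * GENMASK(7, 8 - 5) == GENMASK(7, 3) == 0xf8, so a guest write of 0xd5
 * to a GICD_IPRIORITYR byte field is stored as 0xd5 & 0xf8 == 0xd0;
 * the three low bits are dropped, just as on hardware implementing
 * only 32 priority levels.
 */
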
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

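/*
 * GICD_ICFGR encoding refresher: each interrupt owns a 2-bit field
 * whose upper bit selects the trigger (0 = level, 1 = edge) while the
 * lower bit is reserved. That is why the read path above reports
 * 2U << (i * 2) for edge interrupts and the write path only tests
 * bit (i * 2 + 1).
 */
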
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * The line level is set irrespective of irq type
		 * (level or edge), so that restoring the line level does
		 * not depend on the irq configuration having been
		 * restored first.
		 */
		new_level = !!(val & (1U << i));
		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

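/*
 * Note: bsearch() requires the regions table to be sorted by ascending
 * reg_offset (the v2/v3 register tables are laid out that way).
 * match_region() treats each entry as the half-open byte range
 * [reg_offset, reg_offset + len), so any offset inside a multi-byte
 * register resolves to that register's descriptor.
 */
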
void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

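/*
 * Worked example (hypothetical, big-endian host): a 4-byte guest store
 * of 0x12345678 arrives as the little-endian byte array 78 56 34 12;
 * vgic_data_mmio_bus_to_host() recovers the host value 0x12345678 via
 * le32_to_cpu(), so the register handlers can shift and mask without
 * caring about bus byte order. On little-endian hosts both helpers
 * degenerate to no-ops.
 */
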
static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

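/*
 * Example rejection (hypothetical configuration with nr_spis == 32, so
 * nr_irqs == 64): a 32-bit access at byte offset 0x8 of a
 * 1-bit-per-irq range resolves to INTID 64, check_region() returns
 * false, and the dispatchers below treat the access as RAZ/WI instead
 * of touching unallocated interrupts.
 */
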
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
	else
		region->write(r_vcpu, addr, sizeof(u32), *val);

	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

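/*
 * These ops are the entry points KVM's MMIO bus invokes when a guest
 * access faults on a registered GIC frame; vgic_register_dist_iodev()
 * below shows the registration side for the distributor. Both
 * dispatchers return 0 even for unhandled offsets, so bogus guest
 * accesses read as zero and writes are silently ignored rather than
 * being propagated as errors.
 */
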
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}