/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <linux/irqchip/arm-gic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

/*
 * How the whole thing works (courtesy of Christoffer Dall):
 *
 * - At any time, dist->irq_pending_on_cpu is the oracle that knows if
 *   something is pending.
 * - VGIC pending interrupts are stored in the vgic.irq_state bitmap
 *   (this bitmap is updated by both user land ioctls and guest mmio
 *   ops, and by other in-kernel peripherals such as the arch. timers)
 *   and indicate the 'wire' state.
 * - Every time the bitmap changes, the irq_pending_on_cpu oracle is
 *   recalculated.
 * - To calculate the oracle, we need info for each cpu from
 *   compute_pending_for_cpu, which considers:
 *   - PPI: dist->irq_state & dist->irq_enable
 *   - SPI: dist->irq_state & dist->irq_enable & dist->irq_spi_target
 *   - irq_spi_target is a 'formatted' version of the GICD_ITARGETSRn
 *     registers, stored on each vcpu. We only keep one bit of
 *     information per interrupt, making sure that only one vcpu can
 *     accept the interrupt.
 * - The same is true when injecting an interrupt, except that we only
 *   consider a single interrupt at a time. The irq_spi_cpu array
 *   contains the target CPU for each SPI.
 *
 * The handling of level interrupts adds some extra complexity. We
 * need to track when the interrupt has been EOIed, so we can sample
 * the 'line' again. This is achieved as such:
 *
 * - When a level interrupt is moved onto a vcpu, the corresponding
 *   bit in irq_active is set. As long as this bit is set, the line
 *   will be ignored for further interrupts. The interrupt is injected
 *   into the vcpu with the GICH_LR_EOI bit set (generate a
 *   maintenance interrupt on EOI).
 * - When the interrupt is EOIed, the maintenance interrupt fires,
 *   and clears the corresponding bit in irq_active. This allows the
 *   interrupt line to be sampled again.
 */

#define VGIC_ADDR_UNDEF		(-1)
#define IS_VGIC_ADDR_UNDEF(_x)	((_x) == VGIC_ADDR_UNDEF)

#define PRODUCT_ID_KVM		0x4b	/* ASCII code K */
#define IMPLEMENTER_ARM		0x43b
#define GICC_ARCH_VERSION_V2	0x2

/* Physical address of vgic virtual cpu interface */
static phys_addr_t vgic_vcpu_base;

/* Virtual control interface base address */
static void __iomem *vgic_vctrl_base;

static struct device_node *vgic_node;

#define ACCESS_READ_VALUE	(1 << 0)
#define ACCESS_READ_RAZ		(0 << 0)
#define ACCESS_READ_MASK(x)	((x) & (1 << 0))
#define ACCESS_WRITE_IGNORED	(0 << 1)
#define ACCESS_WRITE_SETBIT	(1 << 1)
#define ACCESS_WRITE_CLEARBIT	(2 << 1)
#define ACCESS_WRITE_VALUE	(3 << 1)
#define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))

static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
static void vgic_update_state(struct kvm *kvm);
static void vgic_kick_vcpus(struct kvm *kvm);
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg);
static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
static void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);

static u32 vgic_nr_lr;
static unsigned int vgic_maint_irq;

static u32 *vgic_bitmap_get_reg(struct vgic_bitmap *x,
				int cpuid, u32 offset)
{
	offset >>= 2;
	if (!offset)
		return x->percpu[cpuid].reg;
	else
		return x->shared.reg + offset - 1;
}

static int vgic_bitmap_get_irq_val(struct vgic_bitmap *x,
				   int cpuid, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		return test_bit(irq, x->percpu[cpuid].reg_ul);

	return test_bit(irq - VGIC_NR_PRIVATE_IRQS, x->shared.reg_ul);
}

static void vgic_bitmap_set_irq_val(struct vgic_bitmap *x, int cpuid,
				    int irq, int val)
{
	unsigned long *reg;

	if (irq < VGIC_NR_PRIVATE_IRQS) {
		reg = x->percpu[cpuid].reg_ul;
	} else {
		reg = x->shared.reg_ul;
		irq -= VGIC_NR_PRIVATE_IRQS;
	}

	if (val)
		set_bit(irq, reg);
	else
		clear_bit(irq, reg);
}

static unsigned long *vgic_bitmap_get_cpu_map(struct vgic_bitmap *x, int cpuid)
{
	if (unlikely(cpuid >= VGIC_MAX_CPUS))
		return NULL;
	return x->percpu[cpuid].reg_ul;
}

static unsigned long *vgic_bitmap_get_shared_map(struct vgic_bitmap *x)
{
	return x->shared.reg_ul;
}

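/*
 * A vgic_bytemap keeps one byte of state per interrupt (e.g. its
 * priority). The first 8 words (32 bytes) are banked per cpu and cover
 * the private interrupts of @cpuid; anything beyond that indexes the
 * shared SPI space, hence the '- 8' rebasing below.
 */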
static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
	offset >>= 2;
	BUG_ON(offset > (VGIC_NR_IRQS / 4));
	if (offset < 8)
		return x->percpu[cpuid] + offset;
	else
		return x->shared + offset - 8;
}

#define VGIC_CFG_LEVEL	0
#define VGIC_CFG_EDGE	1

static bool vgic_irq_is_edge(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int irq_val;

	irq_val = vgic_bitmap_get_irq_val(&dist->irq_cfg, vcpu->vcpu_id, irq);
	return irq_val == VGIC_CFG_EDGE;
}

static int vgic_irq_is_enabled(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_enabled, vcpu->vcpu_id, irq);
}

static int vgic_irq_is_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_active, vcpu->vcpu_id, irq);
}

static void vgic_irq_set_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 1);
}

static void vgic_irq_clear_active(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_active, vcpu->vcpu_id, irq, 0);
}

static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	return vgic_bitmap_get_irq_val(&dist->irq_state, vcpu->vcpu_id, irq);
}

static void vgic_dist_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 1);
}

static void vgic_dist_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	vgic_bitmap_set_irq_val(&dist->irq_state, vcpu->vcpu_id, irq, 0);
}

static void vgic_cpu_irq_set(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		set_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		set_bit(irq - VGIC_NR_PRIVATE_IRQS,
			vcpu->arch.vgic_cpu.pending_shared);
}

static void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
{
	if (irq < VGIC_NR_PRIVATE_IRQS)
		clear_bit(irq, vcpu->arch.vgic_cpu.pending_percpu);
	else
		clear_bit(irq - VGIC_NR_PRIVATE_IRQS,
			  vcpu->arch.vgic_cpu.pending_shared);
}

static u32 mmio_data_read(struct kvm_exit_mmio *mmio, u32 mask)
{
	return *((u32 *)mmio->data) & mask;
}

static void mmio_data_write(struct kvm_exit_mmio *mmio, u32 mask, u32 value)
{
	*((u32 *)mmio->data) = value & mask;
}

/**
 * vgic_reg_access - access vgic register
 * @mmio:   pointer to the data describing the mmio access
 * @reg:    pointer to the virtual backing of vgic distributor data
 * @offset: least significant 2 bits used for word offset
 * @mode:   ACCESS_ mode (see defines above)
 *
 * Helper to make vgic register access easier using one of the access
 * modes defined for vgic register access
 * (read,raz,write-ignored,setbit,clearbit,write)
 */
static void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
			    phys_addr_t offset, int mode)
{
	int word_offset = (offset & 3) * 8;
	u32 mask = (1UL << (mmio->len * 8)) - 1;
	u32 regval;

	/*
	 * Any alignment fault should have been delivered to the guest
	 * directly (ARM ARM B3.12.7 "Prioritization of aborts").
	 */

	if (reg) {
		regval = *reg;
	} else {
		BUG_ON(mode != (ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED));
		regval = 0;
	}

	if (mmio->is_write) {
		u32 data = mmio_data_read(mmio, mask) << word_offset;
		switch (ACCESS_WRITE_MASK(mode)) {
		case ACCESS_WRITE_IGNORED:
			return;

		case ACCESS_WRITE_SETBIT:
			regval |= data;
			break;

		case ACCESS_WRITE_CLEARBIT:
			regval &= ~data;
			break;

		case ACCESS_WRITE_VALUE:
			regval = (regval & ~(mask << word_offset)) | data;
			break;
		}
		*reg = regval;
	} else {
		switch (ACCESS_READ_MASK(mode)) {
		case ACCESS_READ_RAZ:
			regval = 0;
			/* fall through */

		case ACCESS_READ_VALUE:
			mmio_data_write(mmio, mask, regval >> word_offset);
		}
	}
}
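
/*
 * Example: a single-byte guest write at a register offset with
 * (offset & 3) == 1 reaches vgic_reg_access() with word_offset == 8 and
 * mask == 0xff, so only bits [15:8] of the backing word are modified.
 */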
317
static bool handle_mmio_misc(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	u32 word_offset = offset & 3;

	switch (offset & ~3) {
	case 0:			/* GICD_CTLR */
		reg = vcpu->kvm->arch.vgic.enabled;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
		if (mmio->is_write) {
			vcpu->kvm->arch.vgic.enabled = reg & 1;
			vgic_update_state(vcpu->kvm);
			return true;
		}
		break;

	case 4:			/* GICD_TYPER */
		reg  = (atomic_read(&vcpu->kvm->online_vcpus) - 1) << 5;
		reg |= (VGIC_NR_IRQS >> 5) - 1;
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;

	case 8:			/* GICD_IIDR */
		reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
		vgic_reg_access(mmio, &reg, word_offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		break;
	}

	return false;
}

static bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_enable_reg(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_enable_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_enabled,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		if (offset < 4) /* Force SGI enabled */
			*reg |= 0xffff;
		vgic_retire_disabled_irqs(vcpu);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_set_pending_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_SETBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_clear_pending_reg(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_state,
				       vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_CLEARBIT);
	if (mmio->is_write) {
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
					vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

#define GICD_ITARGETSR_SIZE	32
#define GICD_CPUTARGETS_BITS	8
#define GICD_IRQS_PER_ITARGETSR	(GICD_ITARGETSR_SIZE / GICD_CPUTARGETS_BITS)
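
/*
 * Each GICD_ITARGETSRn word describes 4 SPIs, one target byte per SPI.
 * vgic_get_target_reg() rebuilds such a word from irq_spi_cpu[]: with
 * all four SPIs of a word targeting vcpu 1, it returns 0x02020202.
 */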
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	int i;
	u32 val = 0;

	irq -= VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
		val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);

	return val;
}

static void vgic_set_target_reg(struct kvm *kvm, u32 val, int irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int i, c;
	unsigned long *bmap;
	u32 target;

	irq -= VGIC_NR_PRIVATE_IRQS;

	/*
	 * Pick the LSB in each byte. This ensures we target exactly
	 * one vcpu per IRQ. If the byte is null, assume we target
	 * CPU0.
	 */
	for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++) {
		int shift = i * GICD_CPUTARGETS_BITS;
		target = ffs((val >> shift) & 0xffU);
		target = target ? (target - 1) : 0;
		dist->irq_spi_cpu[irq + i] = target;
		kvm_for_each_vcpu(c, vcpu, kvm) {
			bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
			if (c == target)
				set_bit(irq + i, bmap);
			else
				clear_bit(irq + i, bmap);
		}
	}
}

static bool handle_mmio_target_reg(struct kvm_vcpu *vcpu,
				   struct kvm_exit_mmio *mmio,
				   phys_addr_t offset)
{
	u32 reg;

	/* We treat the banked interrupt targets as read-only */
	if (offset < 32) {
		u32 roreg = 1 << vcpu->vcpu_id;
		roreg |= roreg << 8;
		roreg |= roreg << 16;

		vgic_reg_access(mmio, &roreg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_get_target_reg(vcpu->kvm, offset & ~3U);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_set_target_reg(vcpu->kvm, reg, offset & ~3U);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

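/*
 * Worked example: vgic_cfg_expand(0x0003) yields 0x0000000a (bits 1
 * and 3 set), and vgic_cfg_compress(0x0000000a) gives back 0x0003.
 */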
static u32 vgic_cfg_expand(u16 val)
{
	u32 res = 0;
	int i;

	/*
	 * Turn a 16bit value like abcd...mnop into a 32bit word
	 * a0b0c0d0...m0n0o0p0, which is what the HW cfg register is.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> i) & VGIC_CFG_EDGE) << (2 * i + 1);

	return res;
}

static u16 vgic_cfg_compress(u32 val)
{
	u16 res = 0;
	int i;

	/*
	 * Turn a 32bit word a0b0c0d0...m0n0o0p0 into a 16bit value like
	 * abcd...mnop, which is what we really care about.
	 */
	for (i = 0; i < 16; i++)
		res |= ((val >> (i * 2 + 1)) & VGIC_CFG_EDGE) << i;

	return res;
}

/*
 * The distributor uses 2 bits per IRQ for the CFG register, but the
 * LSB is always 0. As such, we only keep the upper bit, and use the
 * two above functions to compress/expand the bits
 */
static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 val;
	u32 *reg;

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	if (offset & 4)
		val = *reg >> 16;
	else
		val = *reg & 0xffff;

	val = vgic_cfg_expand(val);
	vgic_reg_access(mmio, &val, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (offset < 8) {
			*reg = ~0U; /* Force PPIs/SGIs to 1 */
			return false;
		}

		val = vgic_cfg_compress(val);
		if (offset & 4) {
			*reg &= 0xffff;
			*reg |= val << 16;
		} else {
			*reg &= 0xffff << 16;
			*reg |= val;
		}
	}

	return false;
}

static bool handle_mmio_sgi_reg(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		vgic_dispatch_sgi(vcpu, reg);
		vgic_update_state(vcpu->kvm);
		return true;
	}

	return false;
}

/**
 * vgic_unqueue_irqs - move pending IRQs from LRs to the distributor
 * @vcpu: pointer to the vcpu whose LRs are to be drained
 *
 * Move any pending IRQs that have already been assigned to LRs back to the
 * emulated distributor state so that the complete emulated state can be read
 * from the main emulation structures without investigating the LRs.
 *
 * Note that IRQs in the active state in the LRs get their pending state moved
 * to the distributor but the active state stays in the LRs, because we don't
 * track the active state on the distributor side.
 */
static void vgic_unqueue_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int vcpu_id = vcpu->vcpu_id;
	int i;

	for_each_set_bit(i, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr lr = vgic_get_lr(vcpu, i);

		/*
		 * There are three options for the state bits:
		 *
		 * 01: pending
		 * 10: active
		 * 11: pending and active
		 *
		 * If the LR holds only an active interrupt (not pending) then
		 * just leave it alone.
		 */
		if ((lr.state & LR_STATE_MASK) == LR_STATE_ACTIVE)
			continue;

		/*
		 * Reestablish the pending state on the distributor and the
		 * CPU interface.  It may have already been pending, but that
		 * is fine, then we are only setting a few bits that were
		 * already set.
		 */
		vgic_dist_irq_set(vcpu, lr.irq);
		if (lr.irq < VGIC_NR_SGIS)
			dist->irq_sgi_sources[vcpu_id][lr.irq] |= 1 << lr.source;
		lr.state &= ~LR_STATE_PENDING;
		vgic_set_lr(vcpu, i, lr);

		/*
		 * If there's no state left on the LR (it could still be
		 * active), then the LR does not hold any useful info and can
		 * be marked as free for other use.
		 */
		if (!(lr.state & LR_STATE_MASK))
			vgic_retire_lr(i, lr.irq, vcpu);

		/* Finally update the VGIC state. */
		vgic_update_state(vcpu->kvm);
	}
}

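/*
 * GICD_{C,S}PENDSGIRn pack one source byte per SGI, four SGIs per
 * 32-bit register: a word access at offset 4 covers SGIs 4-7, each
 * byte holding the bitmask of CPUs that raised that SGI.
 */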
/* Handle reads of GICD_CPENDSGIRn and GICD_SPENDSGIRn */
static bool read_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					struct kvm_exit_mmio *mmio,
					phys_addr_t offset)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg = 0;

	/* Copy source SGIs from distributor side */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		int shift = 8 * (sgi - min_sgi);
		reg |= (u32)dist->irq_sgi_sources[vcpu_id][sgi] << shift;
	}

	mmio_data_write(mmio, ~0, reg);
	return false;
}

static bool write_set_clear_sgi_pend_reg(struct kvm_vcpu *vcpu,
					 struct kvm_exit_mmio *mmio,
					 phys_addr_t offset, bool set)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int sgi;
	int min_sgi = (offset & ~0x3);
	int max_sgi = min_sgi + 3;
	int vcpu_id = vcpu->vcpu_id;
	u32 reg;
	bool updated = false;

	reg = mmio_data_read(mmio, ~0);

	/* Set or clear pending SGIs on the distributor */
	for (sgi = min_sgi; sgi <= max_sgi; sgi++) {
		u8 mask = reg >> (8 * (sgi - min_sgi));
		if (set) {
			if ((dist->irq_sgi_sources[vcpu_id][sgi] & mask) != mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] |= mask;
		} else {
			if (dist->irq_sgi_sources[vcpu_id][sgi] & mask)
				updated = true;
			dist->irq_sgi_sources[vcpu_id][sgi] &= ~mask;
		}
	}

	if (updated)
		vgic_update_state(vcpu->kvm);

	return updated;
}

static bool handle_mmio_sgi_set(struct kvm_vcpu *vcpu,
				struct kvm_exit_mmio *mmio,
				phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, true);
}

static bool handle_mmio_sgi_clear(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	if (!mmio->is_write)
		return read_set_clear_sgi_pend_reg(vcpu, mmio, offset);
	else
		return write_set_clear_sgi_pend_reg(vcpu, mmio, offset, false);
}

/*
 * I would have liked to use the kvm_bus_io_*() API instead, but it
 * cannot cope with banked registers (only the VM pointer is passed
 * around, and we need the vcpu). One of these days, someone please
 * fix it!
 */
struct mmio_range {
	phys_addr_t base;
	unsigned long len;
	bool (*handle_mmio)(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
			    phys_addr_t offset);
};

static const struct mmio_range vgic_dist_ranges[] = {
	{
		.base = GIC_DIST_CTRL,
		.len = 12,
		.handle_mmio = handle_mmio_misc,
	},
	{
		.base = GIC_DIST_IGROUP,
		.len = VGIC_NR_IRQS / 8,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_DIST_ENABLE_SET,
		.len = VGIC_NR_IRQS / 8,
		.handle_mmio = handle_mmio_set_enable_reg,
	},
	{
		.base = GIC_DIST_ENABLE_CLEAR,
		.len = VGIC_NR_IRQS / 8,
		.handle_mmio = handle_mmio_clear_enable_reg,
	},
	{
		.base = GIC_DIST_PENDING_SET,
		.len = VGIC_NR_IRQS / 8,
		.handle_mmio = handle_mmio_set_pending_reg,
	},
	{
		.base = GIC_DIST_PENDING_CLEAR,
		.len = VGIC_NR_IRQS / 8,
		.handle_mmio = handle_mmio_clear_pending_reg,
	},
	{
		.base = GIC_DIST_ACTIVE_SET,
		.len = VGIC_NR_IRQS / 8,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_DIST_ACTIVE_CLEAR,
		.len = VGIC_NR_IRQS / 8,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GIC_DIST_PRI,
		.len = VGIC_NR_IRQS,
		.handle_mmio = handle_mmio_priority_reg,
	},
	{
		.base = GIC_DIST_TARGET,
		.len = VGIC_NR_IRQS,
		.handle_mmio = handle_mmio_target_reg,
	},
	{
		.base = GIC_DIST_CONFIG,
		.len = VGIC_NR_IRQS / 4,
		.handle_mmio = handle_mmio_cfg_reg,
	},
	{
		.base = GIC_DIST_SOFTINT,
		.len = 4,
		.handle_mmio = handle_mmio_sgi_reg,
	},
	{
		.base = GIC_DIST_SGI_PENDING_CLEAR,
		.len = VGIC_NR_SGIS,
		.handle_mmio = handle_mmio_sgi_clear,
	},
	{
		.base = GIC_DIST_SGI_PENDING_SET,
		.len = VGIC_NR_SGIS,
		.handle_mmio = handle_mmio_sgi_set,
	},
	{}
};

static const
struct mmio_range *find_matching_range(const struct mmio_range *ranges,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	const struct mmio_range *r = ranges;

	while (r->len) {
		if (offset >= r->base &&
		    (offset + mmio->len) <= (r->base + r->len))
			return r;
		r++;
	}

	return NULL;
}

/**
 * vgic_handle_mmio - handle an in-kernel MMIO access
 * @vcpu: pointer to the vcpu performing the access
 * @run:  pointer to the kvm_run structure
 * @mmio: pointer to the data describing the access
 *
 * returns true if the MMIO access has been performed in kernel space,
 * and false if it needs to be emulated in user space.
 */
bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
		      struct kvm_exit_mmio *mmio)
{
	const struct mmio_range *range;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long base = dist->vgic_dist_base;
	bool updated_state;
	unsigned long offset;

	if (!irqchip_in_kernel(vcpu->kvm) ||
	    mmio->phys_addr < base ||
	    (mmio->phys_addr + mmio->len) > (base + KVM_VGIC_V2_DIST_SIZE))
		return false;

	/* We don't support ldrd / strd or ldm / stm to the emulated vgic */
	if (mmio->len > 4) {
		kvm_inject_dabt(vcpu, mmio->phys_addr);
		return true;
	}

	offset = mmio->phys_addr - base;
	range = find_matching_range(vgic_dist_ranges, mmio, offset);
	if (unlikely(!range || !range->handle_mmio)) {
		pr_warn("Unhandled access %d %08llx %d\n",
			mmio->is_write, mmio->phys_addr, mmio->len);
		return false;
	}

	spin_lock(&vcpu->kvm->arch.vgic.lock);
	offset = mmio->phys_addr - range->base - base;
	updated_state = range->handle_mmio(vcpu, mmio, offset);
	spin_unlock(&vcpu->kvm->arch.vgic.lock);
	kvm_prepare_mmio(run, mmio);
	kvm_handle_mmio_return(vcpu, run);

	if (updated_state)
		vgic_kick_vcpus(vcpu->kvm);

	return true;
}

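/*
 * GICD_SGIR layout, decoded below: bits [3:0] SGI number, bits [23:16]
 * CPU target list, bits [25:24] target filter (0: use the list,
 * 1: all-but-self, 2: self only).
 */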
static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	u8 target_cpus;
	int sgi, mode, c, vcpu_id;

	vcpu_id = vcpu->vcpu_id;

	sgi = reg & 0xf;
	target_cpus = (reg >> 16) & 0xff;
	mode = (reg >> 24) & 3;

	switch (mode) {
	case 0:
		if (!target_cpus)
			return;
		break;

	case 1:
		target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
		break;

	case 2:
		target_cpus = 1 << vcpu_id;
		break;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (target_cpus & 1) {
			/* Flag the SGI as pending */
			vgic_dist_irq_set(vcpu, sgi);
			dist->irq_sgi_sources[c][sgi] |= 1 << vcpu_id;
			kvm_debug("SGI%d from CPU%d to CPU%d\n", sgi, vcpu_id, c);
		}

		target_cpus >>= 1;
	}
}

static int compute_pending_for_cpu(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long *pending, *enabled, *pend_percpu, *pend_shared;
	unsigned long pending_private, pending_shared;
	int vcpu_id;

	vcpu_id = vcpu->vcpu_id;
	pend_percpu = vcpu->arch.vgic_cpu.pending_percpu;
	pend_shared = vcpu->arch.vgic_cpu.pending_shared;

	pending = vgic_bitmap_get_cpu_map(&dist->irq_state, vcpu_id);
	enabled = vgic_bitmap_get_cpu_map(&dist->irq_enabled, vcpu_id);
	bitmap_and(pend_percpu, pending, enabled, VGIC_NR_PRIVATE_IRQS);

	pending = vgic_bitmap_get_shared_map(&dist->irq_state);
	enabled = vgic_bitmap_get_shared_map(&dist->irq_enabled);
	bitmap_and(pend_shared, pending, enabled, VGIC_NR_SHARED_IRQS);
	bitmap_and(pend_shared, pend_shared,
		   vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]),
		   VGIC_NR_SHARED_IRQS);

	pending_private = find_first_bit(pend_percpu, VGIC_NR_PRIVATE_IRQS);
	pending_shared = find_first_bit(pend_shared, VGIC_NR_SHARED_IRQS);
	return (pending_private < VGIC_NR_PRIVATE_IRQS ||
		pending_shared < VGIC_NR_SHARED_IRQS);
}

/*
 * Update the interrupt state and determine which CPUs have pending
 * interrupts. Must be called with distributor lock held.
 */
static void vgic_update_state(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int c;

	if (!dist->enabled) {
		set_bit(0, &dist->irq_pending_on_cpu);
		return;
	}

	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (compute_pending_for_cpu(vcpu)) {
			pr_debug("CPU%d has pending interrupts\n", c);
			set_bit(c, &dist->irq_pending_on_cpu);
		}
	}
}

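/*
 * GICH_LRn fields, as consumed below: GICH_LR_VIRTUALID holds the
 * interrupt number, the source CPUID (SGIs only) sits at
 * GICH_LR_PHYSID_CPUID_SHIFT, and the pending/active/EOI-request
 * hardware bits map onto the software LR_STATE_PENDING,
 * LR_STATE_ACTIVE and LR_EOI_INT flags.
 */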
static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	struct vgic_lr lr_desc;
	u32 val = vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr];

	lr_desc.irq = val & GICH_LR_VIRTUALID;
	if (lr_desc.irq <= 15)
		lr_desc.source = (val >> GICH_LR_PHYSID_CPUID_SHIFT) & 0x7;
	else
		lr_desc.source = 0;
	lr_desc.state = 0;

	if (val & GICH_LR_PENDING_BIT)
		lr_desc.state |= LR_STATE_PENDING;
	if (val & GICH_LR_ACTIVE_BIT)
		lr_desc.state |= LR_STATE_ACTIVE;
	if (val & GICH_LR_EOI)
		lr_desc.state |= LR_EOI_INT;

	return lr_desc;
}

static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
			   struct vgic_lr lr_desc)
{
	u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq;

	if (lr_desc.state & LR_STATE_PENDING)
		lr_val |= GICH_LR_PENDING_BIT;
	if (lr_desc.state & LR_STATE_ACTIVE)
		lr_val |= GICH_LR_ACTIVE_BIT;
	if (lr_desc.state & LR_EOI_INT)
		lr_val |= GICH_LR_EOI;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
}

static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
				  struct vgic_lr lr_desc)
{
	if (!(lr_desc.state & LR_STATE_MASK))
		set_bit(lr, (unsigned long *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr);
}

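/*
 * GICH_ELRSR and GICH_EISR are each a pair of 32-bit registers; fold
 * each pair into a single u64 so callers can scan up to 64 LRs with
 * the generic bitmap helpers.
 */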
static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
{
	u64 val;

#if BITS_PER_LONG == 64
	val  = vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[1];
	val <<= 32;
	val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr[0];
#else
	val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr;
#endif
	return val;
}

static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
{
	u64 val;

#if BITS_PER_LONG == 64
	val  = vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[1];
	val <<= 32;
	val |= vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr[0];
#else
	val = *(u64 *)vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
#endif
	return val;
}

static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
{
	u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
	u32 ret = 0;

	if (misr & GICH_MISR_EOI)
		ret |= INT_STATUS_EOI;
	if (misr & GICH_MISR_U)
		ret |= INT_STATUS_UNDERFLOW;

	return ret;
}

static void vgic_v2_enable_underflow(struct kvm_vcpu *vcpu)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr |= GICH_HCR_UIE;
}

static void vgic_v2_disable_underflow(struct kvm_vcpu *vcpu)
{
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr &= ~GICH_HCR_UIE;
}

static void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr = vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr;

	vmcrp->ctlr = (vmcr & GICH_VMCR_CTRL_MASK) >> GICH_VMCR_CTRL_SHIFT;
	vmcrp->abpr = (vmcr & GICH_VMCR_ALIAS_BINPOINT_MASK) >> GICH_VMCR_ALIAS_BINPOINT_SHIFT;
	vmcrp->bpr = (vmcr & GICH_VMCR_BINPOINT_MASK) >> GICH_VMCR_BINPOINT_SHIFT;
	vmcrp->pmr = (vmcr & GICH_VMCR_PRIMASK_MASK) >> GICH_VMCR_PRIMASK_SHIFT;
}

static void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcrp)
{
	u32 vmcr;

	vmcr = (vmcrp->ctlr << GICH_VMCR_CTRL_SHIFT) & GICH_VMCR_CTRL_MASK;
	vmcr |= (vmcrp->abpr << GICH_VMCR_ALIAS_BINPOINT_SHIFT) & GICH_VMCR_ALIAS_BINPOINT_MASK;
	vmcr |= (vmcrp->bpr << GICH_VMCR_BINPOINT_SHIFT) & GICH_VMCR_BINPOINT_MASK;
	vmcr |= (vmcrp->pmr << GICH_VMCR_PRIMASK_SHIFT) & GICH_VMCR_PRIMASK_MASK;

	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = vmcr;
}

static void vgic_v2_enable(struct kvm_vcpu *vcpu)
{
	/*
	 * By forcing VMCR to zero, the GIC will restore the binary
	 * points to their reset values. Anything else resets to zero
	 * anyway.
	 */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_vmcr = 0;

	/* Get the show on the road... */
	vcpu->arch.vgic_cpu.vgic_v2.vgic_hcr = GICH_HCR_EN;
}

static const struct vgic_ops vgic_ops = {
	.get_lr = vgic_v2_get_lr,
	.set_lr = vgic_v2_set_lr,
	.sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
	.get_elrsr = vgic_v2_get_elrsr,
	.get_eisr = vgic_v2_get_eisr,
	.get_interrupt_status = vgic_v2_get_interrupt_status,
	.enable_underflow = vgic_v2_enable_underflow,
	.disable_underflow = vgic_v2_disable_underflow,
	.get_vmcr = vgic_v2_get_vmcr,
	.set_vmcr = vgic_v2_set_vmcr,
	.enable = vgic_v2_enable,
};

static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
	return vgic_ops.get_lr(vcpu, lr);
}

static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr,
			struct vgic_lr vlr)
{
	vgic_ops.set_lr(vcpu, lr, vlr);
}

static void vgic_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
			       struct vgic_lr vlr)
{
	vgic_ops.sync_lr_elrsr(vcpu, lr, vlr);
}

static inline u64 vgic_get_elrsr(struct kvm_vcpu *vcpu)
{
	return vgic_ops.get_elrsr(vcpu);
}

static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
{
	return vgic_ops.get_eisr(vcpu);
}

static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
{
	return vgic_ops.get_interrupt_status(vcpu);
}

static inline void vgic_enable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops.enable_underflow(vcpu);
}

static inline void vgic_disable_underflow(struct kvm_vcpu *vcpu)
{
	vgic_ops.disable_underflow(vcpu);
}

static inline void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops.get_vmcr(vcpu, vmcr);
}

static void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	vgic_ops.set_vmcr(vcpu, vmcr);
}

static inline void vgic_enable(struct kvm_vcpu *vcpu)
{
	vgic_ops.enable(vcpu);
}

static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr = vgic_get_lr(vcpu, lr_nr);

	vlr.state = 0;
	vgic_set_lr(vcpu, lr_nr, vlr);
	clear_bit(lr_nr, vgic_cpu->lr_used);
	vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
}

/*
 * An interrupt may have been disabled after being made pending on the
 * CPU interface (the classic case is a timer running while we're
 * rebooting the guest - the interrupt would kick as soon as the CPU
 * interface gets enabled, with deadly consequences).
 *
 * The solution is to examine already active LRs, and check the
 * interrupt is still enabled. If not, just retire it.
 */
static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	int lr;

	for_each_set_bit(lr, vgic_cpu->lr_used, vgic_cpu->nr_lr) {
		struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

		if (!vgic_irq_is_enabled(vcpu, vlr.irq)) {
			vgic_retire_lr(lr, vlr.irq, vcpu);
			if (vgic_irq_is_active(vcpu, vlr.irq))
				vgic_irq_clear_active(vcpu, vlr.irq);
		}
	}
}

/*
 * Queue an interrupt to a CPU virtual interface. Return true on success,
 * or false if it wasn't possible to queue it.
 */
static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_lr vlr;
	int lr;

	/* Sanitize the input... */
	BUG_ON(sgi_source_id & ~7);
	BUG_ON(sgi_source_id && irq >= VGIC_NR_SGIS);
	BUG_ON(irq >= VGIC_NR_IRQS);

	kvm_debug("Queue IRQ%d\n", irq);

	lr = vgic_cpu->vgic_irq_lr_map[irq];

	/* Do we have an active interrupt for the same CPUID? */
	if (lr != LR_EMPTY) {
		vlr = vgic_get_lr(vcpu, lr);
		if (vlr.source == sgi_source_id) {
			kvm_debug("LR%d piggyback for IRQ%d\n", lr, vlr.irq);
			BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
			vlr.state |= LR_STATE_PENDING;
			vgic_set_lr(vcpu, lr, vlr);
			return true;
		}
	}

	/* Try to use another LR for this interrupt */
	lr = find_first_zero_bit((unsigned long *)vgic_cpu->lr_used,
				 vgic_cpu->nr_lr);
	if (lr >= vgic_cpu->nr_lr)
		return false;

	kvm_debug("LR%d allocated for IRQ%d %x\n", lr, irq, sgi_source_id);
	vgic_cpu->vgic_irq_lr_map[irq] = lr;
	set_bit(lr, vgic_cpu->lr_used);

	vlr.irq = irq;
	vlr.source = sgi_source_id;
	vlr.state = LR_STATE_PENDING;
	if (!vgic_irq_is_edge(vcpu, irq))
		vlr.state |= LR_EOI_INT;

	vgic_set_lr(vcpu, lr, vlr);

	return true;
}

static bool vgic_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long sources;
	int vcpu_id = vcpu->vcpu_id;
	int c;

	sources = dist->irq_sgi_sources[vcpu_id][irq];

	for_each_set_bit(c, &sources, VGIC_MAX_CPUS) {
		if (vgic_queue_irq(vcpu, c, irq))
			clear_bit(c, &sources);
	}

	dist->irq_sgi_sources[vcpu_id][irq] = sources;

	/*
	 * If the sources bitmap has been cleared it means that we
	 * could queue all the SGIs onto link registers (see the
	 * clear_bit above), and therefore we are done with them in
	 * our emulated gic and can get rid of them.
	 */
	if (!sources) {
		vgic_dist_irq_clear(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static bool vgic_queue_hwirq(struct kvm_vcpu *vcpu, int irq)
{
	if (vgic_irq_is_active(vcpu, irq))
		return true; /* level interrupt, already queued */

	if (vgic_queue_irq(vcpu, 0, irq)) {
		if (vgic_irq_is_edge(vcpu, irq)) {
			vgic_dist_irq_clear(vcpu, irq);
			vgic_cpu_irq_clear(vcpu, irq);
		} else {
			vgic_irq_set_active(vcpu, irq);
		}

		return true;
	}

	return false;
}

/*
 * Fill the list registers with pending interrupts before running the
 * guest.
 */
static void __kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i, vcpu_id;
	int overflow = 0;

	vcpu_id = vcpu->vcpu_id;

	/*
	 * We may not have any pending interrupt, or the interrupts
	 * may have been serviced from another vcpu. In all cases,
	 * move along.
	 */
	if (!kvm_vgic_vcpu_pending_irq(vcpu)) {
		pr_debug("CPU%d has no pending interrupt\n", vcpu_id);
		goto epilog;
	}

	/* SGIs */
	for_each_set_bit(i, vgic_cpu->pending_percpu, VGIC_NR_SGIS) {
		if (!vgic_queue_sgi(vcpu, i))
			overflow = 1;
	}

	/* PPIs */
	for_each_set_bit_from(i, vgic_cpu->pending_percpu, VGIC_NR_PRIVATE_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i))
			overflow = 1;
	}

	/* SPIs */
	for_each_set_bit(i, vgic_cpu->pending_shared, VGIC_NR_SHARED_IRQS) {
		if (!vgic_queue_hwirq(vcpu, i + VGIC_NR_PRIVATE_IRQS))
			overflow = 1;
	}

epilog:
	if (overflow) {
		vgic_enable_underflow(vcpu);
	} else {
		vgic_disable_underflow(vcpu);
		/*
		 * We're about to run this VCPU, and we've consumed
		 * everything the distributor had in store for
		 * us. Claim we don't have anything pending. We'll
		 * adjust that if needed while exiting.
		 */
		clear_bit(vcpu_id, &dist->irq_pending_on_cpu);
	}
}

static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	u32 status = vgic_get_interrupt_status(vcpu);
	bool level_pending = false;

	kvm_debug("STATUS = %08x\n", status);

	if (status & INT_STATUS_EOI) {
		/*
		 * Some level interrupts have been EOIed. Clear their
		 * active bit.
		 */
		u64 eisr = vgic_get_eisr(vcpu);
		unsigned long *eisr_ptr = (unsigned long *)&eisr;
		int lr;

		for_each_set_bit(lr, eisr_ptr, vgic_cpu->nr_lr) {
			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);

			vgic_irq_clear_active(vcpu, vlr.irq);
			WARN_ON(vlr.state & LR_STATE_MASK);
			vlr.state = 0;
			vgic_set_lr(vcpu, lr, vlr);

			/* Any additional pending interrupt? */
			if (vgic_dist_irq_is_pending(vcpu, vlr.irq)) {
				vgic_cpu_irq_set(vcpu, vlr.irq);
				level_pending = true;
			} else {
				vgic_cpu_irq_clear(vcpu, vlr.irq);
			}

			/*
			 * Despite being EOIed, the LR may not have
			 * been marked as empty.
			 */
			vgic_sync_lr_elrsr(vcpu, lr, vlr);
		}
	}

	if (status & INT_STATUS_UNDERFLOW)
		vgic_disable_underflow(vcpu);

	return level_pending;
}

/*
 * Sync back the VGIC state after a guest run. The distributor lock is
 * needed so we don't get preempted in the middle of the state processing.
 */
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	u64 elrsr;
	unsigned long *elrsr_ptr;
	int lr, pending;
	bool level_pending;

	level_pending = vgic_process_maintenance(vcpu);
	elrsr = vgic_get_elrsr(vcpu);
	elrsr_ptr = (unsigned long *)&elrsr;

	/* Clear mappings for empty LRs */
	for_each_set_bit(lr, elrsr_ptr, vgic_cpu->nr_lr) {
		struct vgic_lr vlr;

		if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
			continue;

		vlr = vgic_get_lr(vcpu, lr);

		BUG_ON(vlr.irq >= VGIC_NR_IRQS);
		vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
	}

	/* Check if we still have something up our sleeve... */
	pending = find_first_zero_bit(elrsr_ptr, vgic_cpu->nr_lr);
	if (level_pending || pending < vgic_cpu->nr_lr)
		set_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_flush_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return;

	spin_lock(&dist->lock);
	__kvm_vgic_sync_hwstate(vcpu);
	spin_unlock(&dist->lock);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;

	if (!irqchip_in_kernel(vcpu->kvm))
		return 0;

	return test_bit(vcpu->vcpu_id, &dist->irq_pending_on_cpu);
}

static void vgic_kick_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int c;

	/*
	 * We've injected an interrupt, time to find out who deserves
	 * a good kick...
	 */
	kvm_for_each_vcpu(c, vcpu, kvm) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_vcpu_kick(vcpu);
	}
}

static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
{
	int is_edge = vgic_irq_is_edge(vcpu, irq);
	int state = vgic_dist_irq_is_pending(vcpu, irq);

	/*
	 * Only inject an interrupt if:
	 * - edge triggered and we have a rising edge
	 * - level triggered and we change level
	 */
	if (is_edge)
		return level > state;
	else
		return level != state;
}

static bool vgic_update_irq_state(struct kvm *kvm, int cpuid,
				  unsigned int irq_num, bool level)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	int is_edge, is_level;
	int enabled;
	bool ret = true;

	spin_lock(&dist->lock);

	vcpu = kvm_get_vcpu(kvm, cpuid);
	is_edge = vgic_irq_is_edge(vcpu, irq_num);
	is_level = !is_edge;

	if (!vgic_validate_injection(vcpu, irq_num, level)) {
		ret = false;
		goto out;
	}

	if (irq_num >= VGIC_NR_PRIVATE_IRQS) {
		cpuid = dist->irq_spi_cpu[irq_num - VGIC_NR_PRIVATE_IRQS];
		vcpu = kvm_get_vcpu(kvm, cpuid);
	}

	kvm_debug("Inject IRQ%d level %d CPU%d\n", irq_num, level, cpuid);

	if (level)
		vgic_dist_irq_set(vcpu, irq_num);
	else
		vgic_dist_irq_clear(vcpu, irq_num);

	enabled = vgic_irq_is_enabled(vcpu, irq_num);

	if (!enabled) {
		ret = false;
		goto out;
	}

	if (is_level && vgic_irq_is_active(vcpu, irq_num)) {
		/*
		 * Level interrupt in progress, will be picked up
		 * when EOId.
		 */
		ret = false;
		goto out;
	}

	if (level) {
		vgic_cpu_irq_set(vcpu, irq_num);
		set_bit(cpuid, &dist->irq_pending_on_cpu);
	}

out:
	spin_unlock(&dist->lock);

	return ret;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:     The VM structure pointer
 * @cpuid:   The CPU for PPIs
 * @irq_num: The IRQ number that is assigned to the device
 * @level:   Edge-triggered:  true:  to trigger the interrupt
 *			      false: to ignore the call
 *	     Level-sensitive: true:  activates an interrupt
 *			      false: deactivates an interrupt
 *
 * The GIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
			bool level)
{
	if (vgic_update_irq_state(kvm, cpuid, irq_num, level))
		vgic_kick_vcpus(kvm);

	return 0;
}

static irqreturn_t vgic_maintenance_handler(int irq, void *data)
{
	/*
	 * We cannot rely on the vgic maintenance interrupt to be
	 * delivered synchronously. This means we can only use it to
	 * exit the VM, and we perform the handling of EOIed
	 * interrupts on the exit path (see vgic_process_maintenance).
	 */
	return IRQ_HANDLED;
}

/**
 * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
 * @vcpu: pointer to the vcpu struct
 *
 * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
 * this vcpu and enable the VGIC for this VCPU
 */
int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	int i;

	if (vcpu->vcpu_id >= VGIC_MAX_CPUS)
		return -EBUSY;

	for (i = 0; i < VGIC_NR_IRQS; i++) {
		if (i < VGIC_NR_PPIS)
			vgic_bitmap_set_irq_val(&dist->irq_enabled,
						vcpu->vcpu_id, i, 1);
		if (i < VGIC_NR_PRIVATE_IRQS)
			vgic_bitmap_set_irq_val(&dist->irq_cfg,
						vcpu->vcpu_id, i, VGIC_CFG_EDGE);

		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
	}

	vgic_cpu->nr_lr = vgic_nr_lr;

	vgic_enable(vcpu);

	return 0;
}

static void vgic_init_maintenance_interrupt(void *info)
{
	enable_percpu_irq(vgic_maint_irq, 0);
}

static int vgic_cpu_notify(struct notifier_block *self,
			   unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		vgic_init_maintenance_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(vgic_maint_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block vgic_cpu_nb = {
	.notifier_call = vgic_cpu_notify,
};

int kvm_vgic_hyp_init(void)
{
	int ret;
	struct resource vctrl_res;
	struct resource vcpu_res;

	vgic_node = of_find_compatible_node(NULL, NULL, "arm,cortex-a15-gic");
	if (!vgic_node) {
		kvm_err("error: no compatible vgic node in DT\n");
		return -ENODEV;
	}

	vgic_maint_irq = irq_of_parse_and_map(vgic_node, 0);
	if (!vgic_maint_irq) {
		kvm_err("error getting vgic maintenance irq from DT\n");
		ret = -ENXIO;
		goto out;
	}

	ret = request_percpu_irq(vgic_maint_irq, vgic_maintenance_handler,
				 "vgic", kvm_get_running_vcpus());
	if (ret) {
		kvm_err("Cannot register interrupt %d\n", vgic_maint_irq);
		goto out;
	}

	ret = __register_cpu_notifier(&vgic_cpu_nb);
	if (ret) {
		kvm_err("Cannot register vgic CPU notifier\n");
		goto out_free_irq;
	}

	ret = of_address_to_resource(vgic_node, 2, &vctrl_res);
	if (ret) {
		kvm_err("Cannot obtain VCTRL resource\n");
		goto out_free_irq;
	}

	vgic_vctrl_base = of_iomap(vgic_node, 2);
	if (!vgic_vctrl_base) {
		kvm_err("Cannot ioremap VCTRL\n");
		ret = -ENOMEM;
		goto out_free_irq;
	}

1724 vgic_nr_lr = readl_relaxed(vgic_vctrl_base + GICH_VTR);
1725 vgic_nr_lr = (vgic_nr_lr & 0x3f) + 1;
1726
1727 ret = create_hyp_io_mappings(vgic_vctrl_base,
1728 vgic_vctrl_base + resource_size(&vctrl_res),
1729 vctrl_res.start);
1730 if (ret) {
1731 kvm_err("Cannot map VCTRL into hyp\n");
1732 goto out_unmap;
1733 }
1734
1735 kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
1736 vctrl_res.start, vgic_maint_irq);
1737 on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
1738
1739 if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
1740 kvm_err("Cannot obtain VCPU resource\n");
1741 ret = -ENXIO;
1742 goto out_unmap;
1743 }
1744 vgic_vcpu_base = vcpu_res.start;
1745
1746 goto out;
1747
1748out_unmap:
1749 iounmap(vgic_vctrl_base);
1750out_free_irq:
1751 free_percpu_irq(vgic_maint_irq, kvm_get_running_vcpus());
1752out:
1753 of_node_put(vgic_node);
1754 return ret;
1755}
1756
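/*
 * For reference, a sketch of the DT node kvm_vgic_hyp_init() expects.
 * The addresses are made up; reg entries 2 and 3 are the VCTRL and VCPU
 * interface regions mapped above, and the first interrupt is the
 * maintenance PPI (a typical encoding of PPI 9):
 *
 *      interrupt-controller@2c001000 {
 *              compatible = "arm,cortex-a15-gic";
 *              #interrupt-cells = <3>;
 *              interrupt-controller;
 *              reg = <0x2c001000 0x1000>,      (distributor)
 *                    <0x2c002000 0x2000>,      (CPU interface)
 *                    <0x2c004000 0x2000>,      (virtual interface control)
 *                    <0x2c006000 0x2000>;      (virtual CPU interface)
 *              interrupts = <1 9 0xf04>;
 *      };
 */
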
/**
 * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
 * @kvm: pointer to the kvm struct
 *
 * Map the virtual CPU interface into the VM before running any VCPUs. We
 * can't do this at creation time, because user space must first set the
 * virtual CPU interface address in the guest physical address space. Also
 * initialize the ITARGETSRn regs to 0 on the emulated distributor.
 */
int kvm_vgic_init(struct kvm *kvm)
{
        int ret = 0, i;

        if (!irqchip_in_kernel(kvm))
                return 0;

        mutex_lock(&kvm->lock);

        if (vgic_initialized(kvm))
                goto out;

        if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
            IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_cpu_base)) {
                kvm_err("Need to set vgic cpu and dist addresses first\n");
                ret = -ENXIO;
                goto out;
        }

        ret = kvm_phys_addr_ioremap(kvm, kvm->arch.vgic.vgic_cpu_base,
                                    vgic_vcpu_base, KVM_VGIC_V2_CPU_SIZE);
        if (ret) {
                kvm_err("Unable to remap VGIC CPU to VCPU\n");
                goto out;
        }

        for (i = VGIC_NR_PRIVATE_IRQS; i < VGIC_NR_IRQS; i += 4)
                vgic_set_target_reg(kvm, 0, i);

        kvm->arch.vgic.ready = true;
out:
        mutex_unlock(&kvm->lock);
        return ret;
}

int kvm_vgic_create(struct kvm *kvm)
{
        int i, vcpu_lock_idx = -1, ret = 0;
        struct kvm_vcpu *vcpu;

        mutex_lock(&kvm->lock);

        if (kvm->arch.vgic.vctrl_base) {
                ret = -EEXIST;
                goto out;
        }

        /*
         * Any time a vcpu is run, vcpu_load is called which tries to grab
         * the vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we
         * ensure that no other VCPUs are run while we create the vgic.
         */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!mutex_trylock(&vcpu->mutex))
                        goto out_unlock;
                vcpu_lock_idx = i;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu->arch.has_run_once) {
                        ret = -EBUSY;
                        goto out_unlock;
                }
        }

        spin_lock_init(&kvm->arch.vgic.lock);
        kvm->arch.vgic.vctrl_base = vgic_vctrl_base;
        kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
        kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;

out_unlock:
        for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
                vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
                mutex_unlock(&vcpu->mutex);
        }

out:
        mutex_unlock(&kvm->lock);
        return ret;
}

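/*
 * Sketch of the user space side (illustrative, not part of this file):
 * a VMM reaches kvm_vgic_create() above through the generic KVM device
 * API, via vgic_create() at the bottom of this file. 'vm_fd' is assumed
 * to be an open VM file descriptor:
 *
 *      struct kvm_create_device cd = {
 *              .type = KVM_DEV_TYPE_ARM_VGIC_V2,
 *      };
 *      ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *      (on success, cd.fd is the vgic device fd used with
 *       KVM_SET_DEVICE_ATTR / KVM_GET_DEVICE_ATTR)
 */
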
static int vgic_ioaddr_overlap(struct kvm *kvm)
{
        phys_addr_t dist = kvm->arch.vgic.vgic_dist_base;
        phys_addr_t cpu = kvm->arch.vgic.vgic_cpu_base;

        if (IS_VGIC_ADDR_UNDEF(dist) || IS_VGIC_ADDR_UNDEF(cpu))
                return 0;
        if ((dist <= cpu && dist + KVM_VGIC_V2_DIST_SIZE > cpu) ||
            (cpu <= dist && cpu + KVM_VGIC_V2_CPU_SIZE > dist))
                return -EBUSY;
        return 0;
}

static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
                              phys_addr_t addr, phys_addr_t size)
{
        int ret;

        if (addr & ~KVM_PHYS_MASK)
                return -E2BIG;

        if (addr & (SZ_4K - 1))
                return -EINVAL;

        if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
                return -EEXIST;
        if (addr + size < addr)
                return -EINVAL;

        *ioaddr = addr;
        ret = vgic_ioaddr_overlap(kvm);
        if (ret)
                *ioaddr = VGIC_ADDR_UNDEF;

        return ret;
}

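/*
 * Worked example of the checks above, assuming a 4K distributor region:
 * a request to place the CPU interface at 0x2c001800 fails with -EINVAL
 * (not 4K aligned), and a request for 0x2c001000 while the distributor
 * already occupies 0x2c001000-0x2c001fff fails with -EBUSY from
 * vgic_ioaddr_overlap().
 */
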
/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm: pointer to the vm struct
 * @type: the VGIC addr type, one of KVM_VGIC_V2_ADDR_TYPE_XXX
 * @addr: pointer to address value
 * @write: if true set the address in the VM address space, if false read the
 *          address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 */
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
        int r = 0;
        struct vgic_dist *vgic = &kvm->arch.vgic;

        mutex_lock(&kvm->lock);
        switch (type) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                if (write) {
                        r = vgic_ioaddr_assign(kvm, &vgic->vgic_dist_base,
                                               *addr, KVM_VGIC_V2_DIST_SIZE);
                } else {
                        *addr = vgic->vgic_dist_base;
                }
                break;
        case KVM_VGIC_V2_ADDR_TYPE_CPU:
                if (write) {
                        r = vgic_ioaddr_assign(kvm, &vgic->vgic_cpu_base,
                                               *addr, KVM_VGIC_V2_CPU_SIZE);
                } else {
                        *addr = vgic->vgic_cpu_base;
                }
                break;
        default:
                r = -ENODEV;
        }

        mutex_unlock(&kvm->lock);
        return r;
}

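/*
 * Sketch of the matching user space call (illustrative; 'vgic_fd' is a
 * vgic device fd and the base address is chosen by the VMM):
 *
 *      uint64_t dist_addr = 0x2c001000;
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *              .attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
 *              .addr  = (uint64_t)(unsigned long)&dist_addr,
 *      };
 *      ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * which reaches kvm_vgic_addr() through vgic_set_attr() below.
 */
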
static bool handle_cpu_mmio_misc(struct kvm_vcpu *vcpu,
                                 struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        bool updated = false;
        struct vgic_vmcr vmcr;
        u32 *vmcr_field;
        u32 reg;

        vgic_get_vmcr(vcpu, &vmcr);

        switch (offset & ~0x3) {
        case GIC_CPU_CTRL:
                vmcr_field = &vmcr.ctlr;
                break;
        case GIC_CPU_PRIMASK:
                vmcr_field = &vmcr.pmr;
                break;
        case GIC_CPU_BINPOINT:
                vmcr_field = &vmcr.bpr;
                break;
        case GIC_CPU_ALIAS_BINPOINT:
                vmcr_field = &vmcr.abpr;
                break;
        default:
                BUG();
        }

        if (!mmio->is_write) {
                reg = *vmcr_field;
                mmio_data_write(mmio, ~0, reg);
        } else {
                reg = mmio_data_read(mmio, ~0);
                if (reg != *vmcr_field) {
                        *vmcr_field = reg;
                        vgic_set_vmcr(vcpu, &vmcr);
                        updated = true;
                }
        }
        return updated;
}

static bool handle_mmio_abpr(struct kvm_vcpu *vcpu,
                             struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
        return handle_cpu_mmio_misc(vcpu, mmio, GIC_CPU_ALIAS_BINPOINT);
}

static bool handle_cpu_mmio_ident(struct kvm_vcpu *vcpu,
                                  struct kvm_exit_mmio *mmio,
                                  phys_addr_t offset)
{
        u32 reg;

        if (mmio->is_write)
                return false;

        /* GICC_IIDR */
        reg = (PRODUCT_ID_KVM << 20) |
              (GICC_ARCH_VERSION_V2 << 16) |
              (IMPLEMENTER_ARM << 0);
        mmio_data_write(mmio, ~0, reg);
        return false;
}

/*
 * CPU Interface Register accesses - these are not accessed by the VM, but by
 * user space for saving and restoring VGIC state.
 */
static const struct mmio_range vgic_cpu_ranges[] = {
        {
                .base = GIC_CPU_CTRL,
                .len = 12,
                .handle_mmio = handle_cpu_mmio_misc,
        },
        {
                .base = GIC_CPU_ALIAS_BINPOINT,
                .len = 4,
                .handle_mmio = handle_mmio_abpr,
        },
        {
                .base = GIC_CPU_ACTIVEPRIO,
                .len = 16,
                .handle_mmio = handle_mmio_raz_wi,
        },
        {
                .base = GIC_CPU_IDENT,
                .len = 4,
                .handle_mmio = handle_cpu_mmio_ident,
        },
};

static int vgic_attr_regs_access(struct kvm_device *dev,
                                 struct kvm_device_attr *attr,
                                 u32 *reg, bool is_write)
{
        const struct mmio_range *r = NULL, *ranges;
        phys_addr_t offset;
        int ret, cpuid, c;
        struct kvm_vcpu *vcpu, *tmp_vcpu;
        struct vgic_dist *vgic;
        struct kvm_exit_mmio mmio;

        offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
        cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
                KVM_DEV_ARM_VGIC_CPUID_SHIFT;

        mutex_lock(&dev->kvm->lock);

        if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
                ret = -EINVAL;
                goto out;
        }

        vcpu = kvm_get_vcpu(dev->kvm, cpuid);
        vgic = &dev->kvm->arch.vgic;

        mmio.len = 4;
        mmio.is_write = is_write;
        if (is_write)
                mmio_data_write(&mmio, ~0, *reg);
        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                mmio.phys_addr = vgic->vgic_dist_base + offset;
                ranges = vgic_dist_ranges;
                break;
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                mmio.phys_addr = vgic->vgic_cpu_base + offset;
                ranges = vgic_cpu_ranges;
                break;
        default:
                BUG();
        }
        r = find_matching_range(ranges, &mmio, offset);

        if (unlikely(!r || !r->handle_mmio)) {
                ret = -ENXIO;
                goto out;
        }

        spin_lock(&vgic->lock);

        /*
         * Ensure that no other VCPU is running by checking the vcpu->cpu
         * field. If no other VCPUs are running we can safely access the
         * VGIC state, because even if another VCPU is run after this
         * point, that VCPU will not touch the vgic state, because it will
         * block on getting the vgic->lock in kvm_vgic_sync_hwstate().
         */
        kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
                if (unlikely(tmp_vcpu->cpu != -1)) {
                        ret = -EBUSY;
                        goto out_vgic_unlock;
                }
        }

        /*
         * Move all pending IRQs from the LRs on all VCPUs so the pending
         * state can be properly represented in the register state accessible
         * through this API.
         */
        kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm)
                vgic_unqueue_irqs(tmp_vcpu);

        offset -= r->base;
        r->handle_mmio(vcpu, &mmio, offset);

        if (!is_write)
                *reg = mmio_data_read(&mmio, ~0);

        ret = 0;
out_vgic_unlock:
        spin_unlock(&vgic->lock);
out:
        mutex_unlock(&dev->kvm->lock);
        return ret;
}

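/*
 * Sketch of a user space read of a per-vcpu CPU interface register via
 * the function above (illustrative; 'vgic_fd' is assumed). The attr
 * field packs the vcpu id and the register offset, here GIC_CPU_PRIMASK
 * (offset 0x04) on vcpu 1:
 *
 *      uint32_t reg;
 *      struct kvm_device_attr attr = {
 *              .group = KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
 *              .attr  = (1ULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | 0x04,
 *              .addr  = (uint64_t)(unsigned long)&reg,
 *      };
 *      ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
 */
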
static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int r;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                if (copy_from_user(&addr, uaddr, sizeof(addr)))
                        return -EFAULT;

                r = kvm_vgic_addr(dev->kvm, type, &addr, true);
                return (r == -ENODEV) ? -ENXIO : r;
        }

        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg;

                if (get_user(reg, uaddr))
                        return -EFAULT;

                return vgic_attr_regs_access(dev, attr, &reg, true);
        }

        }

        return -ENXIO;
}

static int vgic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        int r = -ENXIO;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR: {
                u64 __user *uaddr = (u64 __user *)(long)attr->addr;
                u64 addr;
                unsigned long type = (unsigned long)attr->attr;

                r = kvm_vgic_addr(dev->kvm, type, &addr, false);
                if (r)
                        return (r == -ENODEV) ? -ENXIO : r;

                if (copy_to_user(uaddr, &addr, sizeof(addr)))
                        return -EFAULT;
                break;
        }

        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS: {
                u32 __user *uaddr = (u32 __user *)(long)attr->addr;
                u32 reg = 0;

                r = vgic_attr_regs_access(dev, attr, &reg, false);
                if (r)
                        return r;
                r = put_user(reg, uaddr);
                break;
        }

        }

        return r;
}

static int vgic_has_attr_regs(const struct mmio_range *ranges,
                              phys_addr_t offset)
{
        struct kvm_exit_mmio dev_attr_mmio;

        dev_attr_mmio.len = 4;
        if (find_matching_range(ranges, &dev_attr_mmio, offset))
                return 0;
        else
                return -ENXIO;
}

static int vgic_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
        phys_addr_t offset;

        switch (attr->group) {
        case KVM_DEV_ARM_VGIC_GRP_ADDR:
                switch (attr->attr) {
                case KVM_VGIC_V2_ADDR_TYPE_DIST:
                case KVM_VGIC_V2_ADDR_TYPE_CPU:
                        return 0;
                }
                break;
        case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
                offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
                return vgic_has_attr_regs(vgic_dist_ranges, offset);
        case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
                offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
                return vgic_has_attr_regs(vgic_cpu_ranges, offset);
        }
        return -ENXIO;
}

static void vgic_destroy(struct kvm_device *dev)
{
        kfree(dev);
}

static int vgic_create(struct kvm_device *dev, u32 type)
{
        return kvm_vgic_create(dev->kvm);
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
        .name = "kvm-arm-vgic",
        .create = vgic_create,
        .destroy = vgic_destroy,
        .set_attr = vgic_set_attr,
        .get_attr = vgic_get_attr,
        .has_attr = vgic_has_attr,
};