/*
 * GICv3 distributor and redistributor emulation
 *
 * GICv3 emulation is currently only supported on a GICv3 host (because
 * we rely on the hardware's CPU interface virtualization support), but
 * supports both hardware with or without the optional GICv2 backwards
 * compatibility features.
 *
 * Limitations of the emulation:
 * (RAZ/WI: read as zero, write ignore, RAO/WI: read as one, write ignore)
 * - We do not support LPIs (yet). TYPER.LPIS is reported as 0 and is RAZ/WI.
 * - We do not support the message-based interrupts (MBIs) triggered by
 *   writes to the GICD_{SET,CLR}SPI_* registers. TYPER.MBIS is reported as 0.
 * - We do not support the (optional) backwards compatibility feature.
 *   GICD_CTLR.ARE resets to 1 and is RAO/WI. If the _host_ GIC supports
 *   the compatibility feature, you can use a GICv2 in the guest, though.
 * - We only support a single security state. GICD_CTLR.DS is 1 and is RAO/WI.
 * - Priorities are not emulated (same as the GICv2 emulation). Linux
 *   as a guest is fine with this, because it does not use priorities.
 * - We only support Group1 interrupts. Again Linux uses only those.
 *
 * Copyright (C) 2014 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <linux/irqchip/arm-gic-v3.h>
#include <kvm/arm_vgic.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"

static bool handle_mmio_rao_wi(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg = 0xffffffff;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}

static bool handle_mmio_ctlr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg = 0;

	/*
	 * Force ARE and DS to 1, the guest cannot change this.
	 * For the time being we only support Group1 interrupts.
	 */
	if (vcpu->kvm->arch.vgic.enabled)
		reg = GICD_CTLR_ENABLE_SS_G1;
	reg |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	if (mmio->is_write) {
		if (reg & GICD_CTLR_ENABLE_SS_G0)
			kvm_info("guest tried to enable unsupported Group0 interrupts\n");
		vcpu->kvm->arch.vgic.enabled = !!(reg & GICD_CTLR_ENABLE_SS_G1);
		vgic_update_state(vcpu->kvm);
		return true;
	}
	return false;
}

/*
 * As this implementation does not provide compatibility
 * with GICv2 (ARE==1), we report zero CPUs in bits [7:5].
 * Also LPIs and MBIs are not supported, so we set the respective bits to 0.
 * We report at most 2**10=1024 interrupt IDs (to match 1024 SPIs).
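 * For example, with the maximum of 1024 IRQs the register reads as
 * ((1024 >> 5) - 1) | ((10 - 1) << 19) = 0x0048001f.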
 */
#define INTERRUPT_ID_BITS 10
static bool handle_mmio_typer(struct kvm_vcpu *vcpu,
			      struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	reg = (min(vcpu->kvm->arch.vgic.nr_irqs, 1024) >> 5) - 1;

	reg |= (INTERRUPT_ID_BITS - 1) << 19;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}

static bool handle_mmio_iidr(struct kvm_vcpu *vcpu,
			     struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
	u32 reg;

	reg = (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}

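/*
 * When ARE is set, the distributor's registers for the first 32
 * (private) interrupts are RES0: SGIs and PPIs are handled via each
 * VCPU's redistributor instead. The VGIC_NR_PRIVATE_IRQS checks below
 * therefore treat the leading part of each register range as RAZ/WI.
 */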
static bool handle_mmio_set_enable_reg_dist(struct kvm_vcpu *vcpu,
					    struct kvm_exit_mmio *mmio,
					    phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
					      vcpu->vcpu_id,
					      ACCESS_WRITE_SETBIT);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_clear_enable_reg_dist(struct kvm_vcpu *vcpu,
					      struct kvm_exit_mmio *mmio,
					      phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
					      vcpu->vcpu_id,
					      ACCESS_WRITE_CLEARBIT);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_set_pending_reg_dist(struct kvm_vcpu *vcpu,
					     struct kvm_exit_mmio *mmio,
					     phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
						   vcpu->vcpu_id);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_clear_pending_reg_dist(struct kvm_vcpu *vcpu,
					       struct kvm_exit_mmio *mmio,
					       phys_addr_t offset)
{
	if (likely(offset >= VGIC_NR_PRIVATE_IRQS / 8))
		return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
						     vcpu->vcpu_id);

	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_priority_reg_dist(struct kvm_vcpu *vcpu,
					  struct kvm_exit_mmio *mmio,
					  phys_addr_t offset)
{
	u32 *reg;

	if (unlikely(offset < VGIC_NR_PRIVATE_IRQS)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
				   vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

static bool handle_mmio_cfg_reg_dist(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 *reg;

	if (unlikely(offset < VGIC_NR_PRIVATE_IRQS / 4)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				  vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}

/*
 * We use a compressed version of the MPIDR (all 32 bits in one 32-bit word)
 * when we store the target MPIDR written by the guest.
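 * For example, MPIDR 0x0000000100020304 (Aff3=1, Aff2=2, Aff1=3, Aff0=4)
 * is stored as 0x01020304; uncompress_mpidr() reverses the mapping.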
 */
static u32 compress_mpidr(unsigned long mpidr)
{
	u32 ret;

	ret = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8;
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16;
	ret |= MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24;

	return ret;
}

static unsigned long uncompress_mpidr(u32 value)
{
	unsigned long mpidr;

	mpidr = ((value >> 0) & 0xFF) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((value >> 8) & 0xFF) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((value >> 16) & 0xFF) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (u64)((value >> 24) & 0xFF) << MPIDR_LEVEL_SHIFT(3);

	return mpidr;
}

/*
 * Lookup the given MPIDR value to get the vcpu_id (if there is one)
 * and store that in the irq_spi_cpu[] array.
 * This limits the number of VCPUs to 255 for now, extending the data
 * type (or storing kvm_vcpu pointers) should lift the limit.
 * Store the original MPIDR value in an extra array to support read-as-written.
 * Unallocated MPIDRs are translated to a special value and caught
 * before any array accesses.
 */
static bool handle_mmio_route_reg(struct kvm_vcpu *vcpu,
				  struct kvm_exit_mmio *mmio,
				  phys_addr_t offset)
{
	struct kvm *kvm = vcpu->kvm;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int spi;
	u32 reg;
	int vcpu_id;
	unsigned long *bmap, mpidr;

	/*
	 * The upper 32 bits of each 64-bit register are zero,
	 * as we don't support Aff3.
	 */
	if ((offset & 4)) {
		vgic_reg_access(mmio, NULL, offset,
				ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
		return false;
	}

	/* This region only covers SPIs, so no handling of private IRQs here. */
	spi = offset / 8;

	/* get the stored MPIDR for this IRQ */
	mpidr = uncompress_mpidr(dist->irq_spi_mpidr[spi]);
	reg = mpidr;

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);

	if (!mmio->is_write)
		return false;

	/*
	 * Now clear the currently assigned vCPU from the map, making room
	 * for the new one to be written below.
	 */
	vcpu = kvm_mpidr_to_vcpu(kvm, mpidr);
	if (likely(vcpu)) {
		vcpu_id = vcpu->vcpu_id;
		bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
		__clear_bit(spi, bmap);
	}

	dist->irq_spi_mpidr[spi] = compress_mpidr(reg);
	vcpu = kvm_mpidr_to_vcpu(kvm, reg & MPIDR_HWID_BITMASK);

	/*
	 * The spec says that non-existent MPIDR values should not be
	 * forwarded to any existing (v)CPU, but should be able to become
	 * pending anyway. We simply keep the irq_spi_target[] array empty, so
	 * the interrupt will never be injected.
	 * irq_spi_cpu[irq] gets a magic value in this case.
	 */
	if (likely(vcpu)) {
		vcpu_id = vcpu->vcpu_id;
		dist->irq_spi_cpu[spi] = vcpu_id;
		bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[vcpu_id]);
		__set_bit(spi, bmap);
	} else {
		dist->irq_spi_cpu[spi] = VCPU_NOT_ALLOCATED;
	}

	vgic_update_state(kvm);

	return true;
}

/*
 * We should be careful about promising too much when a guest reads
 * this register. Don't claim to be like any hardware implementation,
 * but just report the GIC as version 3 - which is what a Linux guest
 * would check.
 */
static bool handle_mmio_idregs(struct kvm_vcpu *vcpu,
			       struct kvm_exit_mmio *mmio,
			       phys_addr_t offset)
{
	u32 reg = 0;

	switch (offset + GICD_IDREGS) {
	case GICD_PIDR2:
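		/* ArchRev (bits [7:4]) = 0x3 identifies this as a GICv3 */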
		reg = 0x3b;
		break;
	}

	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);

	return false;
}

static const struct kvm_mmio_range vgic_v3_dist_ranges[] = {
	{
		.base = GICD_CTLR,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_ctlr,
	},
	{
		.base = GICD_TYPER,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_typer,
	},
	{
		.base = GICD_IIDR,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_iidr,
	},
	{
		/* this register is optional, it is RAZ/WI if not implemented */
		.base = GICD_STATUSR,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this write-only register is WI when TYPER.MBIS=0 */
		.base = GICD_SETSPI_NSR,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this write-only register is WI when TYPER.MBIS=0 */
		.base = GICD_CLRSPI_NSR,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base = GICD_SETSPI_SR,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base = GICD_CLRSPI_SR,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICD_IGROUPR,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_rao_wi,
	},
	{
		.base = GICD_ISENABLER,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_enable_reg_dist,
	},
	{
		.base = GICD_ICENABLER,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_enable_reg_dist,
	},
	{
		.base = GICD_ISPENDR,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_pending_reg_dist,
	},
	{
		.base = GICD_ICPENDR,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_pending_reg_dist,
	},
	{
		.base = GICD_ISACTIVER,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICD_ICACTIVER,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICD_IPRIORITYR,
		.len = 0x400,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_priority_reg_dist,
	},
	{
		/* TARGETSRn is RES0 when ARE=1 */
		.base = GICD_ITARGETSR,
		.len = 0x400,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICD_ICFGR,
		.len = 0x100,
		.bits_per_irq = 2,
		.handle_mmio = handle_mmio_cfg_reg_dist,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base = GICD_IGRPMODR,
		.len = 0x80,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when DS=1 */
		.base = GICD_NSACR,
		.len = 0x100,
		.bits_per_irq = 2,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base = GICD_SGIR,
		.len = 0x04,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base = GICD_CPENDSGIR,
		.len = 0x10,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		/* this is RAZ/WI when ARE=1 */
		.base = GICD_SPENDSGIR,
		.len = 0x10,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
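		/*
		 * Skip the first 0x100 bytes (IROUTER0-31): routing for
		 * private IRQs is RES0. 0x1ee0 then covers IROUTER32-1019,
		 * as IDs 1020-1023 are special and have no routing registers.
		 */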
		.base = GICD_IROUTER + 0x100,
		.len = 0x1ee0,
		.bits_per_irq = 64,
		.handle_mmio = handle_mmio_route_reg,
	},
	{
		.base = GICD_IDREGS,
		.len = 0x30,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_idregs,
	},
	{},
};

static bool handle_mmio_set_enable_reg_redist(struct kvm_vcpu *vcpu,
					      struct kvm_exit_mmio *mmio,
					      phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      redist_vcpu->vcpu_id,
				      ACCESS_WRITE_SETBIT);
}

static bool handle_mmio_clear_enable_reg_redist(struct kvm_vcpu *vcpu,
						struct kvm_exit_mmio *mmio,
						phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_enable_reg(vcpu->kvm, mmio, offset,
				      redist_vcpu->vcpu_id,
				      ACCESS_WRITE_CLEARBIT);
}

static bool handle_mmio_set_pending_reg_redist(struct kvm_vcpu *vcpu,
					       struct kvm_exit_mmio *mmio,
					       phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_set_pending_reg(vcpu->kvm, mmio, offset,
					   redist_vcpu->vcpu_id);
}

static bool handle_mmio_clear_pending_reg_redist(struct kvm_vcpu *vcpu,
						 struct kvm_exit_mmio *mmio,
						 phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	return vgic_handle_clear_pending_reg(vcpu->kvm, mmio, offset,
					     redist_vcpu->vcpu_id);
}

static bool handle_mmio_priority_reg_redist(struct kvm_vcpu *vcpu,
					    struct kvm_exit_mmio *mmio,
					    phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;
	u32 *reg;

	reg = vgic_bytemap_get_reg(&vcpu->kvm->arch.vgic.irq_priority,
				   redist_vcpu->vcpu_id, offset);
	vgic_reg_access(mmio, reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
	return false;
}

static bool handle_mmio_cfg_reg_redist(struct kvm_vcpu *vcpu,
				       struct kvm_exit_mmio *mmio,
				       phys_addr_t offset)
{
	struct kvm_vcpu *redist_vcpu = mmio->private;

	u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
				       redist_vcpu->vcpu_id, offset >> 1);

	return vgic_handle_cfg_reg(reg, mmio, offset);
}

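/*
 * These registers live in the second (SGI/PPI) 64K frame of each
 * redistributor; vgic_v3_handle_mmio() rebases the faulting address
 * to that frame before dispatching here.
 */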
static const struct kvm_mmio_range vgic_redist_sgi_ranges[] = {
	{
		.base = GICR_IGROUPR0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_rao_wi,
	},
	{
		.base = GICR_ISENABLER0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_enable_reg_redist,
	},
	{
		.base = GICR_ICENABLER0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_enable_reg_redist,
	},
	{
		.base = GICR_ISPENDR0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_set_pending_reg_redist,
	},
	{
		.base = GICR_ICPENDR0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_clear_pending_reg_redist,
	},
	{
		.base = GICR_ISACTIVER0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICR_ICACTIVER0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICR_IPRIORITYR0,
		.len = 0x20,
		.bits_per_irq = 8,
		.handle_mmio = handle_mmio_priority_reg_redist,
	},
	{
		.base = GICR_ICFGR0,
		.len = 0x08,
		.bits_per_irq = 2,
		.handle_mmio = handle_mmio_cfg_reg_redist,
	},
	{
		.base = GICR_IGRPMODR0,
		.len = 0x04,
		.bits_per_irq = 1,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICR_NSACR,
		.len = 0x04,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{},
};

static bool handle_mmio_ctlr_redist(struct kvm_vcpu *vcpu,
				    struct kvm_exit_mmio *mmio,
				    phys_addr_t offset)
{
	/* since we don't support LPIs, this register is zero for now */
	vgic_reg_access(mmio, NULL, offset,
			ACCESS_READ_RAZ | ACCESS_WRITE_IGNORED);
	return false;
}

static bool handle_mmio_typer_redist(struct kvm_vcpu *vcpu,
				     struct kvm_exit_mmio *mmio,
				     phys_addr_t offset)
{
	u32 reg;
	u64 mpidr;
	struct kvm_vcpu *redist_vcpu = mmio->private;
	int target_vcpu_id = redist_vcpu->vcpu_id;

	/* the upper 32 bits contain the affinity value */
	if ((offset & ~3) == 4) {
		mpidr = kvm_vcpu_get_mpidr_aff(redist_vcpu);
		reg = compress_mpidr(mpidr);

		vgic_reg_access(mmio, &reg, offset,
				ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
		return false;
	}

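	/*
	 * Processor_Number lives in bits [23:8]; the Last bit flags the
	 * final redistributor in this contiguous region.
	 */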
	reg = redist_vcpu->vcpu_id << 8;
	if (target_vcpu_id == atomic_read(&vcpu->kvm->online_vcpus) - 1)
		reg |= GICR_TYPER_LAST;
	vgic_reg_access(mmio, &reg, offset,
			ACCESS_READ_VALUE | ACCESS_WRITE_IGNORED);
	return false;
}

static const struct kvm_mmio_range vgic_redist_ranges[] = {
	{
		.base = GICR_CTLR,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_ctlr_redist,
	},
	{
		.base = GICR_TYPER,
		.len = 0x08,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_typer_redist,
	},
	{
		.base = GICR_IIDR,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_iidr,
	},
	{
		.base = GICR_WAKER,
		.len = 0x04,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_raz_wi,
	},
	{
		.base = GICR_IDREGS,
		.len = 0x30,
		.bits_per_irq = 0,
		.handle_mmio = handle_mmio_idregs,
	},
	{},
};

/*
 * This function splits accesses between the distributor and the two
 * redistributor parts (private/SPI). As each redistributor is accessible
 * from any CPU, we have to determine the affected VCPU by taking the faulting
 * address into account. We then pass this VCPU to the handler function via
 * the private parameter.
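 * Each VCPU's redistributor consists of two contiguous 64K frames: the
 * first holds the control registers (vgic_redist_ranges), the second
 * the SGI/PPI registers (vgic_redist_sgi_ranges).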
 */
#define SGI_BASE_OFFSET SZ_64K
static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
				struct kvm_exit_mmio *mmio)
{
	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
	unsigned long dbase = dist->vgic_dist_base;
	unsigned long rdbase = dist->vgic_redist_base;
	int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
	int vcpu_id;
	const struct kvm_mmio_range *mmio_range;

	if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
		return vgic_handle_mmio_range(vcpu, run, mmio,
					      vgic_v3_dist_ranges, dbase);
	}

	if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
	    GIC_V3_REDIST_SIZE * nrcpus))
		return false;

	vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
	rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
	mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);

	if (mmio->phys_addr >= rdbase + SGI_BASE_OFFSET) {
		rdbase += SGI_BASE_OFFSET;
		mmio_range = vgic_redist_sgi_ranges;
	} else {
		mmio_range = vgic_redist_ranges;
	}
	return vgic_handle_mmio_range(vcpu, run, mmio, mmio_range, rdbase);
}

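/*
 * With ARE=1 SGIs no longer carry a source CPU ID, so we can queue them
 * like any other interrupt, passing 0 as the source.
 */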
static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
{
	if (vgic_queue_irq(vcpu, 0, irq)) {
		vgic_dist_irq_clear_pending(vcpu, irq);
		vgic_cpu_irq_clear(vcpu, irq);
		return true;
	}

	return false;
}

static int vgic_v3_map_resources(struct kvm *kvm,
				 const struct vgic_params *params)
{
	int ret = 0;
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (!irqchip_in_kernel(kvm))
		return 0;

	mutex_lock(&kvm->lock);

	if (vgic_ready(kvm))
		goto out;

	if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
	    IS_VGIC_ADDR_UNDEF(dist->vgic_redist_base)) {
		kvm_err("Need to set vgic distributor addresses first\n");
		ret = -ENXIO;
		goto out;
	}

	/*
	 * For a VGICv3 we require userspace to explicitly initialize
	 * the VGIC before we can use it.
	 */
	if (!vgic_initialized(kvm)) {
		ret = -EBUSY;
		goto out;
	}

	kvm->arch.vgic.ready = true;
out:
	if (ret)
		kvm_vgic_destroy(kvm);
	mutex_unlock(&kvm->lock);
	return ret;
}

static int vgic_v3_init_model(struct kvm *kvm)
{
	int i;
	u32 mpidr;
	struct vgic_dist *dist = &kvm->arch.vgic;
	int nr_spis = dist->nr_irqs - VGIC_NR_PRIVATE_IRQS;

	dist->irq_spi_mpidr = kcalloc(nr_spis, sizeof(dist->irq_spi_mpidr[0]),
				      GFP_KERNEL);

	if (!dist->irq_spi_mpidr)
		return -ENOMEM;

	/* Initialize the target VCPUs for each IRQ to VCPU 0 */
	mpidr = compress_mpidr(kvm_vcpu_get_mpidr_aff(kvm_get_vcpu(kvm, 0)));
	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i++) {
		dist->irq_spi_cpu[i - VGIC_NR_PRIVATE_IRQS] = 0;
		dist->irq_spi_mpidr[i - VGIC_NR_PRIVATE_IRQS] = mpidr;
		vgic_bitmap_set_irq_val(dist->irq_spi_target, 0, i, 1);
	}

	return 0;
}

/* GICv3 does not keep track of SGI sources anymore. */
static void vgic_v3_add_sgi_source(struct kvm_vcpu *vcpu, int irq, int source)
{
}

void vgic_v3_init_emulation(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
	dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
	dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
	dist->vm_ops.init_model = vgic_v3_init_model;
	dist->vm_ops.map_resources = vgic_v3_map_resources;

	kvm->arch.max_vcpus = KVM_MAX_VCPUS;
}

static int vgic_v3_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_v3_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

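/*
 * Beyond the attributes handled by the common code, direct distributor
 * and CPU interface register access (the DIST_REGS and CPU_REGS groups)
 * is not (yet) implemented for the GICv3 model, hence the -ENXIO below.
 */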
static int vgic_v3_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_set_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return -ENXIO;
	}

	return -ENXIO;
}

static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	int ret;

	ret = vgic_get_common_attr(dev, attr);
	if (ret != -ENXIO)
		return ret;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return -ENXIO;
	}

	return -ENXIO;
}

static int vgic_v3_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return -ENXIO;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return -ENXIO;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_v3_create,
	.destroy = vgic_v3_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};