/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/compiler.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

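/*
 * Field extraction from ICH_VTR_EL2, which describes the virtual CPU
 * interface: the low bits give the index of the last implemented List
 * Register, and bits [28:26] give PREbits, the number of implemented
 * preemption bits minus one. As a worked example, PREbits == 5 means
 * 6 bits of preemption, so vtr_to_nr_apr_regs() == 1 << (6 - 5) == 2
 * active-priority registers of 32 bits each, covering the 64 possible
 * preemption levels.
 */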
#define vtr_to_max_lr_idx(v)		((v) & 0xf)
#define vtr_to_nr_pre_bits(v)		((((u32)(v) >> 26) & 7) + 1)
#define vtr_to_nr_apr_regs(v)		(1 << (vtr_to_nr_pre_bits(v) - 5))

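/*
 * The List Registers are individual system registers
 * (ICH_LR0_EL2..ICH_LR15_EL2) rather than an indexable array, and
 * mrs/msr encode the register name in the instruction itself, so
 * runtime-indexed accesses have to be open-coded as the switches
 * below.
 */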
static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
{
	switch (lr & 0xf) {
	case 0:
		return read_gicreg(ICH_LR0_EL2);
	case 1:
		return read_gicreg(ICH_LR1_EL2);
	case 2:
		return read_gicreg(ICH_LR2_EL2);
	case 3:
		return read_gicreg(ICH_LR3_EL2);
	case 4:
		return read_gicreg(ICH_LR4_EL2);
	case 5:
		return read_gicreg(ICH_LR5_EL2);
	case 6:
		return read_gicreg(ICH_LR6_EL2);
	case 7:
		return read_gicreg(ICH_LR7_EL2);
	case 8:
		return read_gicreg(ICH_LR8_EL2);
	case 9:
		return read_gicreg(ICH_LR9_EL2);
	case 10:
		return read_gicreg(ICH_LR10_EL2);
	case 11:
		return read_gicreg(ICH_LR11_EL2);
	case 12:
		return read_gicreg(ICH_LR12_EL2);
	case 13:
		return read_gicreg(ICH_LR13_EL2);
	case 14:
		return read_gicreg(ICH_LR14_EL2);
	case 15:
		return read_gicreg(ICH_LR15_EL2);
	}

	unreachable();
}

static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
{
	switch (lr & 0xf) {
	case 0:
		write_gicreg(val, ICH_LR0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_LR1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_LR2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_LR3_EL2);
		break;
	case 4:
		write_gicreg(val, ICH_LR4_EL2);
		break;
	case 5:
		write_gicreg(val, ICH_LR5_EL2);
		break;
	case 6:
		write_gicreg(val, ICH_LR6_EL2);
		break;
	case 7:
		write_gicreg(val, ICH_LR7_EL2);
		break;
	case 8:
		write_gicreg(val, ICH_LR8_EL2);
		break;
	case 9:
		write_gicreg(val, ICH_LR9_EL2);
		break;
	case 10:
		write_gicreg(val, ICH_LR10_EL2);
		break;
	case 11:
		write_gicreg(val, ICH_LR11_EL2);
		break;
	case 12:
		write_gicreg(val, ICH_LR12_EL2);
		break;
	case 13:
		write_gicreg(val, ICH_LR13_EL2);
		break;
	case 14:
		write_gicreg(val, ICH_LR14_EL2);
		break;
	case 15:
		write_gicreg(val, ICH_LR15_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap0rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP0R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP0R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP0R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP0R3_EL2);
		break;
	}
}

static void __hyp_text __vgic_v3_write_ap1rn(u32 val, int n)
{
	switch (n) {
	case 0:
		write_gicreg(val, ICH_AP1R0_EL2);
		break;
	case 1:
		write_gicreg(val, ICH_AP1R1_EL2);
		break;
	case 2:
		write_gicreg(val, ICH_AP1R2_EL2);
		break;
	case 3:
		write_gicreg(val, ICH_AP1R3_EL2);
		break;
	}
}

static u32 __hyp_text __vgic_v3_read_ap0rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP0R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP0R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP0R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP0R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
{
	u32 val;

	switch (n) {
	case 0:
		val = read_gicreg(ICH_AP1R0_EL2);
		break;
	case 1:
		val = read_gicreg(ICH_AP1R1_EL2);
		break;
	case 2:
		val = read_gicreg(ICH_AP1R2_EL2);
		break;
	case 3:
		val = read_gicreg(ICH_AP1R3_EL2);
		break;
	default:
		unreachable();
	}

	return val;
}

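/*
 * Sketch of the save sequence below: any LR flagged in ICH_ELSR_EL2 is
 * empty, so only the state bits of its cached copy need clearing;
 * every other used LR is read back in full. Each used LR is then
 * zeroed so no stale state is left in the hardware.
 */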
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;

	/*
	 * Make sure stores to the GIC via the memory mapped interface
	 * are now visible to the system register interface when reading the
	 * LRs, and when reading back the VMCR on non-VHE systems.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre)
			dsb(st);
	}

	if (used_lrs) {
		int i;
		u32 elrsr;

		elrsr = read_gicreg(ICH_ELSR_EL2);

		write_gicreg(cpu_if->vgic_hcr & ~ICH_HCR_EN, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++) {
			if (elrsr & (1 << i))
				cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
			else
				cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);

			__gic_v3_set_lr(0, i);
		}
	}
}

void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	if (used_lrs) {
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);

		for (i = 0; i < used_lrs; i++)
			__gic_v3_set_lr(cpu_if->vgic_lr[i], i);
	}

	/*
	 * Ensure that writes to the LRs, and on non-VHE systems the
	 * write to the VMCR in __vgic_v3_activate_traps(), have
	 * reached the (re)distributors. This ensures the guest will
	 * read the correct values from the memory-mapped interface.
	 */
	if (used_lrs || !has_vhe()) {
		if (!cpu_if->vgic_sre) {
			isb();
			dsb(sy);
		}
	}
}

void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/*
	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
	 * Group0 interrupt (as generated in GICv2 mode) to be
	 * delivered as a FIQ to the guest, with potentially fatal
	 * consequences. So we must make sure that ICC_SRE_EL1 has
	 * been actually programmed with the value we want before
	 * starting to mess with the rest of the GIC, and VMCR_EL2 in
	 * particular. This logic must be called before
	 * __vgic_v3_restore_state().
	 */
	if (!cpu_if->vgic_sre) {
		write_gicreg(0, ICC_SRE_EL1);
		isb();
		write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);

		if (has_vhe()) {
			/*
			 * Ensure that the write to the VMCR will have reached
			 * the (re)distributors. This ensures the guest will
			 * read the correct values from the memory-mapped
			 * interface.
			 */
			isb();
			dsb(sy);
		}
	}

	/*
	 * Prevent the guest from touching the GIC system registers if
	 * SRE isn't enabled for GICv3 emulation.
	 */
	write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
		     ICC_SRE_EL2);

	/*
	 * If we need to trap system registers, we must write
	 * ICH_HCR_EL2 anyway, even if no interrupts are being
	 * injected.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
	u64 val;

	if (!cpu_if->vgic_sre) {
		cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
	}

	val = read_gicreg(ICC_SRE_EL2);
	write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);

	if (!cpu_if->vgic_sre) {
		/* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
		isb();
		write_gicreg(1, ICC_SRE_EL1);
	}

	/*
	 * If we were trapping system registers, we enabled the VGIC even if
	 * no interrupts were being injected, and we disable it again here.
	 */
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
	    cpu_if->its_vpe.its_vm)
		write_gicreg(0, ICH_HCR_EL2);
}

void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
	case 6:
		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
	default:
		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
	}

	switch (nr_pre_bits) {
	case 7:
		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
	case 6:
		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
	default:
		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
	}
}

void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if;
	u64 val;
	u32 nr_pre_bits;

	vcpu = kern_hyp_va(vcpu);
	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	val = read_gicreg(ICH_VTR_EL2);
	nr_pre_bits = vtr_to_nr_pre_bits(val);

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
	case 6:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
	default:
		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
	}

	switch (nr_pre_bits) {
	case 7:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
	case 6:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
	default:
		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
	}
}

void __hyp_text __vgic_v3_init_lrs(void)
{
	int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
	int i;

	for (i = 0; i <= max_lr_idx; i++)
		__gic_v3_set_lr(0, i);
}

u64 __hyp_text __vgic_v3_get_ich_vtr_el2(void)
{
	return read_gicreg(ICH_VTR_EL2);
}

u64 __hyp_text __vgic_v3_read_vmcr(void)
{
	return read_gicreg(ICH_VMCR_EL2);
}

void __hyp_text __vgic_v3_write_vmcr(u32 vmcr)
{
	write_gicreg(vmcr, ICH_VMCR_EL2);
}

#ifdef CONFIG_ARM64

static int __hyp_text __vgic_v3_bpr_min(void)
{
	/* See Pseudocode for VPriorityGroup */
	return 8 - vtr_to_nr_pre_bits(read_gicreg(ICH_VTR_EL2));
}

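/*
 * Group 0 or Group 1? For the banked accessors trapped here
 * (ICC_IAR0_EL1, ICC_EOIR0_EL1, ICC_AP0Rn_EL1, ...), the system
 * register encoding places the Group-0 variants at CRm == 8, so any
 * other CRm value denotes a Group-1 access.
 */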
static int __hyp_text __vgic_v3_get_group(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_hsr(vcpu);
	u8 crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;

	return crm != 8;
}

#define GICv3_IDLE_PRIORITY	0xff

static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
						    u32 vmcr,
						    u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	u8 priority = GICv3_IDLE_PRIORITY;
	int i, lr = -1;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);
		u8 lr_prio = (val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

		/* Not pending in the state? */
		if ((val & ICH_LR_STATE) != ICH_LR_PENDING_BIT)
			continue;

		/* Group-0 interrupt, but Group-0 disabled? */
		if (!(val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG0_MASK))
			continue;

		/* Group-1 interrupt, but Group-1 disabled? */
		if ((val & ICH_LR_GROUP) && !(vmcr & ICH_VMCR_ENG1_MASK))
			continue;

		/* Not the highest priority? */
		if (lr_prio >= priority)
			continue;

		/* This is a candidate */
		priority = lr_prio;
		*lr_val = val;
		lr = i;
	}

	if (lr == -1)
		*lr_val = ICC_IAR1_EL1_SPURIOUS;

	return lr;
}

static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
					       int intid, u64 *lr_val)
{
	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
	int i;

	for (i = 0; i < used_lrs; i++) {
		u64 val = __gic_v3_get_lr(i);

		if ((val & ICH_LR_VIRTUAL_ID_MASK) == intid &&
		    (val & ICH_LR_ACTIVE_BIT)) {
			*lr_val = val;
			return i;
		}
	}

	*lr_val = ICC_IAR1_EL1_SPURIOUS;
	return -1;
}

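/*
 * A worked example for the scan below, assuming 6 preemption bits
 * (two APR registers, __vgic_v3_bpr_min() == 2): if the lowest bit set
 * across AP0R0/AP1R0 is bit 9, the active group is 9 and the returned
 * value is 9 << 2 == 0x24, i.e. the group rescaled to the full 8-bit
 * priority space.
 */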
static int __hyp_text __vgic_v3_get_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 val;

		/*
		 * The ICH_AP0Rn_EL2 and ICH_AP1Rn_EL2 registers contain
		 * the active priority levels for this VCPU for the
		 * maximum number of supported priority levels. We
		 * return the full priority level only if the BPR is
		 * programmed to its minimum; otherwise we return a
		 * combination of the priority level and subpriority, as
		 * determined by the BPR setting, but without the full
		 * subpriority.
		 */
		val = __vgic_v3_read_ap0rn(i);
		val |= __vgic_v3_read_ap1rn(i);
		if (!val) {
			hap += 32;
			continue;
		}

		return (hap + __ffs(val)) << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

static unsigned int __hyp_text __vgic_v3_get_bpr0(u32 vmcr)
{
	return (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
}

static unsigned int __hyp_text __vgic_v3_get_bpr1(u32 vmcr)
{
	unsigned int bpr;

	if (vmcr & ICH_VMCR_CBPR_MASK) {
		bpr = __vgic_v3_get_bpr0(vmcr);
		if (bpr < 7)
			bpr++;
	} else {
		bpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
	}

	return bpr;
}

/*
 * Convert a priority to a preemption level, taking the relevant BPR
 * into account by zeroing the sub-priority bits.
 */
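/*
 * For example, with a Group-1 BPR of 3, a priority of 0x4d is masked
 * with (0xff << 3) == 0xf8 and becomes 0x48; bits [2:0] are
 * sub-priority and never affect preemption.
 */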
static u8 __hyp_text __vgic_v3_pri_to_pre(u8 pri, u32 vmcr, int grp)
{
	unsigned int bpr;

	if (!grp)
		bpr = __vgic_v3_get_bpr0(vmcr) + 1;
	else
		bpr = __vgic_v3_get_bpr1(vmcr);

	return pri & (GENMASK(7, 0) << bpr);
}

/*
 * The priority value is independent of any of the BPR values, so we
 * normalize it using the minimal BPR value. This guarantees that no
 * matter what the guest does with its BPR, we can always set/get the
 * same value of a priority.
 */
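/*
 * For example, with 6 preemption bits (__vgic_v3_bpr_min() == 2), a
 * preemption level of 0x48 maps to active-priority group
 * 0x48 >> 2 == 18, i.e. bit 18 of AP0R0 or AP1R0.
 */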
static void __hyp_text __vgic_v3_set_active_priority(u8 pri, u32 vmcr, int grp)
{
	u8 pre, ap;
	u32 val;
	int apr;

	pre = __vgic_v3_pri_to_pre(pri, vmcr, grp);
	ap = pre >> __vgic_v3_bpr_min();
	apr = ap / 32;

	if (!grp) {
		val = __vgic_v3_read_ap0rn(apr);
		__vgic_v3_write_ap0rn(val | BIT(ap % 32), apr);
	} else {
		val = __vgic_v3_read_ap1rn(apr);
		__vgic_v3_write_ap1rn(val | BIT(ap % 32), apr);
	}
}

static int __hyp_text __vgic_v3_clear_highest_active_priority(void)
{
	u8 nr_apr_regs = vtr_to_nr_apr_regs(read_gicreg(ICH_VTR_EL2));
	u32 hap = 0;
	int i;

	for (i = 0; i < nr_apr_regs; i++) {
		u32 ap0, ap1;
		int c0, c1;

		ap0 = __vgic_v3_read_ap0rn(i);
		ap1 = __vgic_v3_read_ap1rn(i);
		if (!ap0 && !ap1) {
			hap += 32;
			continue;
		}

		c0 = ap0 ? __ffs(ap0) : 32;
		c1 = ap1 ? __ffs(ap1) : 32;

		/* Always clear the LSB, which is the highest priority */
		if (c0 < c1) {
			ap0 &= ~BIT(c0);
			__vgic_v3_write_ap0rn(ap0, i);
			hap += c0;
		} else {
			ap1 &= ~BIT(c1);
			__vgic_v3_write_ap1rn(ap1, i);
			hap += c1;
		}

		/* Rescale to 8 bits of priority */
		return hap << __vgic_v3_bpr_min();
	}

	return GICv3_IDLE_PRIORITY;
}

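/*
 * Emulation of a read of ICC_IAR{0,1}_EL1: find the highest-priority
 * pending LR, check that it matches the group being acknowledged,
 * that its group is enabled, and that its priority beats both the PMR
 * and the current preemption level. If so, mark the LR active (LPIs
 * have no active state), record the priority in the APRs and return
 * the INTID; otherwise return a spurious INTID.
 */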
static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 lr_val;
	u8 lr_prio, pmr;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr < 0)
		goto spurious;

	if (grp != !!(lr_val & ICH_LR_GROUP))
		goto spurious;

	pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
	if (pmr <= lr_prio)
		goto spurious;

	if (__vgic_v3_get_highest_active_priority() <= __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
		goto spurious;

	lr_val &= ~ICH_LR_STATE;
	/* No active state for LPIs */
	if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
		lr_val |= ICH_LR_ACTIVE_BIT;
	__gic_v3_set_lr(lr_val, lr);
	__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
	return;

spurious:
	vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}

static void __hyp_text __vgic_v3_clear_active_lr(int lr, u64 lr_val)
{
	lr_val &= ~ICH_LR_ACTIVE_BIT;
	if (lr_val & ICH_LR_HW) {
		u32 pid;

		pid = (lr_val & ICH_LR_PHYS_ID_MASK) >> ICH_LR_PHYS_ID_SHIFT;
		gic_write_dir(pid);
	}

	__gic_v3_set_lr(lr_val, lr);
}

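/*
 * An EOI for an interrupt that no longer sits in any LR is recorded by
 * bumping ICH_HCR_EL2.EOIcount, mirroring what the CPU interface
 * itself does when the guest EOIs an interrupt it has no LR for.
 */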
static void __hyp_text __vgic_v3_bump_eoicount(void)
{
	u32 hcr;

	hcr = read_gicreg(ICH_HCR_EL2);
	hcr += 1 << ICH_HCR_EOIcount_SHIFT;
	write_gicreg(hcr, ICH_HCR_EL2);
}

static void __hyp_text __vgic_v3_write_dir(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	int lr;

	/* EOImode == 0, nothing to be done here */
	if (!(vmcr & ICH_VMCR_EOIM_MASK))
		return;

	/* No deactivate to be performed on an LPI */
	if (vid >= VGIC_MIN_LPI)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u32 vid = vcpu_get_reg(vcpu, rt);
	u64 lr_val;
	u8 lr_prio, act_prio;
	int lr, grp;

	grp = __vgic_v3_get_group(vcpu);

	/* Drop priority in any case */
	act_prio = __vgic_v3_clear_highest_active_priority();

	/* If EOIing an LPI, no deactivate to be performed */
	if (vid >= VGIC_MIN_LPI)
		return;

	/* EOImode == 1, nothing to be done here */
	if (vmcr & ICH_VMCR_EOIM_MASK)
		return;

	lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
	if (lr == -1) {
		__vgic_v3_bump_eoicount();
		return;
	}

	lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;

	/* If priorities or group do not match, the guest has fscked-up. */
	if (grp != !!(lr_val & ICH_LR_GROUP) ||
	    __vgic_v3_pri_to_pre(lr_prio, vmcr, grp) != act_prio)
		return;

	/* Let's now perform the deactivation */
	__vgic_v3_clear_active_lr(lr, lr_val);
}

static void __hyp_text __vgic_v3_read_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG0_MASK));
}

static void __hyp_text __vgic_v3_read_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, !!(vmcr & ICH_VMCR_ENG1_MASK));
}

static void __hyp_text __vgic_v3_write_igrpen0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG0_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG0_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_igrpen1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);

	if (val & 1)
		vmcr |= ICH_VMCR_ENG1_MASK;
	else
		vmcr &= ~ICH_VMCR_ENG1_MASK;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr0(vmcr));
}

static void __hyp_text __vgic_v3_read_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	vcpu_set_reg(vcpu, rt, __vgic_v3_get_bpr1(vmcr));
}

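/*
 * Note the asymmetry with __vgic_v3_write_bpr1() below: the minimum
 * virtual BPR0 is one less than __vgic_v3_bpr_min(), consistent with
 * __vgic_v3_get_bpr1() deriving BPR1 as BPR0 + 1 when CBPR is set.
 */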
static void __hyp_text __vgic_v3_write_bpr0(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min() - 1;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR0_SHIFT;
	val &= ICH_VMCR_BPR0_MASK;
	vmcr &= ~ICH_VMCR_BPR0_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_write_bpr1(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
	u64 val = vcpu_get_reg(vcpu, rt);
	u8 bpr_min = __vgic_v3_bpr_min();

	if (vmcr & ICH_VMCR_CBPR_MASK)
		return;

	/* Enforce BPR limiting */
	if (val < bpr_min)
		val = bpr_min;

	val <<= ICH_VMCR_BPR1_SHIFT;
	val &= ICH_VMCR_BPR1_MASK;
	vmcr &= ~ICH_VMCR_BPR1_MASK;
	vmcr |= val;

	__vgic_v3_write_vmcr(vmcr);
}

static void __hyp_text __vgic_v3_read_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val;

	if (!__vgic_v3_get_group(vcpu))
		val = __vgic_v3_read_ap0rn(n);
	else
		val = __vgic_v3_read_ap1rn(n);

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_apxrn(struct kvm_vcpu *vcpu, int rt, int n)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (!__vgic_v3_get_group(vcpu))
		__vgic_v3_write_ap0rn(val, n);
	else
		__vgic_v3_write_ap1rn(val, n);
}

static void __hyp_text __vgic_v3_read_apxr0(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_read_apxr1(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_read_apxr2(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_read_apxr3(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	__vgic_v3_read_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_write_apxr0(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 0);
}

static void __hyp_text __vgic_v3_write_apxr1(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 1);
}

static void __hyp_text __vgic_v3_write_apxr2(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 2);
}

static void __hyp_text __vgic_v3_write_apxr3(struct kvm_vcpu *vcpu,
					     u32 vmcr, int rt)
{
	__vgic_v3_write_apxrn(vcpu, rt, 3);
}

static void __hyp_text __vgic_v3_read_hppir(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u64 lr_val;
	int lr, lr_grp, grp;

	grp = __vgic_v3_get_group(vcpu);

	lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
	if (lr == -1)
		goto spurious;

	lr_grp = !!(lr_val & ICH_LR_GROUP);
	if (lr_grp != grp)
		lr_val = ICC_IAR1_EL1_SPURIOUS;

spurious:
	vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
}

static void __hyp_text __vgic_v3_read_pmr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	vmcr &= ICH_VMCR_PMR_MASK;
	vmcr >>= ICH_VMCR_PMR_SHIFT;
	vcpu_set_reg(vcpu, rt, vmcr);
}

static void __hyp_text __vgic_v3_write_pmr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	val <<= ICH_VMCR_PMR_SHIFT;
	val &= ICH_VMCR_PMR_MASK;
	vmcr &= ~ICH_VMCR_PMR_MASK;
	vmcr |= val;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

static void __hyp_text __vgic_v3_read_rpr(struct kvm_vcpu *vcpu,
					  u32 vmcr, int rt)
{
	u32 val = __vgic_v3_get_highest_active_priority();
	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu,
					   u32 vmcr, int rt)
{
	u32 vtr, val;

	vtr = read_gicreg(ICH_VTR_EL2);
	/* PRIbits */
	val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
	/* IDbits */
	val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
	/* SEIS */
	val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
	/* A3V */
	val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
	/* EOImode */
	val |= ((vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT) << ICC_CTLR_EL1_EOImode_SHIFT;
	/* CBPR */
	val |= (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;

	vcpu_set_reg(vcpu, rt, val);
}

static void __hyp_text __vgic_v3_write_ctlr(struct kvm_vcpu *vcpu,
					    u32 vmcr, int rt)
{
	u32 val = vcpu_get_reg(vcpu, rt);

	if (val & ICC_CTLR_EL1_CBPR_MASK)
		vmcr |= ICH_VMCR_CBPR_MASK;
	else
		vmcr &= ~ICH_VMCR_CBPR_MASK;

	if (val & ICC_CTLR_EL1_EOImode_MASK)
		vmcr |= ICH_VMCR_EOIM_MASK;
	else
		vmcr &= ~ICH_VMCR_EOIM_MASK;

	write_gicreg(vmcr, ICH_VMCR_EL2);
}

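/*
 * Dispatcher for all trapped GIC CPU interface accesses: decode the
 * trapped system register (or its AArch32 cp15 equivalent) from the
 * ESR, pick the matching emulation handler, and run it against a
 * snapshot of ICH_VMCR_EL2 and the instruction's transfer register.
 * Returning 0 hands the access back to the rest of KVM's exit
 * handling.
 */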
int __hyp_text __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	int rt;
	u32 esr;
	u32 vmcr;
	void (*fn)(struct kvm_vcpu *, u32, int);
	bool is_read;
	u32 sysreg;

	esr = kvm_vcpu_get_hsr(vcpu);
	if (vcpu_mode_is_32bit(vcpu)) {
		if (!kvm_condition_valid(vcpu))
			return 1;

		sysreg = esr_cp15_to_sysreg(esr);
	} else {
		sysreg = esr_sys64_to_sysreg(esr);
	}

	is_read = (esr & ESR_ELx_SYS64_ISS_DIR_MASK) == ESR_ELx_SYS64_ISS_DIR_READ;

	switch (sysreg) {
	case SYS_ICC_IAR0_EL1:
	case SYS_ICC_IAR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_iar;
		break;
	case SYS_ICC_EOIR0_EL1:
	case SYS_ICC_EOIR1_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_eoir;
		break;
	case SYS_ICC_IGRPEN1_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen1;
		else
			fn = __vgic_v3_write_igrpen1;
		break;
	case SYS_ICC_BPR1_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr1;
		else
			fn = __vgic_v3_write_bpr1;
		break;
	case SYS_ICC_AP0Rn_EL1(0):
	case SYS_ICC_AP1Rn_EL1(0):
		if (is_read)
			fn = __vgic_v3_read_apxr0;
		else
			fn = __vgic_v3_write_apxr0;
		break;
	case SYS_ICC_AP0Rn_EL1(1):
	case SYS_ICC_AP1Rn_EL1(1):
		if (is_read)
			fn = __vgic_v3_read_apxr1;
		else
			fn = __vgic_v3_write_apxr1;
		break;
	case SYS_ICC_AP0Rn_EL1(2):
	case SYS_ICC_AP1Rn_EL1(2):
		if (is_read)
			fn = __vgic_v3_read_apxr2;
		else
			fn = __vgic_v3_write_apxr2;
		break;
	case SYS_ICC_AP0Rn_EL1(3):
	case SYS_ICC_AP1Rn_EL1(3):
		if (is_read)
			fn = __vgic_v3_read_apxr3;
		else
			fn = __vgic_v3_write_apxr3;
		break;
	case SYS_ICC_HPPIR0_EL1:
	case SYS_ICC_HPPIR1_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_hppir;
		break;
	case SYS_ICC_IGRPEN0_EL1:
		if (is_read)
			fn = __vgic_v3_read_igrpen0;
		else
			fn = __vgic_v3_write_igrpen0;
		break;
	case SYS_ICC_BPR0_EL1:
		if (is_read)
			fn = __vgic_v3_read_bpr0;
		else
			fn = __vgic_v3_write_bpr0;
		break;
	case SYS_ICC_DIR_EL1:
		if (unlikely(is_read))
			return 0;
		fn = __vgic_v3_write_dir;
		break;
	case SYS_ICC_RPR_EL1:
		if (unlikely(!is_read))
			return 0;
		fn = __vgic_v3_read_rpr;
		break;
	case SYS_ICC_CTLR_EL1:
		if (is_read)
			fn = __vgic_v3_read_ctlr;
		else
			fn = __vgic_v3_write_ctlr;
		break;
	case SYS_ICC_PMR_EL1:
		if (is_read)
			fn = __vgic_v3_read_pmr;
		else
			fn = __vgic_v3_write_pmr;
		break;
	default:
		return 0;
	}

	vmcr = __vgic_v3_read_vmcr();
	rt = kvm_vcpu_sys_get_rt(vcpu);
	fn(vcpu, vmcr, rt);

	return 1;
}

#endif