/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

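/*
 * Trivial accessors implementing RAZ (read-as-zero), RAO (read-as-one)
 * and WI (write-ignore) register semantics, used wherever a register (or
 * register range) has a fixed read value or swallows writes.
 */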
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
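/*
 * As a worked example (assuming the usual one-bit-per-interrupt layout of
 * the enable registers): with VGIC_ADDR_TO_INTID(addr, 1), each byte of
 * register space maps to eight interrupts, so a 4-byte read at byte
 * offset 4 covers INTIDs 32..63.
 */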
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);
	}

	return value;
}

void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		irq->enabled = true;
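		/*
		 * vgic_queue_irq_unlock() queues the IRQ on the target
		 * VCPU's ap_list if necessary and drops irq_lock on the
		 * way out, hence no matching spin_unlock() here.
		 */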
		vgic_queue_irq_unlock(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->enabled = false;

		spin_unlock(&irq->irq_lock);
	}
}

unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->pending)
			value |= (1U << i);
	}

	return value;
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		irq->pending = true;
		if (irq->config == VGIC_CONFIG_LEVEL)
			irq->soft_pending = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq);
	}
}

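/*
 * For a level triggered interrupt the pending state is the logical OR of
 * the line level and the soft_pending latch set by a GICD_ISPENDR write,
 * so clearing the pending state only removes the software latch; the
 * interrupt stays pending for as long as the line is asserted.
 */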
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		if (irq->config == VGIC_CONFIG_LEVEL) {
			irq->soft_pending = false;
			irq->pending = irq->line_level;
		} else {
			irq->pending = false;
		}

		spin_unlock(&irq->irq_lock);
	}
}

unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);
	}

	return value;
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

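	/*
	 * Halting all VCPUs up front bounds the wait in the loop below:
	 * once no VCPU is running, any interrupt state still held in a
	 * list register will get synced back to the struct vgic_irq.
	 */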
	kvm_arm_halt_guest(vcpu->kvm);
	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		/*
		 * If this virtual IRQ was written into a list register, we
		 * have to make sure the CPU that runs the VCPU thread has
		 * synced back the LR state to the struct vgic_irq. We can
		 * only know this for sure when either this irq is not
		 * assigned to anyone's AP list anymore, or the VCPU thread
		 * is not running on any CPUs.
		 *
		 * In the opposite case, we know the VCPU thread may be on
		 * its way back from the guest and still has to sync back
		 * this IRQ, so we release and re-acquire the spin_lock to
		 * let the other thread sync back the IRQ.
		 */
		while (irq->vcpu && /* IRQ may have state in an LR somewhere */
		       irq->vcpu->cpu != -1) /* VCPU thread is running */
			cond_resched_lock(&irq->irq_lock);

		irq->active = false;
		spin_unlock(&irq->irq_lock);
	}
	kvm_arm_resume_guest(vcpu->kvm);
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		/*
		 * If the IRQ was already active or there is no target VCPU
		 * assigned at the moment, then just proceed.
		 */
		if (irq->active || !irq->target_vcpu) {
			irq->active = true;

			spin_unlock(&irq->irq_lock);
			continue;
		}

		irq->active = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq);
	}
}

unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
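/*
 * Note that only VGIC_PRI_BITS of the eight architected priority bits are
 * implemented. As a worked example, assuming VGIC_PRI_BITS is 5,
 * GENMASK(7, 3) is 0xf8, so a guest write of 0xab is stored (and later
 * read back) as 0xa8.
 */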
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock(&irq->irq_lock);
	}
}

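/*
 * GICD_ICFGR encodes two bits per interrupt, of which only the upper bit
 * of each pair is meaningful (0 = level triggered, 1 = edge triggered).
 * That is why the read handler below ORs in 2 (binary 10) per edge
 * triggered interrupt and the write handler only tests bit (i * 2 + 1).
 */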
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		spin_lock(&irq->irq_lock);
		if (test_bit(i * 2 + 1, &val)) {
			irq->config = VGIC_CONFIG_EDGE;
		} else {
			irq->config = VGIC_CONFIG_LEVEL;
			irq->pending = irq->line_level | irq->soft_pending;
		}
		spin_unlock(&irq->irq_lock);
	}
}

static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

/* Find the proper register handler entry given a certain address offset. */
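/* Note: bsearch() requires the region table to be sorted by reg_offset. */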
static const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *region, int nr_regions,
		      unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, region, nr_regions,
		       sizeof(region[0]), match_region);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}
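
/*
 * For example, the 4-byte bus value {0x78, 0x56, 0x34, 0x12} (little
 * endian memory order) becomes the host data value 0x12345678 on both
 * LE and BE hosts.
 */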

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

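/*
 * Check whether the access size and alignment match what the region
 * declares support for: byte accesses need no particular alignment,
 * while 32-bit and 64-bit accesses must be naturally aligned.
 */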
static bool check_region(const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
		return true;
	if ((region->access_flags & VGIC_ACCESS_32bit) &&
	    len == sizeof(u32) && !(addr & 3))
		return true;
	if ((region->access_flags & VGIC_ACCESS_64bit) &&
	    len == sizeof(u64) && !(addr & 7))
		return true;

	return false;
}

static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;
	unsigned long data;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(region, addr, len)) {
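		/* Unhandled or malformed accesses read as zero (RAZ). */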
		memset(val, 0, len);
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	data = region->read(r_vcpu, addr, len);
	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
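	/* Writes outside any handled region are silently ignored (WI). */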
	if (!region)
		return 0;

	if (!check_region(region, addr, len))
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	region->write(r_vcpu, addr, len, data);
	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

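/*
 * Register a single device covering the whole distributor register range
 * on the KVM MMIO bus; individual accesses are then dispatched through
 * the region table set up by the GIC model specific init function.
 */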
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
#endif
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}