/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

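/*
 * Trivial handlers for registers that are read-as-zero (RAZ),
 * read-as-one (RAO) or write-ignore (WI).
 */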
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);
	}

	return value;
}

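/*
 * A write to GICD_ISENABLER enables the interrupt; since enabling a
 * pending interrupt may make it deliverable, re-evaluate it for queueing.
 * vgic_queue_irq_unlock() drops the irq_lock for us.
 */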
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		irq->enabled = false;

		spin_unlock(&irq->irq_lock);
	}
}

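/*
 * As with the enable bits, reads of both GICD_ISPENDR and GICD_ICPENDR
 * return the pending state, so a single read handler serves both.
 */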
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->pending)
			value |= (1U << i);
	}

	return value;
}

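/*
 * A write to GICD_ISPENDR latches the interrupt as pending. For level
 * triggered interrupts also record this in soft_pending, so that a later
 * GICD_ICPENDR write can fall back to the actual input line level.
 */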
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		irq->pending = true;
		if (irq->config == VGIC_CONFIG_LEVEL)
			irq->soft_pending = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq);
	}
}

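/*
 * GICD_ICPENDR only removes the software-set pending state: an edge
 * triggered interrupt is simply cleared, while a level triggered one
 * remains pending as long as its input line is asserted.
 */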
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		if (irq->config == VGIC_CONFIG_LEVEL) {
			irq->soft_pending = false;
			irq->pending = irq->line_level;
		} else {
			irq->pending = false;
		}

		spin_unlock(&irq->irq_lock);
	}
}

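/*
 * Reads of both GICD_ISACTIVER and GICD_ICACTIVER return the active
 * state, mirroring the enable and pending accessors above.
 */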
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);
	}

	return value;
}

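/*
 * Clearing the active state needs a consistent view of any list register
 * the interrupt may currently live in, so stop all VCPUs while the
 * deactivation takes place.
 */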
void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	kvm_arm_halt_guest(vcpu->kvm);
	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);
		/*
		 * If this virtual IRQ was written into a list register, we
		 * have to make sure the CPU that runs the VCPU thread has
		 * synced back the LR state to the struct vgic_irq. We can
		 * only know this for sure when this IRQ is no longer on
		 * anyone's AP list, or when the VCPU thread is not running
		 * on any CPU.
		 *
		 * Otherwise, the VCPU thread may be on its way back from
		 * the guest and still has to sync back this IRQ, so we
		 * release and re-acquire the spin_lock to let the other
		 * thread sync back the IRQ.
		 */
		while (irq->vcpu && /* IRQ may have state in an LR somewhere */
		       irq->vcpu->cpu != -1) /* VCPU thread is running */
			cond_resched_lock(&irq->irq_lock);

		irq->active = false;
		spin_unlock(&irq->irq_lock);
	}
	kvm_arm_resume_guest(vcpu->kvm);
}

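/*
 * A GICD_ISACTIVER write marks the interrupt active; if it was not
 * active before and has a target VCPU, queue it so the new state is
 * reflected in a list register.
 */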
void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock(&irq->irq_lock);

		/*
		 * If the IRQ was already active or there is no target VCPU
		 * assigned at the moment, then just proceed.
		 */
		if (irq->active || !irq->target_vcpu) {
			irq->active = true;

			spin_unlock(&irq->irq_lock);
			continue;
		}

		irq->active = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq);
	}
}

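/* bsearch() comparator: is the offset below, inside or above the region? */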
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

/* Find the proper register handler entry given a certain address offset. */
static const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *region, int nr_regions,
		      unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, region, nr_regions,
		       sizeof(region[0]), match_region);
}

/*
 * kvm_mmio_read_buf() returns the data as the byte stream the guest
 * stored, i.e. as it would appear in memory had the guest done the store
 * itself. For the GIC that is little endian, and the guest knows the GIC
 * is always LE.
 *
 * Convert this value to the CPU's native format so we can deal with it
 * as an ordinary data value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects the data as the byte stream the guest
 * will load, i.e. as the guest would observe it if it performed the load
 * itself. Since the GIC is LE, and the guest knows this, it expects a
 * value in little endian format.
 *
 * Convert the data value from the CPU's native format to LE so it is
 * returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

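/*
 * Check that the guest access uses one of the access widths the region
 * advertises and is naturally aligned for that width.
 */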
static bool check_region(const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
		return true;
	if ((region->access_flags & VGIC_ACCESS_32bit) &&
	    len == sizeof(u32) && !(addr & 3))
		return true;
	if ((region->access_flags & VGIC_ACCESS_64bit) &&
	    len == sizeof(u64) && !(addr & 7))
		return true;

	return false;
}

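/*
 * Reads that hit no register region, or that fail the width/alignment
 * check, read as zero.
 */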
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;
	unsigned long data;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(region, addr, len)) {
		memset(val, 0, len);
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	data = region->read(r_vcpu, addr, len);
	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

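/*
 * Writes that hit no register region, or that fail the width/alignment
 * check, are ignored.
 */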
static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region)
		return 0;

	if (!check_region(region, addr, len))
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	region->write(r_vcpu, addr, len, data);
	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

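/*
 * Set up the distributor MMIO device for the given VGIC model and
 * register it on the KVM MMIO bus at the guest physical base address.
 */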
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}