/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

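/*
 * Generic handlers for registers (or register parts) with fixed behavior:
 * read-as-zero, read-as-one and write-ignore.
 */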
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the value
 * of the enabled bit, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

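/*
 * A write to GICD_ISENABLER enables each interrupt whose bit is 1 in the
 * written value (bits written as 0 are left alone) and queues any such
 * interrupt that is already pending, since it may now need delivering.
 */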
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

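/*
 * A write to GICD_ICENABLER disables each interrupt whose bit is 1 in the
 * written value; the rest of the interrupt state (pending, active) is
 * left untouched.
 */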
void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

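/*
 * As with the enable registers, reads of both GICD_ISPENDR and
 * GICD_ICPENDR return the current pending state, so one handler
 * serves both.
 */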
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq_is_pending(irq))
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

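/*
 * A write to GICD_ISPENDR latches the pending state of each interrupt
 * whose bit is 1 in the written value and queues it for delivery.
 */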
void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = true;

		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

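/*
 * A write to GICD_ICPENDR clears the pending latch; note that a level
 * triggered interrupt whose line is still asserted remains pending as
 * far as irq_is_pending() is concerned.
 */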
void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->pending_latch = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

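/*
 * Reads of GICD_ISACTIVER and GICD_ICACTIVER likewise both return the
 * active state, so one handler suffices here as well.
 */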
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool new_active_state)
{
	struct kvm_vcpu *requester_vcpu;
	unsigned long flags;

	spin_lock_irqsave(&irq->irq_lock, flags);

	/*
	 * The vcpu parameter here can mean multiple things depending on how
	 * this function is called; when handling a trap from the kernel it
	 * depends on the GIC version, and these functions are also called as
	 * part of save/restore from userspace.
	 *
	 * Therefore, we have to figure out the requester in a reliable way.
	 *
	 * When accessing VGIC state from user space, the requester_vcpu is
	 * NULL, which is fine, because we guarantee that no VCPUs are running
	 * when accessing VGIC state from user space so irq->vcpu->cpu is
	 * always -1.
	 */
	requester_vcpu = kvm_arm_get_running_vcpu();

	/*
	 * If this virtual IRQ was written into a list register, we
	 * have to make sure the CPU that runs the VCPU thread has
	 * synced back the LR state to the struct vgic_irq.
	 *
	 * As long as the conditions below are true, we know the VCPU thread
	 * may be on its way back from the guest (we kicked the VCPU thread in
	 * vgic_change_active_prepare) and still has to sync back this IRQ,
	 * so we release and re-acquire the spin_lock to let the other thread
	 * sync back the IRQ.
	 */
	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
	       irq->vcpu->cpu != -1) /* VCPU thread is running */
		cond_resched_lock(&irq->irq_lock);

	irq->active = new_active_state;
	if (new_active_state)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		spin_unlock_irqrestore(&irq->irq_lock, flags);
}
228
229/*
230 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
231 * is not queued on some running VCPU's LRs, because then the change to the
232 * active state can be overwritten when the VCPU's state is synced coming back
233 * from the guest.
234 *
235 * For shared interrupts, we have to stop all the VCPUs because interrupts can
236 * be migrated while we don't hold the IRQ locks and we don't want to be
237 * chasing moving targets.
238 *
Christoffer Dallabd72292017-05-06 20:01:24 +0200239 * For private interrupts we don't have to do anything because userspace
240 * accesses to the VGIC state already require all VCPUs to be stopped, and
241 * only the VCPU itself can modify its private interrupts active state, which
242 * guarantees that the VCPU is not running.
Christoffer Dall35a2d582016-05-20 15:25:28 +0200243 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

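/*
 * The guest-facing and userspace-facing active state writers share the
 * workers below; only the guest-facing wrappers take kvm->lock and
 * bracket the change with vgic_change_active_prepare()/_finish(), since
 * userspace accesses already guarantee that no VCPU is running.
 */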
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
}

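/*
 * GICD_IPRIORITYR holds one byte of priority per interrupt, so a read
 * assembles 'len' consecutive per-IRQ priority values into one register
 * value. On the write side only the top VGIC_PRI_BITS bits are kept;
 * for example, assuming VGIC_PRI_BITS is 5, a written byte of 0xff is
 * stored as 0xf8.
 */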
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

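/*
 * GICD_ICFGR uses two bits per interrupt, of which only the odd bit is
 * meaningful: 1 means edge triggered, 0 means level triggered. For
 * example, a 32-bit write of 0x2 to the word covering INTIDs 32-47
 * makes INTID 32 edge triggered and the other 15 level triggered.
 */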
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

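/*
 * Helper to save the line level of up to 32 interrupts, starting at
 * @intid, as a bitmap; SGIs and unallocated interrupts are skipped.
 */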
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

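/*
 * Restore counterpart to vgic_read_irq_line_level_info(): set each line
 * level from the bitmap and queue any interrupt whose level goes high.
 */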
void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * The line level is set irrespective of the irq type (level
		 * or edge) to avoid a dependency on the VM having to restore
		 * the irq config before the line level.
		 */
		new_level = !!(val & (1U << i));
		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

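/* bsearch() comparator: is the offset below, inside or above the region? */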
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly.  Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

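/*
 * Check that an access of the given width and alignment is allowed for
 * this register region and, for per-IRQ registers, that it does not
 * reach past the number of allocated interrupts.
 */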
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

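/* Find the region covering @addr and validate the access against it. */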
const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
	else
		region->write(r_vcpu, addr, sizeof(u32), *val);

	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

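/*
 * Dispatch a guest MMIO read to the matching register region, picking the
 * target context (redistributor VCPU or ITS) from the iodev type.
 * Accesses that hit no region read as zero.
 */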
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}