/*
 * VGIC MMIO handling functions
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/bsearch.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <kvm/iodev.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_vgic.h>

#include "vgic.h"
#include "vgic-mmio.h"

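/*
 * Generic accessors for reserved or unimplemented registers: RAZ
 * (read-as-zero), RAO (read-as-one) and WI (write-ignored).
 */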
unsigned long vgic_mmio_read_raz(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return 0;
}

unsigned long vgic_mmio_read_rao(struct kvm_vcpu *vcpu,
				 gpa_t addr, unsigned int len)
{
	return -1UL;
}

void vgic_mmio_write_wi(struct kvm_vcpu *vcpu, gpa_t addr,
			unsigned int len, unsigned long val)
{
	/* Ignore */
}

/*
 * Read accesses to both GICD_ICENABLER and GICD_ISENABLER return the state
 * of the per-IRQ enable bits, so there is only one function for both here.
 */
unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->enabled)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

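/*
 * GICD_ISENABLER is write-1-to-set: each set bit in the written value
 * enables the corresponding interrupt. A newly enabled interrupt that
 * is already pending must be queued to a VCPU, hence the call to
 * vgic_queue_irq_unlock(). GICD_ICENABLER below is the
 * write-1-to-clear counterpart and only needs to clear the flag.
 */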
void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->enabled = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		irq->enabled = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

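/*
 * The pending state reported to the guest is the logical OR of the
 * latched pending state and, for level-triggered interrupts, the
 * current line level (see irq_is_pending()).
 */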
unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq_is_pending(irq))
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/*
 * This function will return the VCPU that performed the MMIO access and
 * trapped from within the VM, and will return NULL if this is a userspace
 * access.
 *
 * We can disable preemption locally around accessing the per-CPU variable,
 * and use the resolved vcpu pointer after enabling preemption again, because
 * even if the current thread is migrated to another CPU, reading the per-CPU
 * value later will give us the same value as we update the per-CPU variable
 * in the preempt notifier handlers.
 */
static struct kvm_vcpu *vgic_get_mmio_requester_vcpu(void)
{
	struct kvm_vcpu *vcpu;

	preempt_disable();
	vcpu = kvm_arm_get_running_vcpu();
	preempt_enable();
	return vcpu;
}

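/*
 * For a HW-mapped (forwarded) interrupt, a guest write to
 * GICD_ISPENDR must be propagated to the physical distributor as
 * well. Userspace writes are ignored here, since the pending state of
 * a HW-mapped interrupt is tracked on the physical side rather than
 * restored separately.
 */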
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = true;
	vgic_irq_set_phys_active(irq, true);
}

void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		if (irq->hw)
			vgic_hw_irq_spending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = true;
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				 bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->pending_latch = false;

	/*
	 * We don't want the guest to effectively mask the physical
	 * interrupt by doing a write to GICD_ISPENDR followed by a write
	 * to GICD_ICPENDR for HW interrupts, so we clear the active state
	 * on the physical side if the virtual interrupt is not active.
	 * This may lead to taking an additional interrupt on the
	 * host, but that should not be a problem as the worst that
	 * can happen is an additional vgic injection. We also clear
	 * the pending state to maintain proper semantics for edge HW
	 * interrupts.
	 */
	vgic_irq_set_phys_pending(irq, false);
	if (!irq->active)
		vgic_irq_set_phys_active(irq, false);
}

void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	bool is_uaccess = !vgic_get_mmio_requester_vcpu();
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;
	unsigned long flags;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);

		if (irq->hw)
			vgic_hw_irq_cpending(vcpu, irq, is_uaccess);
		else
			irq->pending_latch = false;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

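/*
 * Reads of GICD_ISACTIVER and GICD_ICACTIVER both return the current
 * active state, one bit per interrupt; a lockless snapshot of
 * irq->active is sufficient for a read.
 */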
unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	u32 value = 0;
	int i;

	/* Loop over all IRQs affected by this read */
	for (i = 0; i < len * 8; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->active)
			value |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				      bool active, bool is_uaccess)
{
	if (is_uaccess)
		return;

	irq->active = active;
	vgic_irq_set_phys_active(irq, active);
}

static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
				    bool active)
{
	unsigned long flags;
	struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu();

	spin_lock_irqsave(&irq->irq_lock, flags);

	/*
	 * If this virtual IRQ was written into a list register, we
	 * have to make sure the CPU that runs the VCPU thread has
	 * synced back the LR state to the struct vgic_irq.
	 *
	 * As long as the conditions below are true, we know the VCPU thread
	 * may be on its way back from the guest (we kicked the VCPU thread in
	 * vgic_change_active_prepare) and still has to sync back this IRQ,
	 * so we release and re-acquire the spin_lock to let the other thread
	 * sync back the IRQ.
	 *
	 * When accessing VGIC state from user space, requester_vcpu is
	 * NULL, which is fine, because we guarantee that no VCPUs are running
	 * when accessing VGIC state from user space so irq->vcpu->cpu is
	 * always -1.
	 */
	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
	       irq->vcpu->cpu != -1) /* VCPU thread is running */
		cond_resched_lock(&irq->irq_lock);

	if (irq->hw)
		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
	else
		irq->active = active;

	if (irq->active)
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
	else
		spin_unlock_irqrestore(&irq->irq_lock, flags);
}

/*
 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
 * is not queued on some running VCPU's LRs, because then the change to the
 * active state can be overwritten when the VCPU's state is synced coming back
 * from the guest.
 *
 * For shared interrupts, we have to stop all the VCPUs because interrupts can
 * be migrated while we don't hold the IRQ locks and we don't want to be
 * chasing moving targets.
 *
 * For private interrupts we don't have to do anything because userspace
 * accesses to the VGIC state already require all VCPUs to be stopped, and
 * only the VCPU itself can modify its private interrupts active state, which
 * guarantees that the VCPU is not running.
 */
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_halt_guest(vcpu->kvm);
}

/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
	if (intid >= VGIC_NR_PRIVATE_IRQS)
		kvm_arm_resume_guest(vcpu->kvm);
}

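/*
 * The __vgic_mmio_write_?active() helpers below are shared by two
 * callers: the guest MMIO path, which stops the VM around the change
 * (see vgic_change_active_prepare() above) and serializes concurrent
 * changes with the kvm->lock mutex, and the uaccess path, which
 * relies on userspace having stopped all VCPUs already.
 */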
static void __vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, false);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_cactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_cactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_cactive(vcpu, addr, len, val);
}

static void __vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len,
				      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
	int i;

	for_each_set_bit(i, &val, len * 8) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		vgic_mmio_change_active(vcpu, irq, true);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
			     gpa_t addr, unsigned int len,
			     unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 1);

	mutex_lock(&vcpu->kvm->lock);
	vgic_change_active_prepare(vcpu, intid);

	__vgic_mmio_write_sactive(vcpu, addr, len, val);

	vgic_change_active_finish(vcpu, intid);
	mutex_unlock(&vcpu->kvm->lock);
}

void vgic_mmio_uaccess_write_sactive(struct kvm_vcpu *vcpu,
				     gpa_t addr, unsigned int len,
				     unsigned long val)
{
	__vgic_mmio_write_sactive(vcpu, addr, len, val);
}

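/*
 * GICD_IPRIORITYR holds one priority byte per interrupt, so an access
 * of len bytes covers len interrupts, starting at the INTID derived
 * from the register offset.
 */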
unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
				      gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	u64 val = 0;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		val |= (u64)irq->priority << (i * 8);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

/*
 * We currently don't handle changing the priority of an interrupt that
 * is already pending on a VCPU. If there is a need for this, we would
 * need to make this VCPU exit and re-evaluate the priorities, potentially
 * leading to this interrupt getting presented now to the guest (if it has
 * been masked by the priority mask before).
 */
void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
			      gpa_t addr, unsigned int len,
			      unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 8);
	int i;
	unsigned long flags;

	for (i = 0; i < len; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		spin_lock_irqsave(&irq->irq_lock, flags);
		/* Narrow the priority range to what we actually support */
		irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
		spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

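/*
 * GICD_ICFGR uses two configuration bits per interrupt, of which only
 * the upper bit is programmable: 1 means edge-triggered, 0 means
 * level-sensitive. This is why the read below encodes an edge
 * interrupt as binary 10.
 */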
unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,
				    gpa_t addr, unsigned int len)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	u32 value = 0;
	int i;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		if (irq->config == VGIC_CONFIG_EDGE)
			value |= (2U << (i * 2));

		vgic_put_irq(vcpu->kvm, irq);
	}

	return value;
}

void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
			    gpa_t addr, unsigned int len,
			    unsigned long val)
{
	u32 intid = VGIC_ADDR_TO_INTID(addr, 2);
	int i;
	unsigned long flags;

	for (i = 0; i < len * 4; i++) {
		struct vgic_irq *irq;

		/*
		 * The configuration cannot be changed for SGIs in general,
		 * for PPIs this is IMPLEMENTATION DEFINED. The arch timer
		 * code relies on PPIs being level triggered, so we also
		 * make them read-only here.
		 */
		if (intid + i < VGIC_NR_PRIVATE_IRQS)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		spin_lock_irqsave(&irq->irq_lock, flags);

		if (test_bit(i * 2 + 1, &val))
			irq->config = VGIC_CONFIG_EDGE;
		else
			irq->config = VGIC_CONFIG_LEVEL;

		spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}
}

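/*
 * The two helpers below transfer the line level of 32 interrupts at a
 * time and back the userspace save/restore of level-triggered
 * interrupt state (the KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO device
 * attribute).
 */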
u64 vgic_read_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid)
{
	int i;
	u64 val = 0;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
		if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level)
			val |= (1U << i);

		vgic_put_irq(vcpu->kvm, irq);
	}

	return val;
}

void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid,
				    const u64 val)
{
	int i;
	int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
	unsigned long flags;

	for (i = 0; i < 32; i++) {
		struct vgic_irq *irq;
		bool new_level;

		if ((intid + i) < VGIC_NR_SGIS || (intid + i) >= nr_irqs)
			continue;

		irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

		/*
		 * The line level is set irrespective of the IRQ type
		 * (level or edge) so that restoring the state does not
		 * depend on the VM restoring the IRQ configuration
		 * before the line level.
		 */
		new_level = !!(val & (1U << i));
		spin_lock_irqsave(&irq->irq_lock, flags);
		irq->line_level = new_level;
		if (new_level)
			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		else
			spin_unlock_irqrestore(&irq->irq_lock, flags);

		vgic_put_irq(vcpu->kvm, irq);
	}
}

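/*
 * bsearch() requires the register regions to be sorted by ascending
 * reg_offset; match_region() tells it whether the offset searched for
 * lies below, within or above a given region.
 */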
static int match_region(const void *key, const void *elt)
{
	const unsigned int offset = (unsigned long)key;
	const struct vgic_register_region *region = elt;

	if (offset < region->reg_offset)
		return -1;

	if (offset >= region->reg_offset + region->len)
		return 1;

	return 0;
}

const struct vgic_register_region *
vgic_find_mmio_region(const struct vgic_register_region *regions,
		      int nr_regions, unsigned int offset)
{
	return bsearch((void *)(uintptr_t)offset, regions, nr_regions,
		       sizeof(regions[0]), match_region);
}

void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_set_vmcr(vcpu, vmcr);
	else
		vgic_v3_set_vmcr(vcpu, vmcr);
}

void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr)
{
	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_get_vmcr(vcpu, vmcr);
	else
		vgic_v3_get_vmcr(vcpu, vmcr);
}

/*
 * kvm_mmio_read_buf() returns a value in a format where it can be converted
 * to a byte array and be directly observed as the guest wanted it to appear
 * in memory if it had done the store itself, which is LE for the GIC, as the
 * guest knows the GIC is always LE.
 *
 * We convert this value to the CPU's native format to deal with it as a data
 * value.
 */
unsigned long vgic_data_mmio_bus_to_host(const void *val, unsigned int len)
{
	unsigned long data = kvm_mmio_read_buf(val, len);

	switch (len) {
	case 1:
		return data;
	case 2:
		return le16_to_cpu(data);
	case 4:
		return le32_to_cpu(data);
	default:
		return le64_to_cpu(data);
	}
}

/*
 * kvm_mmio_write_buf() expects a value in a format such that if converted to
 * a byte array it is observed as the guest would see it if it could perform
 * the load directly. Since the GIC is LE, and the guest knows this, the
 * guest expects a value in little endian format.
 *
 * We convert the data value from the CPU's native format to LE so that the
 * value is returned in the proper format.
 */
void vgic_data_host_to_mmio_bus(void *buf, unsigned int len,
				unsigned long data)
{
	switch (len) {
	case 1:
		break;
	case 2:
		data = cpu_to_le16(data);
		break;
	case 4:
		data = cpu_to_le32(data);
		break;
	default:
		data = cpu_to_le64(data);
	}

	kvm_mmio_write_buf(buf, len, data);
}

static
struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
{
	return container_of(dev, struct vgic_io_device, dev);
}

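/*
 * check_region() validates an access against the access widths
 * allowed for a region and the required alignment, and rejects
 * accesses to per-IRQ registers beyond the number of IRQs configured
 * for this VM.
 */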
static bool check_region(const struct kvm *kvm,
			 const struct vgic_register_region *region,
			 gpa_t addr, int len)
{
	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;

	switch (len) {
	case sizeof(u8):
		flags = VGIC_ACCESS_8bit;
		break;
	case sizeof(u32):
		flags = VGIC_ACCESS_32bit;
		break;
	case sizeof(u64):
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;
	}

	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
		if (!region->bits_per_irq)
			return true;

		/* Do we access a non-allocated IRQ? */
		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
	}

	return false;
}

const struct vgic_register_region *
vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
		     gpa_t addr, int len)
{
	const struct vgic_register_region *region;

	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
				       addr - iodev->base_addr);
	if (!region || !check_region(vcpu->kvm, region, addr, len))
		return NULL;

	return region;
}

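/*
 * Userspace accesses are always 32 bits wide. A register can provide
 * dedicated uaccess_read/uaccess_write handlers when userspace needs
 * different semantics than the guest-facing accessors; for
 * redistributor devices, iodev->redist_vcpu selects the VCPU whose
 * registers are accessed.
 */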
static int vgic_uaccess_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			     gpa_t addr, u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region) {
		*val = 0;
		return 0;
	}

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_read)
		*val = region->uaccess_read(r_vcpu, addr, sizeof(u32));
	else
		*val = region->read(r_vcpu, addr, sizeof(u32));

	return 0;
}

static int vgic_uaccess_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, const u32 *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	struct kvm_vcpu *r_vcpu;

	region = vgic_get_mmio_region(vcpu, iodev, addr, sizeof(u32));
	if (!region)
		return 0;

	r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu;
	if (region->uaccess_write)
		region->uaccess_write(r_vcpu, addr, sizeof(u32), *val);
	else
		region->write(r_vcpu, addr, sizeof(u32), *val);

	return 0;
}

/*
 * Userland access to VGIC registers.
 */
int vgic_uaccess(struct kvm_vcpu *vcpu, struct vgic_io_device *dev,
		 bool is_write, int offset, u32 *val)
{
	if (is_write)
		return vgic_uaccess_write(vcpu, &dev->dev, offset, val);
	else
		return vgic_uaccess_read(vcpu, &dev->dev, offset, val);
}

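/*
 * Accesses that do not match any region, or that fail check_region(),
 * are treated as RAZ/WI instead of being reported as errors to the
 * caller.
 */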
static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			      gpa_t addr, int len, void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = 0;

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region) {
		memset(val, 0, len);
		return 0;
	}

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_DIST:
		data = region->read(vcpu, addr, len);
		break;
	case IODEV_REDIST:
		data = region->read(iodev->redist_vcpu, addr, len);
		break;
	case IODEV_ITS:
		data = region->its_read(vcpu->kvm, iodev->its, addr, len);
		break;
	}

	vgic_data_host_to_mmio_bus(val, len, data);
	return 0;
}

static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			       gpa_t addr, int len, const void *val)
{
	struct vgic_io_device *iodev = kvm_to_vgic_iodev(dev);
	const struct vgic_register_region *region;
	unsigned long data = vgic_data_mmio_bus_to_host(val, len);

	region = vgic_get_mmio_region(vcpu, iodev, addr, len);
	if (!region)
		return 0;

	switch (iodev->iodev_type) {
	case IODEV_CPUIF:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_DIST:
		region->write(vcpu, addr, len, data);
		break;
	case IODEV_REDIST:
		region->write(iodev->redist_vcpu, addr, len, data);
		break;
	case IODEV_ITS:
		region->its_write(vcpu->kvm, iodev->its, addr, len, data);
		break;
	}

	return 0;
}

struct kvm_io_device_ops kvm_io_gic_ops = {
	.read = dispatch_mmio_read,
	.write = dispatch_mmio_write,
};

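/*
 * Registers the distributor's MMIO frame on the KVM MMIO bus, so that
 * guest accesses to [dist_base_address, dist_base_address + len) get
 * routed to the dispatch functions above.
 */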
int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
{
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
	int ret = 0;
	unsigned int len;

	switch (type) {
	case VGIC_V2:
		len = vgic_v2_init_dist_iodev(io_device);
		break;
	case VGIC_V3:
		len = vgic_v3_init_dist_iodev(io_device);
		break;
	default:
		BUG_ON(1);
	}

	io_device->base_addr = dist_base_address;
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
				      len, &io_device->dev);
	mutex_unlock(&kvm->slots_lock);

	return ret;
}