// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015, 2016 ARM Ltd.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include <asm/kvm_hyp.h>

#include "vgic.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct vgic_global kvm_vgic_global_state __ro_after_init = {
        .gicv3_cpuif = STATIC_KEY_FALSE_INIT,
};

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   its->cmd_lock (mutex)
 *     its->its_lock (mutex)
 *       vgic_cpu->ap_list_lock          must be taken with IRQs disabled
 *         kvm->lpi_list_lock            must be taken with IRQs disabled
 *           vgic_irq->irq_lock          must be taken with IRQs disabled
 *
 * As the ap_list_lock might be taken from the timer interrupt handler,
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so:
 *   vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 *
 * Since the VGIC must support injecting virtual interrupts from ISRs, we have
 * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
 * spinlocks for any lock that may be taken while injecting an interrupt.
 */

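/*
 * Editor's illustrative sketch (not part of the original file): taking an
 * ITS mutex before an irq_lock, following the documented locking order.
 * The helper name and its parameters are hypothetical; __maybe_unused
 * keeps the sketch from triggering unused-function warnings.
 */
static void __maybe_unused vgic_lock_order_sketch(struct vgic_its *its,
                                                  struct vgic_irq *irq)
{
        unsigned long flags;

        mutex_lock(&its->its_lock);                     /* upper lock first */
        raw_spin_lock_irqsave(&irq->irq_lock, flags);   /* then the lower one */

        /* ... manipulate the ITS and the IRQ here ... */

        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        mutex_unlock(&its->its_lock);                   /* release in reverse */
}
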
/*
 * Iterate over the VM's list of mapped LPIs to find the one with a
 * matching interrupt ID and return a reference to the IRQ structure.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = NULL;
        unsigned long flags;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

        list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
                if (irq->intid != intid)
                        continue;

                /*
                 * This increases the refcount, the caller is expected to
                 * call vgic_put_irq() later once it's finished with the IRQ.
                 */
                vgic_get_irq_kref(irq);
                goto out_unlock;
        }
        irq = NULL;

out_unlock:
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

        return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid)
{
        /* SGIs and PPIs */
        if (intid <= VGIC_MAX_PRIVATE) {
                intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
                return &vcpu->arch.vgic_cpu.private_irqs[intid];
        }

        /* SPIs */
        if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
                intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
        }

        /* LPIs */
        if (intid >= VGIC_MIN_LPI)
                return vgic_get_lpi(kvm, intid);

        return NULL;
}

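/*
 * Editor's illustrative sketch (not part of the original file): the
 * canonical get/put pairing around vgic_get_irq(). The helper name and
 * the example INTID (27, a PPI) are hypothetical.
 */
static void __maybe_unused vgic_get_put_sketch(struct kvm_vcpu *vcpu)
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, 27);

        if (!irq)
                return;

        /* ... use irq, taking irq->irq_lock as needed ... */

        vgic_put_irq(vcpu->kvm, irq);   /* drops the reference taken above */
}
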
/*
 * We can't do anything in here, because we lack the kvm pointer to
 * lock and remove the item from the lpi_list. So we keep this function
 * empty and use the return value of kref_put() to trigger the freeing.
 */
static void vgic_irq_release(struct kref *ref)
{
}

/*
 * Drop the refcount on the LPI. Must be called with lpi_list_lock held.
 */
void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;

        if (!kref_put(&irq->refcount, vgic_irq_release))
                return;

        list_del(&irq->lpi_list);
        dist->lpi_list_count--;

        kfree(irq);
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        unsigned long flags;

        if (irq->intid < VGIC_MIN_LPI)
                return;

        raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
        __vgic_put_lpi_locked(kvm, irq);
        raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;
        unsigned long flags;

        raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                if (irq->intid >= VGIC_MIN_LPI) {
                        raw_spin_lock(&irq->irq_lock);
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        raw_spin_unlock(&irq->irq_lock);
                        vgic_put_irq(vcpu->kvm, irq);
                }
        }

        raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_PENDING,
                                      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
        bool line_level;

        BUG_ON(!irq->hw);

        if (irq->ops && irq->ops->get_input_level)
                return irq->ops->get_input_level(irq->intid);

        WARN_ON(irq_get_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_PENDING,
                                      &line_level));
        return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
        BUG_ON(!irq->hw);
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_ACTIVE,
                                      active));
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq: The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
        lockdep_assert_held(&irq->irq_lock);

        /* If the interrupt is active, it must stay on the current vcpu */
        if (irq->active)
                return irq->vcpu ? : irq->target_vcpu;

        /*
         * If the IRQ is not active but enabled and pending, we should direct
         * it to its configured target VCPU.
         * If the distributor is disabled, pending interrupts shouldn't be
         * forwarded.
         */
        if (irq->enabled && irq_is_pending(irq)) {
                if (unlikely(irq->target_vcpu &&
                             !irq->target_vcpu->kvm->arch.vgic.enabled))
                        return NULL;

                return irq->target_vcpu;
        }

        /*
         * If neither active nor pending and enabled, then this IRQ should not
         * be queued to any VCPU.
         */
        return NULL;
}

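/*
 * Editor's summary (not part of the original file), restating the oracle
 * decision above as a table:
 *
 *   active | enabled && pending | dist enabled | result
 *   -------+--------------------+--------------+--------------------------
 *   yes    | -                  | -            | irq->vcpu ?: target_vcpu
 *   no     | yes                | yes          | irq->target_vcpu
 *   no     | yes                | no           | NULL
 *   no     | no                 | -            | NULL
 */
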
/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, const struct list_head *a,
                        const struct list_head *b)
{
        struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
        struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
        bool penda, pendb;
        int ret;

        /*
         * list_sort may call this function with the same element when
         * the list is fairly long.
         */
        if (unlikely(irqa == irqb))
                return 0;

        raw_spin_lock(&irqa->irq_lock);
        raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

        if (irqa->active || irqb->active) {
                ret = (int)irqb->active - (int)irqa->active;
                goto out;
        }

        penda = irqa->enabled && irq_is_pending(irqa);
        pendb = irqb->enabled && irq_is_pending(irqb);

        if (!penda || !pendb) {
                ret = (int)pendb - (int)penda;
                goto out;
        }

        /* Both pending and enabled, sort by priority */
        ret = irqa->priority - irqb->priority;
out:
        raw_spin_unlock(&irqb->irq_lock);
        raw_spin_unlock(&irqa->irq_lock);
        return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        lockdep_assert_held(&vgic_cpu->ap_list_lock);

        list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

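/*
 * Editor's worked example (not part of the original file): given an active
 * IRQ A, a pending IRQ B at priority 0x80 and a pending IRQ C at priority
 * 0x20 (lower value = higher priority), the comparator above sorts the
 * ap_list as A, C, B: active interrupts first, then by ascending priority
 * value.
 */
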
/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
        if (irq->owner != owner)
                return false;

        switch (irq->config) {
        case VGIC_CONFIG_LEVEL:
                return irq->line_level != level;
        case VGIC_CONFIG_EDGE:
                return level;
        }

        return false;
}

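/*
 * Editor's summary (not part of the original file): with a matching owner,
 * the validity check above reduces to:
 *
 *   config | level argument      | valid?
 *   -------+---------------------+---------------------------------
 *   LEVEL  | differs from stored | yes (the line actually changed)
 *   LEVEL  | same as stored      | no  (nothing new to model)
 *   EDGE   | true                | yes (rising edge triggers)
 *   EDGE   | false               | no  (falling edges are ignored)
 */
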
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
                           unsigned long flags)
{
        struct kvm_vcpu *vcpu;

        lockdep_assert_held(&irq->irq_lock);

retry:
        vcpu = vgic_target_oracle(irq);
        if (irq->vcpu || !vcpu) {
                /*
                 * If this IRQ is already on a VCPU's ap_list, then it
                 * cannot be moved or modified and there is no more work for
                 * us to do.
                 *
                 * Otherwise, if the irq is not pending and enabled, it does
                 * not need to be inserted into an ap_list and there is also
                 * no more work for us to do.
                 */
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                /*
                 * We have to kick the VCPU here, because we could be
                 * queueing an edge-triggered interrupt for which we
                 * get no EOI maintenance interrupt. In that case,
                 * while the IRQ is already on the VCPU's AP list, the
                 * VCPU could have EOI'ed the original interrupt and
                 * won't see this one until it exits for some other
                 * reason.
                 */
                if (vcpu) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                return false;
        }

        /*
         * We must unlock the irq lock to take the ap_list_lock where
         * we are going to insert this new pending interrupt.
         */
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        /* someone can do stuff here, which we re-check below */

        raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
        raw_spin_lock(&irq->irq_lock);

        /*
         * Did something change behind our backs?
         *
         * There are two cases:
         * 1) The irq lost its pending state or was disabled behind our
         *    backs and/or it was queued to another VCPU's ap_list.
         * 2) Someone changed the affinity on this irq behind our
         *    backs and we are now holding the wrong ap_list_lock.
         *
         * In both cases, drop the locks and retry.
         */

        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
                                           flags);

                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                goto retry;
        }

        /*
         * Grab a reference to the irq to reflect the fact that it is
         * now in the ap_list.
         */
        vgic_get_irq_kref(irq);
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;

        raw_spin_unlock(&irq->irq_lock);
        raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);

        return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:   The VM structure pointer
 * @cpuid: The CPU for PPIs
 * @intid: The INTID to inject a new state to.
 * @level: Edge-triggered:  true:  to trigger the interrupt
 *                          false: to ignore the call
 *         Level-sensitive: true:  raise the input signal
 *                          false: lower the input signal
 * @owner: The opaque pointer to the owner of the IRQ being raised to verify
 *         that the caller is allowed to inject this IRQ.  Userspace
 *         injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
                        bool level, void *owner)
{
        struct kvm_vcpu *vcpu;
        struct vgic_irq *irq;
        unsigned long flags;
        int ret;

        trace_vgic_update_irq_pending(cpuid, intid, level);

        ret = vgic_lazy_init(kvm);
        if (ret)
                return ret;

        vcpu = kvm_get_vcpu(kvm, cpuid);
        if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
                return -EINVAL;

        irq = vgic_get_irq(kvm, vcpu, intid);
        if (!irq)
                return -EINVAL;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        if (!vgic_validate_injection(irq, level, owner)) {
                /* Nothing to see here, move along... */
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(kvm, irq);
                return 0;
        }

        if (irq->config == VGIC_CONFIG_LEVEL)
                irq->line_level = level;
        else
                irq->pending_latch = true;

        vgic_queue_irq_unlock(kvm, irq, flags);
        vgic_put_irq(kvm, irq);

        return 0;
}

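/*
 * Editor's illustrative sketch (not part of the original file): how an
 * in-kernel device model might raise and lower a level-sensitive SPI via
 * kvm_vgic_inject_irq(). The helper name, the SPI number and the owner
 * cookie are hypothetical.
 */
static int __maybe_unused vgic_inject_sketch(struct kvm *kvm, void *owner)
{
        unsigned int spi = 40;  /* hypothetical SPI (>= VGIC_NR_PRIVATE_IRQS) */
        int ret;

        /* Raise the (virtual) input line... */
        ret = kvm_vgic_inject_irq(kvm, 0, spi, true, owner);
        if (ret)
                return ret;

        /* ... and lower it again once the device deasserts. */
        return kvm_vgic_inject_irq(kvm, 0, spi, false, owner);
}
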
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                            unsigned int host_irq,
                            struct irq_ops *ops)
{
        struct irq_desc *desc;
        struct irq_data *data;

        /*
         * Find the physical IRQ number corresponding to @host_irq
         */
        desc = irq_to_desc(host_irq);
        if (!desc) {
                kvm_err("%s: no interrupt descriptor\n", __func__);
                return -EINVAL;
        }
        data = irq_desc_get_irq_data(desc);
        while (data->parent_data)
                data = data->parent_data;

        irq->hw = true;
        irq->host_irq = host_irq;
        irq->hwintid = data->hwirq;
        irq->ops = ops;
        return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
        irq->hw = false;
        irq->hwintid = 0;
        irq->ops = NULL;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
                          u32 vintid, struct irq_ops *ops)
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        unsigned long flags;
        int ret;

        BUG_ON(!irq);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return ret;
}

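/*
 * Editor's illustrative sketch (not part of the original file): mapping a
 * host interrupt to a guest PPI, the way a subsystem such as the arch
 * timer wires its hardware line into the VGIC. The helper name, the
 * host_irq argument and the PPI number are hypothetical.
 */
static int __maybe_unused vgic_map_sketch(struct kvm_vcpu *vcpu,
                                          unsigned int host_irq)
{
        u32 vintid = 27;        /* hypothetical virtual PPI */

        /* NULL ops: the line level is sampled via the irqchip, see above */
        return kvm_vgic_map_phys_irq(vcpu, host_irq, vintid, NULL);
}
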
/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt. Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        unsigned long flags;

        if (!irq->hw)
                goto out;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->active = false;
        irq->pending_latch = false;
        irq->line_level = false;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
        vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
        struct vgic_irq *irq;
        unsigned long flags;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        BUG_ON(!irq);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        kvm_vgic_unmap_irq(irq);
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return 0;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:  Pointer to the VCPU (used for PPIs)
 * @intid: The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner: Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
        struct vgic_irq *irq;
        unsigned long flags;
        int ret = 0;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        /* SGIs and LPIs cannot be wired up to any device */
        if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
                return -EINVAL;

        irq = vgic_get_irq(vcpu->kvm, vcpu, intid);
        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->owner && irq->owner != owner)
                ret = -EEXIST;
        else
                irq->owner = owner;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        return ret;
}

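/*
 * Editor's illustrative sketch (not part of the original file): an
 * in-kernel device claiming a PPI before injecting it, so that later
 * kvm_vgic_inject_irq() calls with the same owner cookie are accepted.
 * The helper name and the PPI number are hypothetical.
 */
static int __maybe_unused vgic_claim_ppi_sketch(struct kvm_vcpu *vcpu,
                                                void *owner)
{
        unsigned int ppi = 26;  /* hypothetical PPI */

        /* Fails with -EEXIST if another device already owns this line */
        return kvm_vgic_set_owner(vcpu, ppi, owner);
}
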
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
        raw_spin_lock(&vgic_cpu->ap_list_lock);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
                bool target_vcpu_needs_kick = false;

                raw_spin_lock(&irq->irq_lock);

                BUG_ON(vcpu != irq->vcpu);

                target_vcpu = vgic_target_oracle(irq);

                if (!target_vcpu) {
                        /*
                         * We don't need to process this interrupt any
                         * further, move it off the list.
                         */
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        raw_spin_unlock(&irq->irq_lock);

                        /*
                         * This vgic_put_irq call matches the
                         * vgic_get_irq_kref in vgic_queue_irq_unlock,
                         * where we added the LPI to the ap_list. As
                         * we remove the irq from the list, we also
                         * drop the refcount.
                         */
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                if (target_vcpu == vcpu) {
                        /* We're on the right CPU */
                        raw_spin_unlock(&irq->irq_lock);
                        continue;
                }

                /* This interrupt looks like it has to be migrated. */

                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock(&vgic_cpu->ap_list_lock);

                /*
                 * Ensure locking order by always locking the smallest
                 * ID first.
                 */
                if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
                        vcpuA = vcpu;
                        vcpuB = target_vcpu;
                } else {
                        vcpuA = target_vcpu;
                        vcpuB = vcpu;
                }

                raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
                raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                     SINGLE_DEPTH_NESTING);
                raw_spin_lock(&irq->irq_lock);

                /*
                 * If the affinity has been preserved, move the
                 * interrupt around. Otherwise, it means things have
                 * changed while the interrupt was unlocked, and we
                 * need to replay this.
                 *
                 * In all cases, we cannot trust the list not to have
                 * changed, so we restart from the beginning.
                 */
                if (target_vcpu == vgic_target_oracle(irq)) {
                        struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

                        list_del(&irq->ap_list);
                        irq->vcpu = target_vcpu;
                        list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
                        target_vcpu_needs_kick = true;
                }

                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
                raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

                if (target_vcpu_needs_kick) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
                        kvm_vcpu_kick(target_vcpu);
                }

                goto retry;
        }

        raw_spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_fold_lr_state(vcpu);
        else
                vgic_v3_fold_lr_state(vcpu);
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
                                    struct vgic_irq *irq, int lr)
{
        lockdep_assert_held(&irq->irq_lock);

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_populate_lr(vcpu, irq, lr);
        else
                vgic_v3_populate_lr(vcpu, irq, lr);
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_clear_lr(vcpu, lr);
        else
                vgic_v3_clear_lr(vcpu, lr);
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_set_underflow(vcpu);
        else
                vgic_v3_set_underflow(vcpu);
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
                                 bool *multi_sgi)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        *multi_sgi = false;

        lockdep_assert_held(&vgic_cpu->ap_list_lock);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                int w;

                raw_spin_lock(&irq->irq_lock);
                /* GICv2 SGIs can count for more than one... */
                w = vgic_irq_get_lr_count(irq);
                raw_spin_unlock(&irq->irq_lock);

                count += w;
                *multi_sgi |= (w > 1);
        }
        return count;
}

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count;
        bool multi_sgi;
        u8 prio = 0xff;
        int i = 0;

        lockdep_assert_held(&vgic_cpu->ap_list_lock);

        count = compute_ap_list_depth(vcpu, &multi_sgi);
        if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
                vgic_sort_ap_list(vcpu);

        count = 0;

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                raw_spin_lock(&irq->irq_lock);

                /*
                 * If we have multi-SGIs in the pipeline, we need to
                 * guarantee that they are all seen before any IRQ of
                 * lower priority. In that case, we need to filter out
                 * these interrupts by exiting early. This is easy as
                 * the AP list has been sorted already.
                 */
                if (multi_sgi && irq->priority > prio) {
                        raw_spin_unlock(&irq->irq_lock);
                        break;
                }

                if (likely(vgic_target_oracle(irq) == vcpu)) {
                        vgic_populate_lr(vcpu, irq, count++);

                        if (irq->source)
                                prio = irq->priority;
                }

                raw_spin_unlock(&irq->irq_lock);

                if (count == kvm_vgic_global_state.nr_lr) {
                        if (!list_is_last(&irq->ap_list,
                                          &vgic_cpu->ap_list_head))
                                vgic_set_underflow(vcpu);
                        break;
                }
        }

        /* Nuke remaining LRs */
        for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
                vgic_clear_lr(vcpu, i);

        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
        else
                vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
}

static inline bool can_access_vgic_from_kernel(void)
{
        /*
         * GICv2 can always be accessed from the kernel because it is
         * memory-mapped, and VHE systems can access GICv3 EL2 system
         * registers.
         */
        return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vgic_v2_save_state(vcpu);
        else
                __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        int used_lrs;

        /* An empty ap_list_head implies used_lrs == 0 */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;

        if (can_access_vgic_from_kernel())
                vgic_save_state(vcpu);

        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
        else
                used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;

        if (used_lrs)
                vgic_fold_lr_state(vcpu);
        vgic_prune_ap_list(vcpu);
}

static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vgic_v2_restore_state(vcpu);
        else
                __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        /*
         * If there are no virtual interrupts active or pending for this
         * VCPU, then there is no work to do and we can bail out without
         * taking any lock.  There is a potential race with someone injecting
         * interrupts to the VCPU, but it is a benign race as the VCPU will
         * either observe the new interrupt before or after doing this check,
         * and introducing additional synchronization mechanism doesn't change
         * this.
         *
         * Note that we still need to go through the whole thing if anything
         * can be directly injected (GICv4).
         */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
            !vgic_supports_direct_msis(vcpu->kvm))
                return;

        DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

        if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
                raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
                vgic_flush_lr_state(vcpu);
                raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
        }

        if (can_access_vgic_from_kernel())
                vgic_restore_state(vcpu);

        if (vgic_supports_direct_msis(vcpu->kvm))
                vgic_v4_commit(vcpu);
}

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_load(vcpu);
        else
                vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
        if (unlikely(!vgic_initialized(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_put(vcpu);
        else
                vgic_v3_put(vcpu);
}

void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
{
        if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
                return;

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_vmcr_sync(vcpu);
        else
                vgic_v3_vmcr_sync(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        bool pending = false;
        unsigned long flags;
        struct vgic_vmcr vmcr;

        if (!vcpu->kvm->arch.vgic.enabled)
                return false;

        if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
                return true;

        vgic_get_vmcr(vcpu, &vmcr);

        raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                raw_spin_lock(&irq->irq_lock);
                pending = irq_is_pending(irq) && irq->enabled &&
                          !irq->active &&
                          irq->priority < vmcr.pmr;
                raw_spin_unlock(&irq->irq_lock);

                if (pending)
                        break;
        }

        raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

        return pending;
}

void vgic_kick_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long c;

        /*
         * We've injected an interrupt, time to find out who deserves
         * a good kick...
         */
        kvm_for_each_vcpu(c, vcpu, kvm) {
                if (kvm_vgic_vcpu_pending_irq(vcpu)) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
        }
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
        struct vgic_irq *irq;
        bool map_is_active;
        unsigned long flags;

        if (!vgic_initialized(vcpu->kvm))
                return false;

        irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        map_is_active = irq->hw && irq->active;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return map_is_active;
}

/*
 * Level-triggered mapped IRQs are special because we only observe rising
 * edges as input to the VGIC.
 *
 * If the guest never acked the interrupt we have to sample the physical
 * line and set the line level, because the device state could have changed
 * or we simply need to process the still pending interrupt later.
 *
 * We could also have entered the guest with the interrupt active+pending.
 * On the next exit, we need to re-evaluate the pending state, as it could
 * otherwise result in a spurious interrupt by injecting a now potentially
 * stale pending state.
 *
 * If this causes us to lower the level, we have to also clear the physical
 * active state, since we will otherwise never be told when the interrupt
 * becomes asserted again.
 *
 * Another case is when the interrupt requires a helping hand on
 * deactivation (no HW deactivation, for example).
 */
void vgic_irq_handle_resampling(struct vgic_irq *irq,
                                bool lr_deactivated, bool lr_pending)
{
        if (vgic_irq_is_mapped_level(irq)) {
                bool resample = false;

                if (unlikely(vgic_irq_needs_resampling(irq))) {
                        resample = !(irq->active || irq->pending_latch);
                } else if (lr_pending || (lr_deactivated && irq->line_level)) {
                        irq->line_level = vgic_get_phys_line_level(irq);
                        resample = !irq->line_level;
                }

                if (resample)
                        vgic_irq_set_phys_active(irq, false);
        }
}
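
/*
 * Editor's summary (not part of the original file): when the resampling
 * above clears the physical active state.
 *
 *   needs_resampling | LR state                    | resample when
 *   -----------------+-----------------------------+--------------------------
 *   yes              | -                           | !active && !pending_latch
 *   no               | pending in the LR           | line level sampled low
 *   no               | deactivated with level high | line level sampled low
 */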