arm/arm64: KVM: Perform local TLB invalidation when multiplexing vcpus on a single CPU
Architecturally, TLBs are private to the (physical) CPU they're
associated with. But when multiple vcpus from the same VM are
being multiplexed on the same CPU, the TLBs are not private
to the vcpus (and are actually shared across the VMID).

Let's consider the following scenario:

- vcpu-0 maps PA to VA
- vcpu-1 maps PA' to VA

If run on the same physical CPU, vcpu-1 can hit TLB entries generated
by vcpu-0 accesses, and access the wrong physical page.

The solution to this is to keep a per-VM map of which vcpu ran last
on each given physical CPU, and invalidate local TLBs when switching
to a different vcpu from the same VM.
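
As an illustration, the scheme boils down to the following standalone
sketch (a simplified user-space model with made-up names; the actual
implementation is the diff below):

  #include <stdio.h>

  #define NR_CPUS  4
  #define NO_VCPU -1

  struct vm {
      /* vcpu of this VM that ran last on each physical CPU */
      int last_vcpu_ran[NR_CPUS];
  };

  /* Stand-in for the hyp call invalidating this CPU's TLBs for the VMID */
  static void local_tlb_flush_vmid(int cpu)
  {
      printf("cpu%d: local TLB invalidation\n", cpu);
  }

  static void vcpu_load(struct vm *vm, int vcpu_id, int cpu)
  {
      /*
       * A different vcpu of the same VM ran here last: its TLB
       * entries are tagged with our VMID, so invalidate them
       * before this vcpu can hit them.
       */
      if (vm->last_vcpu_ran[cpu] != vcpu_id) {
          local_tlb_flush_vmid(cpu);
          vm->last_vcpu_ran[cpu] = vcpu_id;
      }
  }

  int main(void)
  {
      struct vm vm;
      int cpu;

      for (cpu = 0; cpu < NR_CPUS; cpu++)
          vm.last_vcpu_ran[cpu] = NO_VCPU;

      vcpu_load(&vm, 0, 2);  /* vcpu-0 on cpu2: flushes (first run)   */
      vcpu_load(&vm, 0, 2);  /* vcpu-0 again:   no flush              */
      vcpu_load(&vm, 1, 2);  /* vcpu-1 on cpu2: flushes (vcpu switch) */
      return 0;
  }
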
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 08bb84f..19b5f5c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -114,11 +114,18 @@ void kvm_arch_check_processor_compat(void *rtn)
*/
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
- int ret = 0;
+ int ret, cpu;
if (type)
return -EINVAL;
+ kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+ if (!kvm->arch.last_vcpu_ran)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu)
+ *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+
ret = kvm_alloc_stage2_pgd(kvm);
if (ret)
goto out_fail_alloc;
@@ -141,6 +148,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
out_free_stage2_pgd:
kvm_free_stage2_pgd(kvm);
out_fail_alloc:
+ free_percpu(kvm->arch.last_vcpu_ran);
+ kvm->arch.last_vcpu_ran = NULL;
return ret;
}
@@ -168,6 +177,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
+ free_percpu(kvm->arch.last_vcpu_ran);
+ kvm->arch.last_vcpu_ran = NULL;
+
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
@@ -312,6 +324,19 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
+ int *last_ran;
+
+ last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+
+ /*
+ * We might get preempted before the vCPU actually runs, but
+ * over-invalidation doesn't affect correctness.
+ */
+ if (*last_ran != vcpu->vcpu_id) {
+ kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+ *last_ran = vcpu->vcpu_id;
+ }
+
vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
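
Not shown in this excerpt, the hunks above rely on a last_vcpu_ran
member added to struct kvm_arch elsewhere in the patch; the
alloc_percpu()/per_cpu_ptr() usage implies a declaration along these
lines (a sketch, elisions marked):

  struct kvm_arch {
      /* ... existing members ... */

      /* vcpu_id of the last vcpu to run on each physical CPU, -1 if none */
      int __percpu *last_vcpu_ran;
  };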
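
The hyp-side helper invoked via kvm_call_hyp() is likewise added
elsewhere in the patch. On arm64 it presumably amounts to a local
(non-broadcast) invalidation of the stage-1 entries tagged with the
guest's VMID, performed with the VM's VTTBR loaded; roughly:

  void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
  {
      struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);

      /* Switch to the guest's VMID so the invalidation is scoped to it */
      write_sysreg(kvm->arch.vttbr, vttbr_el2);
      isb();

      /* Local, non-broadcast invalidation: "vmalle1" has no IS suffix */
      asm volatile("tlbi vmalle1");
      dsb(nsh);
      isb();

      /* Back to the host's VTTBR */
      write_sysreg(0, vttbr_el2);
  }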