KVM: x86: trap invlpg
With pages out of sync, invlpg needs to be trapped: it is the guest's
only hint that a pte under an out-of-sync shadow page has changed. For
now simply nuke the corresponding last-level shadow pte and let the
next access refault.
Untested on AMD.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
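
For context, the per-mode invlpg hook added below is meant to be
consumed by the common mmu code and by the vmx exit handler; the hunks
for those files are not shown here. A minimal sketch of that wiring,
assuming a kvm_mmu_invlpg() helper and the usual vmx handler shape
(names and details here are illustrative, not the literal patch):

	void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
	{
		spin_lock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu.invlpg(vcpu, gva);	/* FNAME(invlpg) below */
		spin_unlock(&vcpu->kvm->mmu_lock);
		kvm_mmu_flush_tlb(vcpu);	/* flush the hardware TLB as well */
	}

	/* vmx: handle the INVLPG exit instead of ignoring it */
	static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
	{
		unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);

		kvm_mmu_invlpg(vcpu, exit_qualification);
		skip_emulated_instruction(vcpu);
		return 1;
	}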
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 776fb6d..dc169e8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -461,6 +461,31 @@
return 0;
}
+static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
+				      struct kvm_vcpu *vcpu, u64 addr,
+				      u64 *sptep, int level)
+{
+	if (level == PT_PAGE_TABLE_LEVEL) {
+		/* Last-level spte: drop its rmap entry and nuke it. */
+		if (is_shadow_present_pte(*sptep))
+			rmap_remove(vcpu->kvm, sptep);
+		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+		return 1;	/* done, stop the walk */
+	}
+	if (!is_shadow_present_pte(*sptep))
+		return 1;	/* nothing shadowed below this level */
+	return 0;	/* keep walking down */
+}
+
+static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
+{
+ struct shadow_walker walker = {
+ .walker = { .entry = FNAME(shadow_invlpg_entry), },
+ };
+
+ walk_shadow(&walker.walker, vcpu, gva);
+}
+
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
{
struct guest_walker walker;
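
The entry callback's return value drives the generic shadow walker: a
nonzero return terminates the walk. For reference, a simplified sketch
of the walk_shadow() loop this relies on (abridged from the mmu code of
the same era; details such as the PAE root special case are omitted):

	static int walk_shadow(struct kvm_shadow_walk *walker,
			       struct kvm_vcpu *vcpu, u64 addr)
	{
		hpa_t shadow_addr = vcpu->arch.mmu.root_hpa;
		int level = vcpu->arch.mmu.shadow_root_level;
		u64 *sptep;
		int r;

		while (level >= PT_PAGE_TABLE_LEVEL) {
			sptep = ((u64 *)__va(shadow_addr))
				+ SHADOW_PT_INDEX(addr, level);
			/* shadow_invlpg_entry() returns 1 to stop here */
			r = walker->entry(walker, vcpu, addr, sptep, level);
			if (r)
				return r;
			shadow_addr = *sptep & PT64_BASE_ADDR_MASK;
			--level;
		}
		return 0;
	}

So shadow_invlpg_entry() descends through present intermediate entries
and bails out either at the first non-present level (nothing to zap) or
at the last level, where it removes the rmap entry and clears the spte,
leaving shadow_trap_nonpresent_pte so the next guest access refaults.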