// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

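/*
 * Return the host physical address of the vCPU's TDP MMU root page table,
 * allocating a new root and taking a reference on it if the vCPU does not
 * already have a suitable one (see tdp_mmu.c for details).
 */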
hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

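/*
 * Try to take a reference on @root.  Returns false if the root has been
 * marked invalid or its reference count has already dropped to zero, in
 * which case the caller must not use the root.
 */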
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	if (root->role.invalid)
		return false;

	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

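/*
 * Drop a reference on @root; the root is torn down once the last reference
 * is released.  @shared indicates whether mmu_lock is held for read (shared)
 * rather than for write.
 */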
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

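/*
 * Zap the SPTEs covering [@start, @end) in the given address space.  The
 * return value indicates whether a TLB flush is still needed before mmu_lock
 * is released; @can_yield permits dropping mmu_lock and rescheduling mid-walk.
 * The kvm_tdp_mmu_zap_gfn_range() wrapper always allows yielding.
 */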
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * if mmu_lock is held for write, zapping will never yield in this case,
	 * but explicitly disallow it for safety.  The TDP MMU does not yield
	 * until it has made forward progress (steps sideways), and when zapping
	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
	 * requirement), its "step sideways" will always step beyond the bounds
	 * of the shadow page's gfn range and stop iterating before yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false);
}

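/*
 * kvm_tdp_mmu_zap_all() zaps every SPTE in every root.  Root invalidation is
 * split in two: kvm_tdp_mmu_invalidate_all_roots() marks all valid roots
 * invalid so that no new references can be taken, and
 * kvm_tdp_mmu_zap_invalidated_roots() then zaps those roots with mmu_lock
 * held for read, keeping the bulk of the work out of the write-locked
 * critical section.
 */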
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

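/*
 * Handle a TDP page fault: walk down to the faulting GPA, installing page
 * table pages and the final SPTE as needed.  Returns an RET_PF_* value.
 */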
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

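/*
 * MMU notifier hooks.  unmap zaps the GFN range and returns whether a TLB
 * flush is needed, the age/test_age helpers report whether any SPTE in the
 * range was recently accessed, and set_spte_gfn backs the change_pte
 * notifier.
 */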
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

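/*
 * Dirty logging support: write-protect all SPTEs in a memslot down to
 * @min_level, clear dirty state for a whole slot or for a masked set of GFNs,
 * and zap SPTEs that could be mapped at a higher level once dirty logging is
 * disabled so they can be recreated as huge pages.  The bool returns indicate
 * whether a TLB flush is needed.
 */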
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush);

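/*
 * Remove write access from the leaf SPTE mapping @gfn (considering mappings
 * at or above @min_level).  Returns true if an SPTE was changed and a TLB
 * flush is needed.
 */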
71bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
Keqian Zhu3ad93562021-04-29 11:41:14 +080072 struct kvm_memory_slot *slot, gfn_t gfn,
73 int min_level);
Ben Gardon95fb5b02020-10-14 11:26:58 -070074
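/*
 * Lockless walks of the TDP page tables are protected by RCU rather than
 * mmu_lock, so walks must be bracketed by the begin/end helpers below, e.g.
 * (illustrative sketch only):
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 */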
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

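/*
 * kvm_tdp_mmu_get_walk() records the SPTE at each level of the walk for @addr
 * in @sptes and returns the leaf level; kvm_tdp_mmu_fast_pf_get_last_sptep()
 * returns a pointer to the last SPTE of the walk (and its value via @spte)
 * for the fast page fault path.  Both must be called under
 * kvm_tdp_mmu_walk_lockless_begin()/end().
 */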
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

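/*
 * The TDP MMU is supported only on 64-bit builds; the !CONFIG_X86_64 stubs
 * below let callers compile away.  kvm_mmu_init_tdp_mmu() returns true if
 * the TDP MMU was enabled for @kvm.
 */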
#ifdef CONFIG_X86_64
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root_hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */