// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	if (root->role.invalid)
		return false;

	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

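/*
 * Illustrative usage sketch (an assumption for documentation purposes, not
 * code taken from this file): a successful kvm_tdp_mmu_get_root() is expected
 * to be paired with kvm_tdp_mmu_put_root(), e.g.
 *
 *	if (kvm_tdp_mmu_get_root(kvm, root)) {
 *		// ... operate on the root ...
 *		kvm_tdp_mmu_put_root(kvm, root, shared);
 *	}
 *
 * where @shared indicates whether mmu_lock is held for read (shared) rather
 * than for write.
 */

/*
 * Zap SPTEs covering the GFN range [start, end) in the given address space.
 * @can_yield controls whether the walk may drop mmu_lock and reschedule; the
 * kvm_tdp_mmu_zap_gfn_range() wrapper always allows yielding.  @flush carries
 * the caller's pending TLB-flush state, and the return value indicates
 * whether a TLB flush is (still) required when the call returns.
 */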
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * zapping will never yield here anyway, since mmu_lock must be held
	 * for write, but explicitly disallow it for safety.  The TDP MMU does
	 * not yield until it has made forward progress (steps sideways), and
	 * when zapping a single shadow page that it's guaranteed to see (thus
	 * the mmu_lock requirement), its "step sideways" will always step
	 * beyond the bounds of the shadow page's gfn range and stop iterating
	 * before yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false);
}

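/*
 * kvm_tdp_mmu_zap_all() zaps every SPTE under every root.  The other two
 * helpers implement the "fast zap" flow: kvm_tdp_mmu_invalidate_all_roots()
 * marks all valid roots invalid so that new faults stop using them, and
 * kvm_tdp_mmu_zap_invalidated_roots() then zaps and releases those
 * invalidated roots.
 */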
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

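/*
 * Handle a TDP page fault: install or update SPTEs so that @gpa is mapped at
 * up to @max_level with the given pfn and writability, returning a RET_PF_*
 * code as with the other page fault handlers.
 */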
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

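/*
 * TDP MMU backing for the kvm_gfn_range-based MMU notifier callbacks:
 * unmapping (zapping) a range, clearing/testing accessed bits, and updating
 * an SPTE when the backing host PTE changes.
 */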
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

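/*
 * Dirty logging and huge page helpers.  kvm_tdp_mmu_wrprot_slot() removes
 * write access from SPTEs at or above @min_level in the slot,
 * kvm_tdp_mmu_clear_dirty_slot() and kvm_tdp_mmu_clear_dirty_pt_masked()
 * clear the dirty status of SPTEs (the latter only for the GFNs set in
 * @mask), and kvm_tdp_mmu_zap_collapsible_sptes() zaps SPTEs that could be
 * collapsed back into huge pages, e.g. after dirty logging is disabled.
 * The boolean returns report whether a TLB flush is needed.
 */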
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush);

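/*
 * Remove write access from SPTEs that map @gfn, at or above @min_level,
 * returning true if any SPTE was changed (i.e. a TLB flush is needed).
 */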
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}

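/*
 * Lockless walk helpers.  kvm_tdp_mmu_get_walk() records the SPTEs
 * encountered while walking @addr into @sptes (indexed by level), reports
 * the root level, and returns the level of the lowest SPTE reached.
 * kvm_tdp_mmu_fast_pf_get_last_sptep() returns a pointer to the last SPTE of
 * the walk and stores its value in @spte, for use by the fast page fault
 * path.
 *
 * Illustrative sketch (an assumption, not code from this file): callers are
 * expected to bracket such walks with the RCU helpers above, e.g.
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
 *	kvm_tdp_mmu_walk_lockless_end();
 */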
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

#ifdef CONFIG_X86_64
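/*
 * Set up or tear down the per-VM TDP MMU state.  kvm_mmu_init_tdp_mmu()
 * returns true if the TDP MMU will be used for this VM, which requires
 * hardware TDP (EPT/NPT) support and the TDP MMU to be enabled.
 */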
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

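/*
 * Returns true if @mmu's current root was allocated by the TDP MMU, i.e. if
 * faults on this MMU should be handled by the TDP MMU.
 */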
static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root_hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
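/* Stubs for 32-bit builds, where the TDP MMU is not supported. */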
static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */